diff --git a/expected/pathman_rebuild_updates.out b/expected/pathman_rebuild_updates.out
index eb078303..dfa4a5ce 100644
--- a/expected/pathman_rebuild_updates.out
+++ b/expected/pathman_rebuild_updates.out
@@ -155,6 +155,45 @@ UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::RE
  -1 | 105 | test_updates.test_13
 (1 row)
 
+/* basic check for 'ALTER TABLE ... ADD COLUMN'; PGPRO-5113 */
+create table test_updates.test_5113(val int4 not null);
+insert into test_updates.test_5113 values (1);
+select create_range_partitions('test_updates.test_5113', 'val', 1, 10);
+ create_range_partitions 
+-------------------------
+                       1
+(1 row)
+
+update test_updates.test_5113 set val = 11 where val = 1;
+alter table test_updates.test_5113 add column x varchar;
+/* no error here: */
+select * from test_updates.test_5113 where val = 11;
+ val | x 
+-----+---
+  11 | 
+(1 row)
+
+drop table test_updates.test_5113 cascade;
+NOTICE:  drop cascades to 3 other objects
+create table test_updates.test_5113(val int4 not null);
+insert into test_updates.test_5113 values (1);
+select create_range_partitions('test_updates.test_5113', 'val', 1, 10);
+ create_range_partitions 
+-------------------------
+                       1
+(1 row)
+
+update test_updates.test_5113 set val = 11 where val = 1;
+alter table test_updates.test_5113 add column x int8;
+/* no extra data in column 'x' here: */
+select * from test_updates.test_5113 where val = 11;
+ val | x 
+-----+---
+  11 | 
+(1 row)
+
+drop table test_updates.test_5113 cascade;
+NOTICE:  drop cascades to 3 other objects
 DROP SCHEMA test_updates CASCADE;
 NOTICE:  drop cascades to 15 other objects
 DROP EXTENSION pg_pathman;
diff --git a/expected/pathman_rebuild_updates_1.out b/expected/pathman_rebuild_updates_1.out
index 10ec256e..5bda15ce 100644
--- a/expected/pathman_rebuild_updates_1.out
+++ b/expected/pathman_rebuild_updates_1.out
@@ -155,6 +155,45 @@ UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::RE
  -1 | 105 | test_updates.test_13
 (1 row)
 
+/* basic check for 'ALTER TABLE ... ADD COLUMN'; PGPRO-5113 */
+create table test_updates.test_5113(val int4 not null);
+insert into test_updates.test_5113 values (1);
+select create_range_partitions('test_updates.test_5113', 'val', 1, 10);
+ create_range_partitions 
+-------------------------
+                       1
+(1 row)
+
+update test_updates.test_5113 set val = 11 where val = 1;
+alter table test_updates.test_5113 add column x varchar;
+/* no error here: */
+select * from test_updates.test_5113 where val = 11;
+ val | x 
+-----+---
+  11 | 
+(1 row)
+
+drop table test_updates.test_5113 cascade;
+NOTICE:  drop cascades to 3 other objects
+create table test_updates.test_5113(val int4 not null);
+insert into test_updates.test_5113 values (1);
+select create_range_partitions('test_updates.test_5113', 'val', 1, 10);
+ create_range_partitions 
+-------------------------
+                       1
+(1 row)
+
+update test_updates.test_5113 set val = 11 where val = 1;
+alter table test_updates.test_5113 add column x int8;
+/* no extra data in column 'x' here: */
+select * from test_updates.test_5113 where val = 11;
+ val | x 
+-----+---
+  11 | 
+(1 row)
+
+drop table test_updates.test_5113 cascade;
+NOTICE:  drop cascades to 3 other objects
 DROP SCHEMA test_updates CASCADE;
 NOTICE:  drop cascades to 15 other objects
 DROP EXTENSION pg_pathman;
diff --git a/sql/pathman_rebuild_updates.sql b/sql/pathman_rebuild_updates.sql
index f4229d09..01757c2c 100644
--- a/sql/pathman_rebuild_updates.sql
+++ b/sql/pathman_rebuild_updates.sql
@@ -79,6 +79,25 @@ UPDATE test_updates.test SET val = 95 WHERE val = 115 RETURNING *, tableoid::RE
 UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::REGCLASS;
 
 
+/* basic check for 'ALTER TABLE ... ADD COLUMN'; PGPRO-5113 */
+create table test_updates.test_5113(val int4 not null);
+insert into test_updates.test_5113 values (1);
+select create_range_partitions('test_updates.test_5113', 'val', 1, 10);
+update test_updates.test_5113 set val = 11 where val = 1;
+alter table test_updates.test_5113 add column x varchar;
+/* no error here: */
+select * from test_updates.test_5113 where val = 11;
+drop table test_updates.test_5113 cascade;
+
+create table test_updates.test_5113(val int4 not null);
+insert into test_updates.test_5113 values (1);
+select create_range_partitions('test_updates.test_5113', 'val', 1, 10);
+update test_updates.test_5113 set val = 11 where val = 1;
+alter table test_updates.test_5113 add column x int8;
+/* no extra data in column 'x' here: */
+select * from test_updates.test_5113 where val = 11;
+drop table test_updates.test_5113 cascade;
+
 DROP SCHEMA test_updates CASCADE;
 DROP EXTENSION pg_pathman;
diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h
index 233054b7..0c912abe 100644
--- a/src/include/partition_filter.h
+++ b/src/include/partition_filter.h
@@ -48,6 +48,7 @@ typedef struct
 	Oid					partid;					/* partition's relid */
 	ResultRelInfo	   *result_rel_info;		/* cached ResultRelInfo */
 	TupleConversionMap *tuple_map;				/* tuple mapping (parent => child) */
+	TupleConversionMap *tuple_map_child;		/* tuple mapping (child => child), used to exclude 'ctid' */
 
 	PartRelationInfo   *prel;					/* this child might be a parent... */
 	ExprState		   *prel_expr_state;		/* and have its own part. expression */
@@ -173,6 +174,10 @@ PartRelationInfo * refresh_result_parts_storage(ResultPartsStorage *parts_storag
 
 TupleConversionMap * build_part_tuple_map(Relation parent_rel, Relation child_rel);
 
+TupleConversionMap * build_part_tuple_map_child(Relation child_rel);
+
+void destroy_tuple_map(TupleConversionMap *tuple_map);
+
 List * pfilter_build_tlist(Plan *subplan);
 
diff --git a/src/partition_filter.c b/src/partition_filter.c
index 5d1f4943..0ef84e61 100644
--- a/src/partition_filter.c
+++ b/src/partition_filter.c
@@ -239,13 +239,9 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage)
 		}
 
 		/* Free conversion-related stuff */
-		if (rri_holder->tuple_map)
-		{
-			FreeTupleDesc(rri_holder->tuple_map->indesc);
-			FreeTupleDesc(rri_holder->tuple_map->outdesc);
+		destroy_tuple_map(rri_holder->tuple_map);
 
-			free_conversion_map(rri_holder->tuple_map);
-		}
+		destroy_tuple_map(rri_holder->tuple_map_child);
 
 		/* Don't forget to close 'prel'! */
 		if (rri_holder->prel)
@@ -381,6 +377,13 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid)
 		 */
 		rri_holder->tuple_map = build_part_tuple_map(base_rel, child_rel);
 
+		/*
+		 * Field for the child->child tuple transformation map. We need to
+		 * convert tuples because the child TupleDesc might have extra
+		 * columns ('ctid' etc.) and we need to remove them.
+		 */
+		rri_holder->tuple_map_child = NULL;
+
 		/* Default values */
 		rri_holder->prel = NULL;
 		rri_holder->prel_expr_state = NULL;
@@ -468,6 +471,73 @@ build_part_tuple_map(Relation base_rel, Relation child_rel)
 	return tuple_map;
 }
 
+/*
+ * Build a tuple conversion map (e.g. the partition tuple has extra column(s)).
+ * We create a special tuple map (tuple_map_child) which, when applied to a
+ * tuple of a partition, translates the tuple attributes into the tuple
+ * attributes of the same partition, discarding service attributes like "ctid"
+ * (i.e. it works like a junkFilter).
+ */
+TupleConversionMap *
+build_part_tuple_map_child(Relation child_rel)
+{
+	TupleConversionMap *tuple_map;
+	TupleDesc		child_tupdesc1;
+	TupleDesc		child_tupdesc2;
+	int				n;
+#if PG_VERSION_NUM >= 130000
+	AttrMap		   *attrMap;
+#else
+	AttrNumber	   *attrMap;
+#endif
+
+	child_tupdesc1 = CreateTupleDescCopy(RelationGetDescr(child_rel));
+	child_tupdesc1->tdtypeid = InvalidOid;
+
+	child_tupdesc2 = CreateTupleDescCopy(RelationGetDescr(child_rel));
+	child_tupdesc2->tdtypeid = InvalidOid;
+
+	/* Generate tuple transformation map */
+#if PG_VERSION_NUM >= 130000
+	attrMap = build_attrmap_by_name(child_tupdesc1, child_tupdesc2);
+#else
+	attrMap = convert_tuples_by_name_map(child_tupdesc1, child_tupdesc2,
+										 ERR_PART_DESC_CONVERT);
+#endif
+
+	/* Prepare the map structure */
+	tuple_map = (TupleConversionMap *) palloc(sizeof(TupleConversionMap));
+	tuple_map->indesc = child_tupdesc1;
+	tuple_map->outdesc = child_tupdesc2;
+	tuple_map->attrMap = attrMap;
+
+	/* Preallocate workspace for Datum arrays */
+	n = child_tupdesc1->natts;
+	tuple_map->outvalues = (Datum *) palloc(n * sizeof(Datum));
+	tuple_map->outisnull = (bool *) palloc(n * sizeof(bool));
+
+	n = child_tupdesc1->natts + 1;	/* +1 for NULL */
+	tuple_map->invalues = (Datum *) palloc(n * sizeof(Datum));
+	tuple_map->inisnull = (bool *) palloc(n * sizeof(bool));
+
+	tuple_map->invalues[0] = (Datum) 0;	/* set up the NULL entry */
+	tuple_map->inisnull[0] = true;
+
+	return tuple_map;
+}
+
+/* Destroy a tuple conversion map */
+void
+destroy_tuple_map(TupleConversionMap *tuple_map)
+{
+	if (tuple_map)
+	{
+		FreeTupleDesc(tuple_map->indesc);
+		FreeTupleDesc(tuple_map->outdesc);
+
+		free_conversion_map(tuple_map);
+	}
+}
 
 /*
  * -----------------------------------
@@ -812,6 +882,7 @@ partition_filter_exec(CustomScanState *node)
 	MemoryContext			old_mcxt;
 	ResultRelInfoHolder	   *rri_holder;
 	ResultRelInfo		   *rri;
+	JunkFilter			   *junkfilter = NULL;
 
 #if PG_VERSION_NUM >= 140000
 	PartitionRouterState   *pr_state = linitial(node->custom_ps);
@@ -827,6 +898,8 @@
 		 */
 		reint_partition_filter_state(state, pr_state->current_rri);
 	}
+#else
+	junkfilter = estate->es_result_relation_info->ri_junkFilter;
 #endif
 
 	/* Switch to per-tuple context */
@@ -844,18 +917,58 @@
 		/* Magic: replace parent's ResultRelInfo with ours */
 		estate->es_result_relation_info = rri;
 
+		/*
+		 * Besides the 'transform map', we should handle two cases:
+		 * 1) CMD_UPDATE, the row is moved to another partition, junkfilter == NULL
+		 *    (it is filled in router_set_slot() for SELECT + INSERT);
+		 *    we should clear the 'ctid' attribute (do not insert it into the database);
+		 * 2) CMD_INSERT/CMD_UPDATE operations on partitions with deleted column(s),
+		 *    junkfilter == NULL.
+		 */
+
 		/* If there's a transform map, rebuild the tuple */
-		if (rri_holder->tuple_map)
+		if (rri_holder->tuple_map ||
+			(!junkfilter &&
+			 (state->command_type == CMD_INSERT || state->command_type == CMD_UPDATE) &&
+			 (slot->tts_tupleDescriptor->natts > rri->ri_RelationDesc->rd_att->natts /* extra fields */
+#if PG_VERSION_NUM < 120000
+			  /*
+			   * If we have a regular physical tuple 'slot->tts_tuple' and it is
+			   * locally palloc'd, we will use this tuple in ExecMaterializeSlot()
+			   * instead of materializing the slot, so we need to check the
+			   * number of attributes of this tuple as well:
+			   */
+			  || (slot->tts_tuple && slot->tts_shouldFree &&
+				  HeapTupleHeaderGetNatts(slot->tts_tuple->t_data) >
+				  rri->ri_RelationDesc->rd_att->natts /* extra fields */)
+#endif
+			 )))
 		{
-			Relation	child_rel = rri->ri_RelationDesc;
-
-			/* xxx why old code decided to materialize it? */
 #if PG_VERSION_NUM < 120000
 			HeapTuple	htup_old,
 						htup_new;
+#endif
+			Relation	child_rel = rri->ri_RelationDesc;
+			TupleConversionMap *tuple_map;
 
+			if (rri_holder->tuple_map)
+				tuple_map = rri_holder->tuple_map;
+			else
+			{
+				if (!rri_holder->tuple_map_child)
+				{
+					/*
+					 * Generate a child->child tuple transformation map. We need
+					 * to convert tuples because the child TupleDesc has extra
+					 * columns ('ctid' etc.) and we need to remove them.
+					 */
+					rri_holder->tuple_map_child = build_part_tuple_map_child(child_rel);
+				}
+				tuple_map = rri_holder->tuple_map_child;
+			}
+
+			/* xxx why old code decided to materialize it? */
+#if PG_VERSION_NUM < 120000
 			htup_old = ExecMaterializeSlot(slot);
-			htup_new = do_convert_tuple(htup_old, rri_holder->tuple_map);
+			htup_new = do_convert_tuple(htup_old, tuple_map);
 			ExecClearTuple(slot);
 #endif
@@ -872,7 +985,7 @@
 			/* TODO: why should we *always* set a new slot descriptor? */
 			ExecSetSlotDescriptor(state->tup_convert_slot, RelationGetDescr(child_rel));
 #if PG_VERSION_NUM >= 120000
-			slot = execute_attr_map_slot(rri_holder->tuple_map->attrMap, slot, state->tup_convert_slot);
+			slot = execute_attr_map_slot(tuple_map->attrMap, slot, state->tup_convert_slot);
 #else
 			slot = ExecStoreTuple(htup_new, state->tup_convert_slot, InvalidBuffer, true);
 #endif
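
Reviewer's note (not part of the patch): the sketch below is only meant to make the intent of the new child->child conversion map easier to review. It assumes the usual pg_pathman build environment and the PostgreSQL 12+ slot code path used in the hunk above; the wrapper name strip_extra_attributes() and its in_slot/out_slot arguments are hypothetical and exist purely for illustration. The out_slot is assumed to already use the child's tuple descriptor, as the patch arranges via ExecSetSlotDescriptor().

    /*
     * Minimal sketch: lazily build the child->child map once per partition,
     * cache it in the ResultRelInfoHolder, and apply it so that only the
     * attributes known to the child's TupleDesc are copied, leaving out
     * trailing junk attributes such as 'ctid'.
     */
    #include "postgres.h"
    #include "access/tupconvert.h"
    #include "executor/tuptable.h"
    #include "utils/rel.h"

    #include "partition_filter.h"

    static TupleTableSlot *
    strip_extra_attributes(ResultRelInfoHolder *rri_holder,    /* hypothetical helper */
                           Relation child_rel,
                           TupleTableSlot *in_slot,
                           TupleTableSlot *out_slot)
    {
        /* Build and cache the child->child conversion map on first use */
        if (rri_holder->tuple_map_child == NULL)
            rri_holder->tuple_map_child = build_part_tuple_map_child(child_rel);

        /* Copy only the child's own attributes into out_slot */
        return execute_attr_map_slot(rri_holder->tuple_map_child->attrMap,
                                     in_slot, out_slot);
    }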