static ExecutorFinish_hook_type prev_ExecutorFinish = NULL;
static ExecutorEnd_hook_type prev_ExecutorEnd = NULL;
-static void explain_ExecutorStart(QueryDesc *queryDesc, int eflags);
+static bool explain_ExecutorStart(QueryDesc *queryDesc, int eflags);
static void explain_ExecutorRun(QueryDesc *queryDesc,
ScanDirection direction,
uint64 count);
/*
* ExecutorStart hook: start up logging if needed
*/
-static void
+static bool
explain_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
+ bool plan_valid;
+
/*
* At the beginning of each top-level statement, decide whether we'll
* sample this statement. If nested-statement explaining is enabled,
}
if (prev_ExecutorStart)
- prev_ExecutorStart(queryDesc, eflags);
+ plan_valid = prev_ExecutorStart(queryDesc, eflags);
else
- standard_ExecutorStart(queryDesc, eflags);
+ plan_valid = standard_ExecutorStart(queryDesc, eflags);
+
+ /* The plan may have become invalid during standard_ExecutorStart() */
+ if (!plan_valid)
+ return false;
if (auto_explain_enabled())
{
MemoryContextSwitchTo(oldcxt);
}
}
+
+ return true;
}
/*
const char *query_string,
int cursorOptions,
ParamListInfo boundParams);
-static void pgss_ExecutorStart(QueryDesc *queryDesc, int eflags);
+static bool pgss_ExecutorStart(QueryDesc *queryDesc, int eflags);
static void pgss_ExecutorRun(QueryDesc *queryDesc,
ScanDirection direction,
uint64 count);
/*
* ExecutorStart hook: start up tracking if needed
*/
-static void
+static bool
pgss_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
+ bool plan_valid;
+
if (prev_ExecutorStart)
- prev_ExecutorStart(queryDesc, eflags);
+ plan_valid = prev_ExecutorStart(queryDesc, eflags);
else
- standard_ExecutorStart(queryDesc, eflags);
+ plan_valid = standard_ExecutorStart(queryDesc, eflags);
+
+ /* The plan may have become invalid during standard_ExecutorStart() */
+ if (!plan_valid)
+ return false;
/*
* If query has queryId zero, don't track it. This prevents double
MemoryContextSwitchTo(oldcxt);
}
}
+
+ return true;
}
/*
((DR_copy *) dest)->cstate = cstate;
/* Create a QueryDesc requesting no output */
- cstate->queryDesc = CreateQueryDesc(plan, pstate->p_sourcetext,
+ cstate->queryDesc = CreateQueryDesc(plan, NULL, pstate->p_sourcetext,
GetActiveSnapshot(),
InvalidSnapshot,
dest, NULL, NULL, 0);
*
* ExecutorStart computes a result tupdesc for us
*/
- ExecutorStart(cstate->queryDesc, 0);
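+ /* The plan is not from a CachedPlan, so it cannot become invalid here */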
+ if (!ExecutorStart(cstate->queryDesc, 0))
+ elog(ERROR, "ExecutorStart() failed unexpectedly");
tupDesc = cstate->queryDesc->tupDesc;
}
UpdateActiveSnapshotCommandId();
/* Create a QueryDesc, redirecting output to our tuple receiver */
- queryDesc = CreateQueryDesc(plan, pstate->p_sourcetext,
+ queryDesc = CreateQueryDesc(plan, NULL, pstate->p_sourcetext,
GetActiveSnapshot(), InvalidSnapshot,
dest, params, queryEnv, 0);
/* call ExecutorStart to prepare the plan for execution */
- ExecutorStart(queryDesc, GetIntoRelEFlags(into));
+ if (!ExecutorStart(queryDesc, GetIntoRelEFlags(into)))
+ elog(ERROR, "ExecutorStart() failed unexpectedly");
/* run the plan to completion */
ExecutorRun(queryDesc, ForwardScanDirection, 0);
}
/* run it (if needed) and produce output */
- ExplainOnePlan(plan, into, es, queryString, params, queryEnv,
+ ExplainOnePlan(plan, NULL, NULL, -1, into, es, queryString, params,
+ queryEnv,
&planduration, (es->buffers ? &bufusage : NULL),
es->memory ? &mem_counters : NULL);
}
* to call it.
*/
void
-ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
+ExplainOnePlan(PlannedStmt *plannedstmt, CachedPlan *cplan,
+ CachedPlanSource *plansource, int query_index,
+ IntoClause *into, ExplainState *es,
const char *queryString, ParamListInfo params,
QueryEnvironment *queryEnv, const instr_time *planduration,
const BufferUsage *bufusage,
dest = None_Receiver;
/* Create a QueryDesc for the query */
- queryDesc = CreateQueryDesc(plannedstmt, queryString,
+ queryDesc = CreateQueryDesc(plannedstmt, cplan, queryString,
GetActiveSnapshot(), InvalidSnapshot,
dest, params, queryEnv, instrument_option);
if (into)
eflags |= GetIntoRelEFlags(into);
- /* call ExecutorStart to prepare the plan for execution */
- ExecutorStart(queryDesc, eflags);
+ /* Prepare the plan for execution. */
+ if (queryDesc->cplan)
+ {
+ ExecutorStartCachedPlan(queryDesc, eflags, plansource, query_index);
+ Assert(queryDesc->planstate);
+ }
+ else
+ {
+ if (!ExecutorStart(queryDesc, eflags))
+ elog(ERROR, "ExecutorStart() failed unexpectedly");
+ }
/* Execute the plan for statistics if asked for */
if (es->analyze)
QueryDesc *qdesc;
qdesc = CreateQueryDesc(stmt,
+ NULL,
sql,
GetActiveSnapshot(), NULL,
dest, NULL, NULL, 0);
- ExecutorStart(qdesc, 0);
+ if (!ExecutorStart(qdesc, 0))
+ elog(ERROR, "ExecutorStart() failed unexpectedly");
ExecutorRun(qdesc, ForwardScanDirection, 0);
ExecutorFinish(qdesc);
ExecutorEnd(qdesc);
UpdateActiveSnapshotCommandId();
/* Create a QueryDesc, redirecting output to our tuple receiver */
- queryDesc = CreateQueryDesc(plan, queryString,
+ queryDesc = CreateQueryDesc(plan, NULL, queryString,
GetActiveSnapshot(), InvalidSnapshot,
dest, NULL, NULL, 0);
/* call ExecutorStart to prepare the plan for execution */
- ExecutorStart(queryDesc, 0);
+ if (!ExecutorStart(queryDesc, 0))
+ elog(ERROR, "ExecutorStart() failed unexpectedly");
/* run the plan */
ExecutorRun(queryDesc, ForwardScanDirection, 0);
queryString,
CMDTAG_SELECT, /* cursor's query is always a SELECT */
list_make1(plan),
+ NULL,
NULL);
/*----------
query_string,
entry->plansource->commandTag,
plan_list,
- cplan);
+ cplan,
+ entry->plansource);
/*
* For CREATE TABLE ... AS EXECUTE, we must verify that the prepared
MemoryContextCounters mem_counters;
MemoryContext planner_ctx = NULL;
MemoryContext saved_ctx = NULL;
+ int query_index = 0;
if (es->memory)
{
PlannedStmt *pstmt = lfirst_node(PlannedStmt, p);
if (pstmt->commandType != CMD_UTILITY)
- ExplainOnePlan(pstmt, into, es, query_string, paramLI, pstate->p_queryEnv,
+ ExplainOnePlan(pstmt, cplan, entry->plansource, query_index,
+ into, es, query_string, paramLI, pstate->p_queryEnv,
&planduration, (es->buffers ? &bufusage : NULL),
es->memory ? &mem_counters : NULL);
else
/* Separate plans with an appropriate separator */
if (lnext(plan_list, p) != NULL)
ExplainSeparatePlans(es);
+
+ query_index++;
}
if (estate)
}
+/* ----------
+ * AfterTriggerAbortQuery()
+ *
+ * Called by standard_ExecutorEnd() if the query execution was aborted due to
+ * the plan becoming invalid during initialization.
+ * ----------
+ */
+void
+AfterTriggerAbortQuery(void)
+{
+ /* Revert the actions of AfterTriggerBeginQuery(). */
+ afterTriggers.query_depth--;
+}
+
+
/* ----------
* AfterTriggerEndQuery()
*
associated with ExprContexts, and commonly each PlanState node has its own
ExprContext to evaluate its qual and targetlist expressions in.
+Relation Locking
+----------------
+
+When the executor initializes a plan tree for execution, it doesn't lock
+non-index relations if the plan tree is freshly generated and not derived
+from a CachedPlan. This is because such locks have already been established
+during the query's parsing, rewriting, and planning phases. However, with a
+cached plan tree, some relations may remain unlocked. The function
+AcquireExecutorLocks() only locks unprunable relations in the plan, deferring
+the locking of prunable ones to executor initialization. This avoids
+unnecessary locking of relations that will be pruned during "initial" runtime
+pruning in ExecDoInitialPruning().
+
+This approach creates a window where a cached plan tree with child tables
+could become outdated if another backend modifies these tables before
+ExecDoInitialPruning() locks them. As a result, the executor has the added duty
+to verify the plan tree's validity whenever it locks a child table after
+doing initial pruning. This validation is done by checking the
+CachedPlan.is_valid flag. If the plan tree is outdated (is_valid = false),
+the executor stops further initialization, cleans up anything in EState that
+would have been allocated up to that point, and retries execution after
+recreating the invalid plan in the CachedPlan. See ExecutorStartCachedPlan().
Query Processing Control Flow
-----------------------------
CreateQueryDesc
- ExecutorStart
+ ExecutorStart or ExecutorStartCachedPlan
CreateExecutorState
creates per-query context
- switch to per-query context to run ExecInitNode
+ switch to per-query context to run ExecDoInitialPruning and ExecInitNode
AfterTriggerBeginQuery
+ ExecDoInitialPruning
+ does initial pruning and locks surviving partitions if needed
ExecInitNode --- recursively scans plan tree
ExecInitNode
recurse into subsidiary nodes
FreeQueryDesc
-Per above comments, it's not really critical for ExecEndNode to free any
+As mentioned in the "Relation Locking" section, if the plan tree is found to
+be stale after locking partitions in ExecDoInitialPruning(), control is
+immediately returned to ExecutorStartCachedPlan(), which will create a new
+plan tree and perform the steps starting from CreateExecutorState() again.
+
+Per above comments, it's not really critical for ExecEndPlan to free any
memory; it'll all go away in FreeExecutorState anyway. However, we do need to
be careful to close relations, drop buffer pins, etc, so we do need to scan
the plan state tree to find these sorts of resources.
#include "parser/parse_relation.h"
#include "pgstat.h"
#include "rewrite/rewriteHandler.h"
+#include "storage/lmgr.h"
#include "tcop/utility.h"
#include "utils/acl.h"
#include "utils/backend_status.h"
#include "utils/lsyscache.h"
#include "utils/partcache.h"
+#include "utils/plancache.h"
#include "utils/rls.h"
#include "utils/snapmgr.h"
* get control when ExecutorStart is called. Such a plugin would
* normally call standard_ExecutorStart().
*
+ * The return value indicates whether the plan was initialized successfully,
+ * i.e., whether queryDesc->planstate contains a valid PlanState tree. It
+ * will not be if the plan was invalidated during InitPlan().
* ----------------------------------------------------------------
*/
-void
+bool
ExecutorStart(QueryDesc *queryDesc, int eflags)
{
+ bool plan_valid;
+
/*
* In some cases (e.g. an EXECUTE statement or an execute message with the
* extended query protocol) the query_id won't be reported, so do it now.
pgstat_report_query_id(queryDesc->plannedstmt->queryId, false);
if (ExecutorStart_hook)
- (*ExecutorStart_hook) (queryDesc, eflags);
+ plan_valid = (*ExecutorStart_hook) (queryDesc, eflags);
else
- standard_ExecutorStart(queryDesc, eflags);
+ plan_valid = standard_ExecutorStart(queryDesc, eflags);
+
+ return plan_valid;
}
-void
+bool
standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
EState *estate;
InitPlan(queryDesc, eflags);
MemoryContextSwitchTo(oldcontext);
+
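+ /*
+ * Report whether the plan is still valid; InitPlan() may have returned
+ * early without building a PlanState tree if deferred partition locks
+ * invalidated the CachedPlan.
+ */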
+ return ExecPlanStillValid(queryDesc->estate);
+}
+
+/*
+ * ExecutorStartCachedPlan
+ * Start execution for a given query in the CachedPlanSource, replanning
+ * if the plan is invalidated due to deferred locks taken during the
+ * plan's initialization
+ *
+ * This function handles cases where the CachedPlan given in queryDesc->cplan
+ * might become invalid during the initialization of the plan given in
+ * queryDesc->plannedstmt, particularly when prunable relations in it are
+ * locked after performing initial pruning. If the locks invalidate the plan,
+ * the function calls UpdateCachedPlan() to replan all queries in the
+ * CachedPlan, and then retries initialization.
+ *
+ * The function repeats the process until ExecutorStart() successfully
+ * initializes the plan, that is, without the CachedPlan becoming invalid.
+ */
+void
+ExecutorStartCachedPlan(QueryDesc *queryDesc, int eflags,
+ CachedPlanSource *plansource,
+ int query_index)
+{
+ if (unlikely(queryDesc->cplan == NULL))
+ elog(ERROR, "ExecutorStartCachedPlan(): missing CachedPlan");
+ if (unlikely(plansource == NULL))
+ elog(ERROR, "ExecutorStartCachedPlan(): missing CachedPlanSource");
+
+ /*
+ * Loop and retry with an updated plan until no further invalidation
+ * occurs.
+ */
+ while (1)
+ {
+ if (!ExecutorStart(queryDesc, eflags))
+ {
+ /*
+ * Clean up the current execution state before creating the new
+ * plan to retry ExecutorStart(). Mark execution as aborted to
+ * ensure that AFTER trigger state is properly reset.
+ */
+ queryDesc->estate->es_aborted = true;
+ ExecutorEnd(queryDesc);
+
+ /* Retry ExecutorStart() with an updated plan tree. */
+ queryDesc->plannedstmt = UpdateCachedPlan(plansource, query_index,
+ queryDesc->queryEnv);
+ }
+ else
+
+ /*
+ * Exit the loop if the plan is initialized successfully and no
+ * sinval messages were received that invalidated the CachedPlan.
+ */
+ break;
+ }
}
/* ----------------------------------------------------------------
estate = queryDesc->estate;
Assert(estate != NULL);
+ Assert(!estate->es_aborted);
Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
/* caller must ensure the query's snapshot is active */
Assert(estate != NULL);
Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
- /* This should be run once and only once per Executor instance */
- Assert(!estate->es_finished);
+ /*
+ * This should be run once and only once per Executor instance and never
+ * if the execution was aborted.
+ */
+ Assert(!estate->es_finished && !estate->es_aborted);
/* Switch into per-query memory context */
oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
(PgStat_Counter) estate->es_parallel_workers_launched);
/*
- * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
- * Assert is needed because ExecutorFinish is new as of 9.1, and callers
- * might forget to call it.
+ * Check that ExecutorFinish was called, unless in EXPLAIN-only mode or if
+ * execution was aborted.
*/
- Assert(estate->es_finished ||
+ Assert(estate->es_finished || estate->es_aborted ||
(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
/*
UnregisterSnapshot(estate->es_snapshot);
UnregisterSnapshot(estate->es_crosscheck_snapshot);
+ /*
+ * Reset AFTER trigger module if the query execution was aborted.
+ */
+ if (estate->es_aborted &&
+ !(estate->es_top_eflags &
+ (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
+ AfterTriggerAbortQuery();
+
/*
* Must switch out of context before destroying it
*/
(rte->rtekind == RTE_SUBQUERY &&
rte->relkind == RELKIND_VIEW));
+ /*
+ * Ensure that we have at least an AccessShareLock on relations
+ * whose permissions need to be checked.
+ *
+ * Skip this check in a parallel worker because locks won't be
+ * taken until ExecInitNode() performs plan initialization.
+ *
+ * XXX: ExecCheckPermissions() in a parallel worker may be
+ * redundant with the checks done in the leader process, so this
+ * should be reviewed to ensure it's necessary.
+ */
+ Assert(IsParallelWorker() ||
+ CheckRelationOidLockedByMe(rte->relid, AccessShareLock,
+ true));
+
(void) getRTEPermissionInfo(rteperminfos, rte);
/* Many-to-one mapping not allowed */
Assert(!bms_is_member(rte->perminfoindex, indexset));
*
* Initializes the query plan: open files, allocate storage
* and start up the rule manager
+ *
+ * If the plan originates from a CachedPlan (given in queryDesc->cplan),
+ * it can become invalid during runtime "initial" pruning when the
+ * remaining set of locks is taken. The function returns early in that
+ * case without initializing the plan, and the caller is expected to
+ * retry with a new valid plan.
* ----------------------------------------------------------------
*/
static void
{
CmdType operation = queryDesc->operation;
PlannedStmt *plannedstmt = queryDesc->plannedstmt;
+ CachedPlan *cachedplan = queryDesc->cplan;
Plan *plan = plannedstmt->planTree;
List *rangeTable = plannedstmt->rtable;
EState *estate = queryDesc->estate;
bms_copy(plannedstmt->unprunableRelids));
estate->es_plannedstmt = plannedstmt;
+ estate->es_cachedplan = cachedplan;
estate->es_part_prune_infos = plannedstmt->partPruneInfos;
/*
*/
ExecDoInitialPruning(estate);
+ if (!ExecPlanStillValid(estate))
+ return;
+
/*
* Next, build the ExecRowMark array from the PlanRowMark(s), if any.
*/
* the snapshot, rangetable, and external Param info. They need their own
* copies of local state, including a tuple table, es_param_exec_vals,
* result-rel info, etc.
+ *
+ * es_cachedplan is not copied because EPQ plan execution does not acquire
+ * any new locks that could invalidate the CachedPlan.
*/
rcestate->es_direction = ForwardScanDirection;
rcestate->es_snapshot = parentestate->es_snapshot;
paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMLISTINFO, false);
paramLI = RestoreParamList(¶mspace);
- /* Create a QueryDesc for the query. */
+ /*
+ * Create a QueryDesc for the query. We pass NULL for cachedplan, because
+ * we don't have a pointer to the CachedPlan in the leader's process. It's
+ * fine because the only reason the executor needs to see it is to decide
+ * if it should take locks on certain relations, but parallel workers
+ * always take locks anyway.
+ */
return CreateQueryDesc(pstmt,
+ NULL,
queryString,
GetActiveSnapshot(), InvalidSnapshot,
receiver, paramLI, NULL, instrument_options);
/* Start up the executor */
queryDesc->plannedstmt->jitFlags = fpes->jit_flags;
- ExecutorStart(queryDesc, fpes->eflags);
+ if (!ExecutorStart(queryDesc, fpes->eflags))
+ elog(ERROR, "ExecutorStart() failed unexpectedly");
/* Special executor initialization steps for parallel workers */
queryDesc->planstate->state->es_query_dsa = area;
#include "partitioning/partdesc.h"
#include "partitioning/partprune.h"
#include "rewrite/rewriteManip.h"
+#include "storage/lmgr.h"
#include "utils/acl.h"
#include "utils/lsyscache.h"
#include "utils/partcache.h"
* ExecDoInitialPruning:
* Perform runtime "initial" pruning, if necessary, to determine the set
* of child subnodes that need to be initialized during ExecInitNode() for
- * all plan nodes that contain a PartitionPruneInfo.
+ * all plan nodes that contain a PartitionPruneInfo. This also locks, if
+ * needed, the leaf partitions whose subnodes will be initialized.
*
* ExecInitPartitionExecPruning:
* Updates the PartitionPruneState found at given part_prune_index in
*-------------------------------------------------------------------------
*/
+
/*
* ExecDoInitialPruning
* Perform runtime "initial" pruning, if necessary, to determine the set
* of child subnodes that need to be initialized during ExecInitNode() for
- * plan nodes that support partition pruning.
+ * plan nodes that support partition pruning. This also locks, if needed,
+ * the leaf partitions whose subnodes will be initialized.
*
* This function iterates over each PartitionPruneInfo entry in
* estate->es_part_prune_infos. For each entry, it creates a PartitionPruneState
ExecDoInitialPruning(EState *estate)
{
ListCell *lc;
+ List *locked_relids = NIL;
foreach(lc, estate->es_part_prune_infos)
{
else
validsubplan_rtis = all_leafpart_rtis;
+ if (ExecShouldLockRelations(estate))
+ {
+ int rtindex = -1;
+
+ while ((rtindex = bms_next_member(validsubplan_rtis,
+ rtindex)) >= 0)
+ {
+ RangeTblEntry *rte = exec_rt_fetch(rtindex, estate);
+
+ Assert(rte->rtekind == RTE_RELATION &&
+ rte->rellockmode != NoLock);
+ LockRelationOid(rte->relid, rte->rellockmode);
+ locked_relids = lappend_int(locked_relids, rtindex);
+ }
+ }
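+
+ /*
+ * Add the RT indexes of surviving partitions to es_unpruned_relids so
+ * that ExecGetRangeTableRelation() will allow them to be opened.
+ */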
estate->es_unpruned_relids = bms_add_members(estate->es_unpruned_relids,
validsubplan_rtis);
estate->es_part_prune_results = lappend(estate->es_part_prune_results,
validsubplans);
}
+
+ /*
+ * Release the useless locks if the plan won't be executed. This is the
+ * same as what CheckCachedPlan() in plancache.c does.
+ */
+ if (!ExecPlanStillValid(estate))
+ {
+ foreach(lc, locked_relids)
+ {
+ RangeTblEntry *rte = exec_rt_fetch(lfirst_int(lc), estate);
+
+ UnlockRelationOid(rte->relid, rte->rellockmode);
+ }
+ }
}
/*
estate->es_top_eflags = 0;
estate->es_instrument = 0;
estate->es_finished = false;
+ estate->es_aborted = false;
estate->es_exprcontexts = NIL;
* Open the Relation for a range table entry, if not already done
*
* The Relations will be closed in ExecEndPlan().
+ *
+ * Note: The caller must ensure that 'rti' refers to an unpruned relation
+ * (i.e., it is a member of estate->es_unpruned_relids) before calling this
+ * function. Attempting to open a pruned relation will result in an error.
*/
Relation
ExecGetRangeTableRelation(EState *estate, Index rti)
Assert(rti > 0 && rti <= estate->es_range_table_size);
+ if (!bms_is_member(rti, estate->es_unpruned_relids))
+ elog(ERROR, "trying to open a pruned relation");
+
rel = estate->es_relations[rti - 1];
if (rel == NULL)
{
dest = None_Receiver;
es->qd = CreateQueryDesc(es->stmt,
+ NULL,
fcache->src,
GetActiveSnapshot(),
InvalidSnapshot,
eflags = EXEC_FLAG_SKIP_TRIGGERS;
else
eflags = 0; /* default run-to-completion flags */
- ExecutorStart(es->qd, eflags);
+ if (!ExecutorStart(es->qd, eflags))
+ elog(ERROR, "ExecutorStart() failed unexpectedly");
}
es->status = F_EXEC_RUN;
static ParamListInfo _SPI_convert_params(int nargs, Oid *argtypes,
Datum *Values, const char *Nulls);
-static int _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount);
+static int _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount,
+ CachedPlanSource *plansource, int query_index);
static void _SPI_error_callback(void *arg);
query_string,
plansource->commandTag,
stmt_list,
- cplan);
+ cplan,
+ plansource);
/*
* Set up options for portal. Default SCROLL type is chosen the same way
CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc1);
List *stmt_list;
ListCell *lc2;
+ int query_index = 0;
spicallbackarg.query = plansource->query_string;
snap = InvalidSnapshot;
qdesc = CreateQueryDesc(stmt,
+ cplan,
plansource->query_string,
snap, crosscheck_snapshot,
dest,
options->params,
_SPI_current->queryEnv,
0);
- res = _SPI_pquery(qdesc, fire_triggers,
- canSetTag ? options->tcount : 0);
+
+ res = _SPI_pquery(qdesc, fire_triggers, canSetTag ? options->tcount : 0,
+ plansource, query_index);
FreeQueryDesc(qdesc);
}
else
my_res = res;
goto fail;
}
+
+ query_index++;
}
/* Done with this plan, so release refcount */
}
static int
-_SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount)
+_SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount,
+ CachedPlanSource *plansource, int query_index)
{
int operation = queryDesc->operation;
int eflags;
else
eflags = EXEC_FLAG_SKIP_TRIGGERS;
- ExecutorStart(queryDesc, eflags);
+ if (queryDesc->cplan)
+ {
+ ExecutorStartCachedPlan(queryDesc, eflags, plansource, query_index);
+ Assert(queryDesc->planstate);
+ }
+ else
+ {
+ if (!ExecutorStart(queryDesc, eflags))
+ elog(ERROR, "ExecutorStart() failed unexpectedly");
+ }
ExecutorRun(queryDesc, ForwardScanDirection, tcount);
query_string,
commandTag,
plantree_list,
+ NULL,
NULL);
/*
query_string,
psrc->commandTag,
cplan->stmt_list,
- cplan);
+ cplan,
+ psrc);
/* Done with the snapshot used for parameter I/O and parsing/planning */
if (snapshot_set)
#include "access/xact.h"
#include "commands/prepare.h"
+#include "executor/execdesc.h"
#include "executor/tstoreReceiver.h"
#include "miscadmin.h"
#include "pg_trace.h"
static void ProcessQuery(PlannedStmt *plan,
+ CachedPlan *cplan,
+ CachedPlanSource *plansource,
+ int query_index,
const char *sourceText,
ParamListInfo params,
QueryEnvironment *queryEnv,
*/
QueryDesc *
CreateQueryDesc(PlannedStmt *plannedstmt,
+ CachedPlan *cplan,
const char *sourceText,
Snapshot snapshot,
Snapshot crosscheck_snapshot,
qd->operation = plannedstmt->commandType; /* operation */
qd->plannedstmt = plannedstmt; /* plan */
+ qd->cplan = cplan; /* CachedPlan supplying the plannedstmt */
qd->sourceText = sourceText; /* query text */
qd->snapshot = RegisterSnapshot(snapshot); /* snapshot */
/* RI check snapshot */
* PORTAL_ONE_RETURNING, or PORTAL_ONE_MOD_WITH portal
*
* plan: the plan tree for the query
+ * cplan: CachedPlan supplying the plan
+ * plansource: CachedPlanSource supplying the cplan
+ * query_index: index of the query in plansource->query_list
* sourceText: the source text of the query
* params: any parameters needed
* dest: where to send results
*/
static void
ProcessQuery(PlannedStmt *plan,
+ CachedPlan *cplan,
+ CachedPlanSource *plansource,
+ int query_index,
const char *sourceText,
ParamListInfo params,
QueryEnvironment *queryEnv,
/*
* Create the QueryDesc object
*/
- queryDesc = CreateQueryDesc(plan, sourceText,
+ queryDesc = CreateQueryDesc(plan, cplan, sourceText,
GetActiveSnapshot(), InvalidSnapshot,
dest, params, queryEnv, 0);
/*
- * Call ExecutorStart to prepare the plan for execution
+ * Prepare the plan for execution
*/
- ExecutorStart(queryDesc, 0);
+ if (queryDesc->cplan)
+ {
+ ExecutorStartCachedPlan(queryDesc, 0, plansource, query_index);
+ Assert(queryDesc->planstate);
+ }
+ else
+ {
+ if (!ExecutorStart(queryDesc, 0))
+ elog(ERROR, "ExecutorStart() failed unexpectedly");
+ }
/*
* Run the plan to completion.
* the destination to DestNone.
*/
queryDesc = CreateQueryDesc(linitial_node(PlannedStmt, portal->stmts),
+ portal->cplan,
portal->sourceText,
GetActiveSnapshot(),
InvalidSnapshot,
myeflags = eflags;
/*
- * Call ExecutorStart to prepare the plan for execution
+ * Prepare the plan for execution.
*/
- ExecutorStart(queryDesc, myeflags);
+ if (portal->cplan)
+ {
+ ExecutorStartCachedPlan(queryDesc, myeflags,
+ portal->plansource, 0);
+ Assert(queryDesc->planstate);
+ }
+ else
+ {
+ if (!ExecutorStart(queryDesc, myeflags))
+ elog(ERROR, "ExecutorStart() failed unexpectedly");
+ }
/*
* This tells PortalCleanup to shut down the executor
{
bool active_snapshot_set = false;
ListCell *stmtlist_item;
+ int query_index = 0;
/*
* If the destination is DestRemoteExecute, change to DestNone. The
{
/* statement can set tag string */
ProcessQuery(pstmt,
+ portal->cplan,
+ portal->plansource,
+ query_index,
portal->sourceText,
portal->portalParams,
portal->queryEnv,
{
/* stmt added by rewrite cannot set tag */
ProcessQuery(pstmt,
+ portal->cplan,
+ portal->plansource,
+ query_index,
portal->sourceText,
portal->portalParams,
portal->queryEnv,
*/
if (lnext(portal->stmts, stmtlist_item) != NULL)
CommandCounterIncrement();
+
+ query_index++;
}
/* Pop the snapshot if we pushed one. */
static void ReleaseGenericPlan(CachedPlanSource *plansource);
static List *RevalidateCachedQuery(CachedPlanSource *plansource,
- QueryEnvironment *queryEnv);
+ QueryEnvironment *queryEnv,
+ bool release_generic);
static bool CheckCachedPlan(CachedPlanSource *plansource);
static CachedPlan *BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
ParamListInfo boundParams, QueryEnvironment *queryEnv);
* The result value is the transient analyzed-and-rewritten query tree if we
* had to do re-analysis, and NIL otherwise. (This is returned just to save
* a tree copying step in a subsequent BuildCachedPlan call.)
+ *
+ * This also releases and drops the generic plan (plansource->gplan), if any,
+ * as callers typically build a new CachedPlan for the plansource
+ * right after this. However, when called from UpdateCachedPlan(), the
+ * function does not release the generic plan, as UpdateCachedPlan() updates
+ * an existing CachedPlan in place.
*/
static List *
RevalidateCachedQuery(CachedPlanSource *plansource,
- QueryEnvironment *queryEnv)
+ QueryEnvironment *queryEnv,
+ bool release_generic)
{
bool snapshot_set;
RawStmt *rawtree;
MemoryContextDelete(qcxt);
}
- /* Drop the generic plan reference if any */
- ReleaseGenericPlan(plansource);
+ /* Drop the generic plan reference, if any, and if requested */
+ if (release_generic)
+ ReleaseGenericPlan(plansource);
/*
* Now re-do parse analysis and rewrite. This not incidentally acquires
* Caller must have already called RevalidateCachedQuery to verify that the
* querytree is up to date.
*
- * On a "true" return, we have acquired the locks needed to run the plan.
- * (We must do this for the "true" result to be race-condition-free.)
+ * On a "true" return, we have acquired locks on the "unprunableRelids" set
+ * for all plans in plansource->gplan->stmt_list. However, the plans are not
+ * fully race-condition-free until the executor acquires locks on the
+ * prunable relations that survive initial runtime pruning during InitPlan().
*/
static bool
CheckCachedPlan(CachedPlanSource *plansource)
* Planning work is done in the caller's memory context. The finished plan
* is in a child memory context, which typically should get reparented
* (unless this is a one-shot plan, in which case we don't copy the plan).
+ *
+ * Note: When changing this, you should also look at UpdateCachedPlan().
*/
static CachedPlan *
BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
bool snapshot_set;
bool is_transient;
MemoryContext plan_context;
+ MemoryContext stmt_context = NULL;
MemoryContext oldcxt = CurrentMemoryContext;
ListCell *lc;
* let's treat it as real and redo the RevalidateCachedQuery call.
*/
if (!plansource->is_valid)
- qlist = RevalidateCachedQuery(plansource, queryEnv);
+ qlist = RevalidateCachedQuery(plansource, queryEnv, true);
/*
* If we don't already have a copy of the querytree list that can be
PopActiveSnapshot();
/*
- * Normally we make a dedicated memory context for the CachedPlan and its
- * subsidiary data. (It's probably not going to be large, but just in
- * case, allow it to grow large. It's transient for the moment.) But for
- * a one-shot plan, we just leave it in the caller's memory context.
+ * Normally, we create a dedicated memory context for the CachedPlan and
+ * its subsidiary data. Although it's usually not very large, the context
+ * is designed to allow growth if necessary.
+ *
+ * The PlannedStmts are stored in a separate child context (stmt_context)
+ * of the CachedPlan's memory context. This separation allows
+ * UpdateCachedPlan() to free and replace the PlannedStmts without
+ * affecting the CachedPlan structure or its stmt_list List.
+ *
+ * For one-shot plans, we instead use the caller's memory context, as the
+ * CachedPlan will not persist. stmt_context is left NULL in this case,
+ * because UpdateCachedPlan() should never get called on a one-shot
+ * plan.
*/
if (!plansource->is_oneshot)
{
ALLOCSET_START_SMALL_SIZES);
MemoryContextCopyAndSetIdentifier(plan_context, plansource->query_string);
- /*
- * Copy plan into the new context.
- */
- MemoryContextSwitchTo(plan_context);
+ stmt_context = AllocSetContextCreate(CurrentMemoryContext,
+ "CachedPlan PlannedStmts",
+ ALLOCSET_START_SMALL_SIZES);
+ MemoryContextCopyAndSetIdentifier(stmt_context, plansource->query_string);
+ MemoryContextSetParent(stmt_context, plan_context);
+ MemoryContextSwitchTo(stmt_context);
plist = copyObject(plist);
+
+ MemoryContextSwitchTo(plan_context);
+ plist = list_copy(plist);
}
else
plan_context = CurrentMemoryContext;
plan->saved_xmin = InvalidTransactionId;
plan->refcount = 0;
plan->context = plan_context;
+ plan->stmt_context = stmt_context;
plan->is_oneshot = plansource->is_oneshot;
plan->is_saved = false;
+ plan->is_reused = false;
plan->is_valid = true;
/* assign generation number to new plan */
return plan;
}
+/*
+ * UpdateCachedPlan
+ * Create fresh plans for all queries in the CachedPlanSource, replacing
+ * those in the generic plan's stmt_list, and return the plan for the
+ * query_index'th query.
+ *
+ * This function is primarily used by ExecutorStartCachedPlan() to handle
+ * cases where the original generic CachedPlan becomes invalid. Such
+ * invalidation may occur when prunable relations in the old plan for the
+ * query_index'th query are locked in preparation for execution.
+ *
+ * Note that invalidations received during the execution of the query_index'th
+ * query can affect both the queries that have already finished execution
+ * (e.g., due to concurrent modifications on prunable relations that were not
+ * locked during their execution) and those that have not yet been
+ * executed. As a result, this function updates all plans to ensure that
+ * CachedPlan.is_valid is safely set to true.
+ *
+ * The old PlannedStmts in plansource->gplan->stmt_list are freed here, so
+ * the caller and any of its callers must not rely on them remaining accessible
+ * after this function is called.
+ */
+PlannedStmt *
+UpdateCachedPlan(CachedPlanSource *plansource, int query_index,
+ QueryEnvironment *queryEnv)
+{
+ List *query_list = plansource->query_list,
+ *plan_list;
+ ListCell *l1,
+ *l2;
+ CachedPlan *plan = plansource->gplan;
+ MemoryContext oldcxt;
+
+ Assert(ActiveSnapshotSet());
+
+ /* Sanity checks (XXX can be Asserts?) */
+ if (plan == NULL)
+ elog(ERROR, "UpdateCachedPlan() called in the wrong context: plansource->gplan is NULL");
+ else if (plan->is_valid)
+ elog(ERROR, "UpdateCachedPlan() called in the wrong context: plansource->gplan->is_valid is true");
+ else if (plan->is_oneshot)
+ elog(ERROR, "UpdateCachedPlan() called in the wrong context: plansource->gplan->is_oneshot is true");
+
+ /*
+ * The plansource might have become invalid since GetCachedPlan() returned
+ * the CachedPlan. See the comment in BuildCachedPlan() for details on why
+ * this might happen. Although invalidation is likely a false positive as
+ * stated there, we make the plan valid to ensure the query list used for
+ * planning is up to date.
+ *
+ * The risk of catching an invalidation is higher here than when
+ * BuildCachedPlan() is called from GetCachedPlan(), because this function
+ * is normally called long after GetCachedPlan() returns the CachedPlan,
+ * so much more processing could have occurred, including things that mark
+ * the CachedPlanSource invalid.
+ *
+ * Note: Do not release plansource->gplan, because the upstream callers
+ * (such as the callers of ExecutorStartCachedPlan()) would still be
+ * referencing it.
+ */
+ if (!plansource->is_valid)
+ query_list = RevalidateCachedQuery(plansource, queryEnv, false);
+ Assert(query_list != NIL);
+
+ /*
+ * Build a new generic plan for all the queries after making a copy to be
+ * scribbled on by the planner.
+ */
+ query_list = copyObject(query_list);
+
+ /*
+ * Planning work is done in the caller's memory context. The resulting
+ * PlannedStmts are then copied into plan->stmt_context after throwing
+ * away the old ones.
+ */
+ plan_list = pg_plan_queries(query_list, plansource->query_string,
+ plansource->cursor_options, NULL);
+ Assert(list_length(plan_list) == list_length(plan->stmt_list));
+
+ MemoryContextReset(plan->stmt_context);
+ oldcxt = MemoryContextSwitchTo(plan->stmt_context);
+ forboth(l1, plan_list, l2, plan->stmt_list)
+ {
+ PlannedStmt *plannedstmt = lfirst(l1);
+
+ lfirst(l2) = copyObject(plannedstmt);
+ }
+ MemoryContextSwitchTo(oldcxt);
+
+ /*
+ * XXX Should this also (re)set the properties of the CachedPlan that are
+ * set in BuildCachedPlan() after creating the fresh plans such as
+ * planRoleId, dependsOnRole, and save_xmin?
+ */
+
+ /*
+ * We've updated all the plans that might have been invalidated, so mark
+ * the CachedPlan as valid.
+ */
+ plan->is_valid = true;
+
+ /* Also update generic_cost because we just created a new generic plan. */
+ plansource->generic_cost = cached_plan_cost(plan, false);
+
+ return list_nth_node(PlannedStmt, plan->stmt_list, query_index);
+}
+
/*
* choose_custom_plan: choose whether to use custom or generic plan
*
* plan or a custom plan for the given parameters: the caller does not know
* which it will get.
*
- * On return, the plan is valid and we have sufficient locks to begin
- * execution.
+ * On return, the plan is valid, but if it is a reused generic plan, not all
+ * locks are acquired. In such cases, CheckCachedPlan() does not take locks
+ * on relations subject to initial runtime pruning; instead, these locks are
+ * deferred until execution startup, when ExecDoInitialPruning() performs
+ * initial pruning. The plan's "is_reused" flag is set to indicate that
+ * CachedPlanRequiresLocking() should return true when called by
+ * ExecDoInitialPruning().
*
* On return, the refcount of the plan has been incremented; a later
* ReleaseCachedPlan() call is expected. If "owner" is not NULL then
elog(ERROR, "cannot apply ResourceOwner to non-saved cached plan");
/* Make sure the querytree list is valid and we have parse-time locks */
- qlist = RevalidateCachedQuery(plansource, queryEnv);
+ qlist = RevalidateCachedQuery(plansource, queryEnv, true);
/* Decide whether to use a custom plan */
customplan = choose_custom_plan(plansource, boundParams);
/* We want a generic plan, and we already have a valid one */
plan = plansource->gplan;
Assert(plan->magic == CACHEDPLAN_MAGIC);
+ /* Reusing the existing plan, so not all locks may be acquired. */
+ plan->is_reused = true;
}
else
{
return NIL;
/* Make sure the querytree list is valid and we have parse-time locks */
- RevalidateCachedQuery(plansource, queryEnv);
+ RevalidateCachedQuery(plansource, queryEnv, true);
/* Get the primary statement and find out what it returns */
pstmt = QueryListGetPrimaryStmt(plansource->query_list);
foreach(lc1, stmt_list)
{
PlannedStmt *plannedstmt = lfirst_node(PlannedStmt, lc1);
- ListCell *lc2;
+ int rtindex;
if (plannedstmt->commandType == CMD_UTILITY)
{
continue;
}
- foreach(lc2, plannedstmt->rtable)
+ rtindex = -1;
+ while ((rtindex = bms_next_member(plannedstmt->unprunableRelids,
+ rtindex)) >= 0)
{
- RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc2);
+ RangeTblEntry *rte = list_nth_node(RangeTblEntry,
+ plannedstmt->rtable,
+ rtindex - 1);
- if (!(rte->rtekind == RTE_RELATION ||
- (rte->rtekind == RTE_SUBQUERY && OidIsValid(rte->relid))))
- continue;
+ Assert(rte->rtekind == RTE_RELATION ||
+ (rte->rtekind == RTE_SUBQUERY && OidIsValid(rte->relid)));
/*
* Acquire the appropriate type of lock on each relation OID. Note
const char *sourceText,
CommandTag commandTag,
List *stmts,
- CachedPlan *cplan)
+ CachedPlan *cplan,
+ CachedPlanSource *plansource)
{
Assert(PortalIsValid(portal));
Assert(portal->status == PORTAL_NEW);
portal->commandTag = commandTag;
portal->stmts = stmts;
portal->cplan = cplan;
+ portal->plansource = plansource;
portal->status = PORTAL_DEFINED;
}
ExplainState *es, ParseState *pstate,
ParamListInfo params);
-extern void ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into,
- ExplainState *es, const char *queryString,
+extern void ExplainOnePlan(PlannedStmt *plannedstmt, CachedPlan *cplan,
+ CachedPlanSource *plansource, int query_index,
+ IntoClause *into, ExplainState *es,
+ const char *queryString,
ParamListInfo params, QueryEnvironment *queryEnv,
const instr_time *planduration,
const BufferUsage *bufusage,
extern void AfterTriggerBeginXact(void);
extern void AfterTriggerBeginQuery(void);
extern void AfterTriggerEndQuery(EState *estate);
+extern void AfterTriggerAbortQuery(void);
extern void AfterTriggerFireDeferred(void);
extern void AfterTriggerEndXact(bool isCommit);
extern void AfterTriggerBeginSubXact(void);
/* These fields are provided by CreateQueryDesc */
CmdType operation; /* CMD_SELECT, CMD_UPDATE, etc. */
PlannedStmt *plannedstmt; /* planner's output (could be utility, too) */
+ CachedPlan *cplan; /* CachedPlan that supplies the plannedstmt */
const char *sourceText; /* source text of the query */
Snapshot snapshot; /* snapshot to use for query */
Snapshot crosscheck_snapshot; /* crosscheck for RI update/delete */
/* in pquery.c */
extern QueryDesc *CreateQueryDesc(PlannedStmt *plannedstmt,
+ CachedPlan *cplan,
const char *sourceText,
Snapshot snapshot,
Snapshot crosscheck_snapshot,
#include "nodes/lockoptions.h"
#include "nodes/parsenodes.h"
#include "utils/memutils.h"
+#include "utils/plancache.h"
/*
/* Hook for plugins to get control in ExecutorStart() */
-typedef void (*ExecutorStart_hook_type) (QueryDesc *queryDesc, int eflags);
+typedef bool (*ExecutorStart_hook_type) (QueryDesc *queryDesc, int eflags);
extern PGDLLIMPORT ExecutorStart_hook_type ExecutorStart_hook;
/* Hook for plugins to get control in ExecutorRun() */
/*
* prototypes from functions in execMain.c
*/
-extern void ExecutorStart(QueryDesc *queryDesc, int eflags);
-extern void standard_ExecutorStart(QueryDesc *queryDesc, int eflags);
+extern bool ExecutorStart(QueryDesc *queryDesc, int eflags);
+extern void ExecutorStartCachedPlan(QueryDesc *queryDesc, int eflags,
+ CachedPlanSource *plansource,
+ int query_index);
+extern bool standard_ExecutorStart(QueryDesc *queryDesc, int eflags);
extern void ExecutorRun(QueryDesc *queryDesc,
ScanDirection direction, uint64 count);
extern void standard_ExecutorRun(QueryDesc *queryDesc,
extern void ExecShutdownNode(PlanState *node);
extern void ExecSetTupleBound(int64 tuples_needed, PlanState *child_node);
+/*
+ * Is the CachedPlan in es_cachedplan still valid?
+ *
+ * Called from InitPlan() because invalidation messages that affect the plan
+ * might be received after locks have been taken on runtime-prunable relations.
+ * The caller should take appropriate action if the plan has become invalid.
+ */
+static inline bool
+ExecPlanStillValid(EState *estate)
+{
+ return estate->es_cachedplan == NULL ? true :
+ CachedPlanValid(estate->es_cachedplan);
+}
+
+/*
+ * Locks are needed only if running a cached plan that might contain unlocked
+ * relations, such as a reused generic plan.
+ */
+static inline bool
+ExecShouldLockRelations(EState *estate)
+{
+ return estate->es_cachedplan == NULL ? false :
+ CachedPlanRequiresLocking(estate->es_cachedplan);
+}
/* ----------------------------------------------------------------
* ExecProcNode
#include "storage/condition_variable.h"
#include "utils/hsearch.h"
#include "utils/queryenvironment.h"
+#include "utils/plancache.h"
#include "utils/reltrigger.h"
#include "utils/sharedtuplestore.h"
#include "utils/snapshot.h"
* ExecRowMarks, or NULL if none */
List *es_rteperminfos; /* List of RTEPermissionInfo */
PlannedStmt *es_plannedstmt; /* link to top of plan tree */
+ CachedPlan *es_cachedplan; /* CachedPlan providing the plan tree */
List *es_part_prune_infos; /* List of PartitionPruneInfo */
List *es_part_prune_states; /* List of PartitionPruneState */
List *es_part_prune_results; /* List of Bitmapset */
int es_top_eflags; /* eflags passed to ExecutorStart */
int es_instrument; /* OR of InstrumentOption flags */
bool es_finished; /* true when ExecutorFinish is done */
+ bool es_aborted; /* true when execution was aborted */
List *es_exprcontexts; /* List of ExprContexts within EState */
#include "access/tupdesc.h"
#include "lib/ilist.h"
#include "nodes/params.h"
+#include "nodes/parsenodes.h"
+#include "nodes/plannodes.h"
#include "tcop/cmdtag.h"
#include "utils/queryenvironment.h"
#include "utils/resowner.h"
* The reference count includes both the link from the parent CachedPlanSource
* (if any), and any active plan executions, so the plan can be discarded
* exactly when refcount goes to zero. Both the struct itself and the
- * subsidiary data live in the context denoted by the context field.
- * This makes it easy to free a no-longer-needed cached plan. (However,
- * if is_oneshot is true, the context does not belong solely to the CachedPlan
- * so no freeing is possible.)
+ * subsidiary data, except the PlannedStmts in stmt_list, live in the context
+ * denoted by the context field; the PlannedStmts live in the context denoted
+ * by stmt_context. Separate contexts make it easy to free a no-longer-needed
+ * cached plan. (However, if is_oneshot is true, the context does not belong
+ * solely to the CachedPlan so no freeing is possible.)
*/
typedef struct CachedPlan
{
List *stmt_list; /* list of PlannedStmts */
bool is_oneshot; /* is it a "oneshot" plan? */
bool is_saved; /* is CachedPlan in a long-lived context? */
+ bool is_reused; /* is it a reused generic plan? */
bool is_valid; /* is the stmt_list currently valid? */
Oid planRoleId; /* Role ID the plan was created for */
bool dependsOnRole; /* is plan specific to that role? */
int generation; /* parent's generation number for this plan */
int refcount; /* count of live references to this struct */
MemoryContext context; /* context containing this CachedPlan */
+ MemoryContext stmt_context; /* context containing the PlannedStmts in
+ * stmt_list, but not the List itself, which is
+ * in the above context; NULL if is_oneshot is
+ * true. */
} CachedPlan;
/*
ParamListInfo boundParams,
ResourceOwner owner,
QueryEnvironment *queryEnv);
+extern PlannedStmt *UpdateCachedPlan(CachedPlanSource *plansource,
+ int query_index,
+ QueryEnvironment *queryEnv);
+
extern void ReleaseCachedPlan(CachedPlan *plan, ResourceOwner owner);
extern bool CachedPlanAllowsSimpleValidityCheck(CachedPlanSource *plansource,
extern CachedExpression *GetCachedExpression(Node *expr);
extern void FreeCachedExpression(CachedExpression *cexpr);
+/*
+ * CachedPlanRequiresLocking: should the executor acquire additional locks?
+ *
+ * If the plan is a saved generic plan, the executor must acquire locks for
+ * relations that are not covered by AcquireExecutorLocks(), such as partitions
+ * that are subject to initial runtime pruning.
+ */
+static inline bool
+CachedPlanRequiresLocking(CachedPlan *cplan)
+{
+ return !cplan->is_oneshot && cplan->is_reused;
+}
+
+/*
+ * CachedPlanValid
+ * Returns whether a cached generic plan is still valid.
+ *
+ * Invoked by the executor to check that the plan has not been invalidated
+ * after taking locks during initialization of the plan.
+ */
+static inline bool
+CachedPlanValid(CachedPlan *cplan)
+{
+ return cplan->is_valid;
+}
+
#endif /* PLANCACHE_H */
QueryCompletion qc; /* command completion data for executed query */
List *stmts; /* list of PlannedStmts */
CachedPlan *cplan; /* CachedPlan, if stmts are from one */
+ CachedPlanSource *plansource; /* CachedPlanSource, for cplan */
ParamListInfo portalParams; /* params to pass to query */
QueryEnvironment *queryEnv; /* environment for query */
const char *sourceText,
CommandTag commandTag,
List *stmts,
- CachedPlan *cplan);
+ CachedPlan *cplan,
+ CachedPlanSource *plansource);
extern PlannedStmt *PortalGetPrimaryStmt(Portal portal);
extern void PortalCreateHoldStore(Portal portal);
extern void PortalHashTableDeleteAll(void);
delay_execution.o
ISOLATION = partition-addition \
- partition-removal-1
+ partition-removal-1 \
+ cached-plan-inval
ifdef USE_PGXS
PG_CONFIG = pg_config
/*-------------------------------------------------------------------------
*
* delay_execution.c
- * Test module to allow delay between parsing and execution of a query.
+ * Test module to introduce delays at various points during the execution
+ * of a query, to test that execution proceeds safely in the face of
+ * concurrent changes.
*
* The delay is implemented by taking and immediately releasing a specified
* advisory lock. If another process has previously taken that lock, the
* current process will be blocked until the lock is released; otherwise,
* there's no effect. This allows an isolationtester script to reliably
- * test behaviors where some specified action happens in another backend
- * between parsing and execution of any desired query.
+ * test behaviors where some specified action happens in another backend in
+ * two cases: 1) between parsing and execution of any desired query when
+ * using the planner_hook, and 2) between RevalidateCachedQuery() and
+ * ExecutorStart() when using the ExecutorStart_hook.
*
* Copyright (c) 2020-2025, PostgreSQL Global Development Group
*
#include <limits.h>
+#include "executor/executor.h"
#include "optimizer/planner.h"
#include "utils/fmgrprotos.h"
#include "utils/guc.h"
/* GUC: advisory lock ID to use. Zero disables the feature. */
static int post_planning_lock_id = 0;
+static int executor_start_lock_id = 0;
-/* Save previous planner hook user to be a good citizen */
+/* Save previous hook users to be a good citizen */
static planner_hook_type prev_planner_hook = NULL;
+static ExecutorStart_hook_type prev_ExecutorStart_hook = NULL;
/* planner_hook function to provide the desired delay */
return result;
}
+/* ExecutorStart_hook function to provide the desired delay */
+static bool
+delay_execution_ExecutorStart(QueryDesc *queryDesc, int eflags)
+{
+ bool plan_valid;
+
+ /* If enabled, delay by taking and releasing the specified lock */
+ if (executor_start_lock_id != 0)
+ {
+ DirectFunctionCall1(pg_advisory_lock_int8,
+ Int64GetDatum((int64) executor_start_lock_id));
+ DirectFunctionCall1(pg_advisory_unlock_int8,
+ Int64GetDatum((int64) executor_start_lock_id));
+
+ /*
+ * Ensure that we notice any pending invalidations, since the advisory
+ * lock functions don't do this.
+ */
+ AcceptInvalidationMessages();
+ }
+
+ /* Now start the executor, possibly via a previous hook user */
+ if (prev_ExecutorStart_hook)
+ plan_valid = prev_ExecutorStart_hook(queryDesc, eflags);
+ else
+ plan_valid = standard_ExecutorStart(queryDesc, eflags);
+
+ if (executor_start_lock_id != 0)
+ elog(NOTICE, "Finished ExecutorStart(): CachedPlan is %s",
+ plan_valid ? "valid" : "not valid");
+
+ return plan_valid;
+}
+
/* Module load function */
void
_PG_init(void)
{
- /* Set up the GUC to control which lock is used */
+ /* Set up the GUCs to control which locks are used */
DefineCustomIntVariable("delay_execution.post_planning_lock_id",
"Sets the advisory lock ID to be locked/unlocked after planning.",
"Zero disables the delay.",
NULL,
NULL);
+ DefineCustomIntVariable("delay_execution.executor_start_lock_id",
+ "Sets the advisory lock ID to be locked/unlocked before starting execution.",
+ "Zero disables the delay.",
+ &executor_start_lock_id,
+ 0,
+ 0, INT_MAX,
+ PGC_USERSET,
+ 0,
+ NULL,
+ NULL,
+ NULL);
MarkGUCPrefixReserved("delay_execution");
- /* Install our hook */
+ /* Install our hooks. */
prev_planner_hook = planner_hook;
planner_hook = delay_execution_planner;
+ prev_ExecutorStart_hook = ExecutorStart_hook;
+ ExecutorStart_hook = delay_execution_ExecutorStart;
}
--- /dev/null
+Parsed test spec with 2 sessions
+
+starting permutation: s1prep s2lock s1exec s2dropi s2unlock
+step s1prep: SET plan_cache_mode = force_generic_plan;
+ PREPARE q AS SELECT * FROM foov WHERE a = $1 FOR UPDATE;
+ EXPLAIN (COSTS OFF) EXECUTE q (1);
+QUERY PLAN
+-----------------------------------------------------
+LockRows
+ -> Append
+ Subplans Removed: 2
+ -> Index Scan using foo1_1_a on foo1_1 foo_1
+ Index Cond: (a = $1)
+(5 rows)
+
+step s2lock: SELECT pg_advisory_lock(12345);
+pg_advisory_lock
+----------------
+
+(1 row)
+
+step s1exec: LOAD 'delay_execution';
+ SET delay_execution.executor_start_lock_id = 12345;
+ EXPLAIN (COSTS OFF) EXECUTE q (1); <waiting ...>
+step s2dropi: DROP INDEX foo1_1_a;
+step s2unlock: SELECT pg_advisory_unlock(12345);
+pg_advisory_unlock
+------------------
+t
+(1 row)
+
+step s1exec: <... completed>
+s1: NOTICE: Finished ExecutorStart(): CachedPlan is not valid
+s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid
+QUERY PLAN
+------------------------------------
+LockRows
+ -> Append
+ Subplans Removed: 2
+ -> Seq Scan on foo1_1 foo_1
+ Filter: (a = $1)
+(5 rows)
+
+
+starting permutation: s1prep2 s2lock s1exec2 s2dropi s2unlock
+step s1prep2: SET plan_cache_mode = force_generic_plan;
+ PREPARE q2 AS SELECT * FROM foov WHERE a = one() or a = two();
+ EXPLAIN (COSTS OFF) EXECUTE q2;
+s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid
+QUERY PLAN
+---------------------------------------------------
+Append
+ Subplans Removed: 1
+ -> Index Scan using foo1_1_a on foo1_1 foo_1
+ Index Cond: (a = ANY (ARRAY[one(), two()]))
+ -> Seq Scan on foo1_2 foo_2
+ Filter: ((a = one()) OR (a = two()))
+(6 rows)
+
+step s2lock: SELECT pg_advisory_lock(12345);
+pg_advisory_lock
+----------------
+
+(1 row)
+
+step s1exec2: LOAD 'delay_execution';
+ SET delay_execution.executor_start_lock_id = 12345;
+ EXPLAIN (COSTS OFF) EXECUTE q2; <waiting ...>
+step s2dropi: DROP INDEX foo1_1_a;
+step s2unlock: SELECT pg_advisory_unlock(12345);
+pg_advisory_unlock
+------------------
+t
+(1 row)
+
+step s1exec2: <... completed>
+s1: NOTICE: Finished ExecutorStart(): CachedPlan is not valid
+s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid
+QUERY PLAN
+--------------------------------------------
+Append
+ Subplans Removed: 1
+ -> Seq Scan on foo1_1 foo_1
+ Filter: ((a = one()) OR (a = two()))
+ -> Seq Scan on foo1_2 foo_2
+ Filter: ((a = one()) OR (a = two()))
+(6 rows)
+
+
+starting permutation: s1prep3 s2lock s1exec3 s2dropi s2unlock
+step s1prep3: SET plan_cache_mode = force_generic_plan;
+ PREPARE q3 AS UPDATE foov SET a = a WHERE a = one() or a = two();
+ EXPLAIN (COSTS OFF) EXECUTE q3;
+s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid
+s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid
+s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid
+QUERY PLAN
+---------------------------------------------------------------
+Nested Loop
+ -> Append
+ Subplans Removed: 1
+ -> Index Only Scan using foo1_1_a on foo1_1 foo_1
+ Index Cond: (a = ANY (ARRAY[one(), two()]))
+ -> Seq Scan on foo1_2 foo_2
+ Filter: ((a = one()) OR (a = two()))
+ -> Materialize
+ -> Append
+ Subplans Removed: 1
+ -> Seq Scan on bar1 bar_1
+ Filter: (a = one())
+
+Update on bar
+ Update on bar1 bar_1
+ -> Nested Loop
+ -> Append
+ Subplans Removed: 1
+ -> Index Scan using foo1_1_a on foo1_1 foo_1
+ Index Cond: (a = ANY (ARRAY[one(), two()]))
+ -> Seq Scan on foo1_2 foo_2
+ Filter: ((a = one()) OR (a = two()))
+ -> Materialize
+ -> Append
+ Subplans Removed: 1
+ -> Seq Scan on bar1 bar_1
+ Filter: (a = one())
+
+Update on foo
+ Update on foo1_1 foo_1
+ Update on foo1_2 foo_2
+ -> Append
+ Subplans Removed: 1
+ -> Index Scan using foo1_1_a on foo1_1 foo_1
+ Index Cond: (a = ANY (ARRAY[one(), two()]))
+ -> Seq Scan on foo1_2 foo_2
+ Filter: ((a = one()) OR (a = two()))
+(37 rows)
+
+step s2lock: SELECT pg_advisory_lock(12345);
+pg_advisory_lock
+----------------
+
+(1 row)
+
+step s1exec3: LOAD 'delay_execution';
+ SET delay_execution.executor_start_lock_id = 12345;
+ EXPLAIN (COSTS OFF) EXECUTE q3; <waiting ...>
+step s2dropi: DROP INDEX foo1_1_a;
+step s2unlock: SELECT pg_advisory_unlock(12345);
+pg_advisory_unlock
+------------------
+t
+(1 row)
+
+step s1exec3: <... completed>
+s1: NOTICE: Finished ExecutorStart(): CachedPlan is not valid
+s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid
+s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid
+s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid
+QUERY PLAN
+--------------------------------------------------------
+Nested Loop
+ -> Append
+ Subplans Removed: 1
+ -> Seq Scan on foo1_1 foo_1
+ Filter: ((a = one()) OR (a = two()))
+ -> Seq Scan on foo1_2 foo_2
+ Filter: ((a = one()) OR (a = two()))
+ -> Materialize
+ -> Append
+ Subplans Removed: 1
+ -> Seq Scan on bar1 bar_1
+ Filter: (a = one())
+
+Update on bar
+ Update on bar1 bar_1
+ -> Nested Loop
+ -> Append
+ Subplans Removed: 1
+ -> Seq Scan on foo1_1 foo_1
+ Filter: ((a = one()) OR (a = two()))
+ -> Seq Scan on foo1_2 foo_2
+ Filter: ((a = one()) OR (a = two()))
+ -> Materialize
+ -> Append
+ Subplans Removed: 1
+ -> Seq Scan on bar1 bar_1
+ Filter: (a = one())
+
+Update on foo
+ Update on foo1_1 foo_1
+ Update on foo1_2 foo_2
+ -> Append
+ Subplans Removed: 1
+ -> Seq Scan on foo1_1 foo_1
+ Filter: ((a = one()) OR (a = two()))
+ -> Seq Scan on foo1_2 foo_2
+ Filter: ((a = one()) OR (a = two()))
+(37 rows)
+
+
+starting permutation: s1prep4 s2lock s1exec4 s2dropi s2unlock
+step s1prep4: SET plan_cache_mode = force_generic_plan;
+ PREPARE q4 AS SELECT * FROM generate_series(1, 1) WHERE EXISTS (SELECT * FROM foov WHERE a = $1 FOR UPDATE);
+ EXPLAIN (COSTS OFF) EXECUTE q4 (1);
+s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid
+QUERY PLAN
+-------------------------------------------------------------
+Result
+ One-Time Filter: (InitPlan 1).col1
+ InitPlan 1
+ -> LockRows
+ -> Append
+ Subplans Removed: 2
+ -> Index Scan using foo1_1_a on foo1_1 foo_1
+ Index Cond: (a = $1)
+ -> Function Scan on generate_series
+(9 rows)
+
+step s2lock: SELECT pg_advisory_lock(12345);
+pg_advisory_lock
+----------------
+
+(1 row)
+
+step s1exec4: LOAD 'delay_execution';
+ SET delay_execution.executor_start_lock_id = 12345;
+ EXPLAIN (COSTS OFF) EXECUTE q4 (1); <waiting ...>
+step s2dropi: DROP INDEX foo1_1_a;
+step s2unlock: SELECT pg_advisory_unlock(12345);
+pg_advisory_unlock
+------------------
+t
+(1 row)
+
+step s1exec4: <... completed>
+s1: NOTICE: Finished ExecutorStart(): CachedPlan is not valid
+s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid
+QUERY PLAN
+--------------------------------------------
+Result
+ One-Time Filter: (InitPlan 1).col1
+ InitPlan 1
+ -> LockRows
+ -> Append
+ Subplans Removed: 2
+ -> Seq Scan on foo1_1 foo_1
+ Filter: (a = $1)
+ -> Function Scan on generate_series
+(9 rows)
+
'specs': [
'partition-addition',
'partition-removal-1',
+ 'cached-plan-inval',
],
},
}
--- /dev/null
+# Test to check that invalidation of a cached generic plan during
+# ExecutorStart() is correctly detected, prompting a replan and execution of
+# the updated plan.
+
+setup
+{
+ CREATE TABLE foo (a int, b text) PARTITION BY RANGE (a);
+ CREATE TABLE foo1 PARTITION OF foo FOR VALUES FROM (MINVALUE) TO (3) PARTITION BY RANGE (a);
+ CREATE TABLE foo1_1 PARTITION OF foo1 FOR VALUES FROM (MINVALUE) TO (2);
+ CREATE TABLE foo1_2 PARTITION OF foo1 FOR VALUES FROM (2) TO (3);
+ CREATE INDEX foo1_1_a ON foo1_1 (a);
+ CREATE TABLE foo2 PARTITION OF foo FOR VALUES FROM (3) TO (MAXVALUE);
+ INSERT INTO foo SELECT generate_series(-1000, 1000);
+ CREATE VIEW foov AS SELECT * FROM foo;
+ CREATE FUNCTION one () RETURNS int AS $$ BEGIN RETURN 1; END; $$ LANGUAGE PLPGSQL STABLE;
+ CREATE FUNCTION two () RETURNS int AS $$ BEGIN RETURN 2; END; $$ LANGUAGE PLPGSQL STABLE;
+ CREATE TABLE bar (a int, b text) PARTITION BY LIST(a);
+ CREATE TABLE bar1 PARTITION OF bar FOR VALUES IN (1);
+ CREATE INDEX ON bar1(a);
+ CREATE TABLE bar2 PARTITION OF bar FOR VALUES IN (2);
+ CREATE RULE update_foo AS ON UPDATE TO foo DO ALSO UPDATE bar SET a = a WHERE a = one();
+ CREATE RULE update_bar AS ON UPDATE TO bar DO ALSO SELECT 1;
+ ANALYZE;
+}
+
+teardown
+{
+ DROP VIEW foov;
+ DROP RULE update_foo ON foo;
+ DROP TABLE foo, bar;
+ DROP FUNCTION one(), two();
+}
+
+session "s1"
+step "s1prep" { SET plan_cache_mode = force_generic_plan;
+ PREPARE q AS SELECT * FROM foov WHERE a = $1 FOR UPDATE;
+ EXPLAIN (COSTS OFF) EXECUTE q (1); }
+
+step "s1prep2" { SET plan_cache_mode = force_generic_plan;
+ PREPARE q2 AS SELECT * FROM foov WHERE a = one() or a = two();
+ EXPLAIN (COSTS OFF) EXECUTE q2; }
+
+step "s1prep3" { SET plan_cache_mode = force_generic_plan;
+ PREPARE q3 AS UPDATE foov SET a = a WHERE a = one() or a = two();
+ EXPLAIN (COSTS OFF) EXECUTE q3; }
+
+step "s1prep4" { SET plan_cache_mode = force_generic_plan;
+ PREPARE q4 AS SELECT * FROM generate_series(1, 1) WHERE EXISTS (SELECT * FROM foov WHERE a = $1 FOR UPDATE);
+ EXPLAIN (COSTS OFF) EXECUTE q4 (1); }
+
+step "s1exec" { LOAD 'delay_execution';
+ SET delay_execution.executor_start_lock_id = 12345;
+ EXPLAIN (COSTS OFF) EXECUTE q (1); }
+step "s1exec2" { LOAD 'delay_execution';
+ SET delay_execution.executor_start_lock_id = 12345;
+ EXPLAIN (COSTS OFF) EXECUTE q2; }
+step "s1exec3" { LOAD 'delay_execution';
+ SET delay_execution.executor_start_lock_id = 12345;
+ EXPLAIN (COSTS OFF) EXECUTE q3; }
+step "s1exec4" { LOAD 'delay_execution';
+ SET delay_execution.executor_start_lock_id = 12345;
+ EXPLAIN (COSTS OFF) EXECUTE q4 (1); }
+
+session "s2"
+step "s2lock" { SELECT pg_advisory_lock(12345); }
+step "s2unlock" { SELECT pg_advisory_unlock(12345); }
+step "s2dropi" { DROP INDEX foo1_1_a; }
+
+# In all permutations below, while "s1exec", "s1exec2", etc. wait to
+# acquire the advisory lock, "s2dropi" drops the index being used in the
+# cached plan. When "s1exec" and others are unblocked and begin initializing
+# the plan, including acquiring necessary locks on partitions, the concurrent
+# index drop is detected. This causes plan initialization to be aborted,
+# prompting the caller to retry with a new plan.
+
+# Case with runtime pruning using EXTERN parameter
+permutation "s1prep" "s2lock" "s1exec" "s2dropi" "s2unlock"
+
+# Case with runtime pruning using stable function
+permutation "s1prep2" "s2lock" "s1exec2" "s2dropi" "s2unlock"
+
+# Case with a rule adding another query causing the CachedPlan to contain
+# multiple PlannedStmts
+permutation "s1prep3" "s2lock" "s1exec3" "s2dropi" "s2unlock"
+
+# Case with run-time pruning inside a subquery
+permutation "s1prep4" "s2lock" "s1exec4" "s2dropi" "s2unlock"