PostgreSQL Source Code git master
pathnode.h File Reference
#include "nodes/bitmapset.h"
#include "nodes/pathnodes.h"

Go to the source code of this file.

Functions

int compare_path_costs (Path *path1, Path *path2, CostSelector criterion)
 
int compare_fractional_path_costs (Path *path1, Path *path2, double fraction)
 
void set_cheapest (RelOptInfo *parent_rel)
 
void add_path (RelOptInfo *parent_rel, Path *new_path)
 
bool add_path_precheck (RelOptInfo *parent_rel, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer)
 
void add_partial_path (RelOptInfo *parent_rel, Path *new_path)
 
bool add_partial_path_precheck (RelOptInfo *parent_rel, int disabled_nodes, Cost total_cost, List *pathkeys)
 
Path * create_seqscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
 
Path * create_samplescan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 
IndexPath * create_index_path (PlannerInfo *root, IndexOptInfo *index, List *indexclauses, List *indexorderbys, List *indexorderbycols, List *pathkeys, ScanDirection indexscandir, bool indexonly, Relids required_outer, double loop_count, bool partial_path)
 
BitmapHeapPath * create_bitmap_heap_path (PlannerInfo *root, RelOptInfo *rel, Path *bitmapqual, Relids required_outer, double loop_count, int parallel_degree)
 
BitmapAndPath * create_bitmap_and_path (PlannerInfo *root, RelOptInfo *rel, List *bitmapquals)
 
BitmapOrPath * create_bitmap_or_path (PlannerInfo *root, RelOptInfo *rel, List *bitmapquals)
 
TidPath * create_tidscan_path (PlannerInfo *root, RelOptInfo *rel, List *tidquals, Relids required_outer)
 
TidRangePath * create_tidrangescan_path (PlannerInfo *root, RelOptInfo *rel, List *tidrangequals, Relids required_outer)
 
AppendPath * create_append_path (PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
 
MergeAppendPath * create_merge_append_path (PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *pathkeys, Relids required_outer)
 
GroupResultPath * create_group_result_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)
 
MaterialPath * create_material_path (RelOptInfo *rel, Path *subpath)
 
MemoizePath * create_memoize_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *param_exprs, List *hash_operators, bool singlerow, bool binary_mode, double calls)
 
UniquePath * create_unique_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, SpecialJoinInfo *sjinfo)
 
GatherPath * create_gather_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, Relids required_outer, double *rows)
 
GatherMergePath * create_gather_merge_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
 
SubqueryScanPath * create_subqueryscan_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, bool trivial_pathtarget, List *pathkeys, Relids required_outer)
 
Path * create_functionscan_path (PlannerInfo *root, RelOptInfo *rel, List *pathkeys, Relids required_outer)
 
Path * create_valuesscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 
Path * create_tablefuncscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 
Path * create_ctescan_path (PlannerInfo *root, RelOptInfo *rel, List *pathkeys, Relids required_outer)
 
Path * create_namedtuplestorescan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 
Path * create_resultscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 
Path * create_worktablescan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 
ForeignPath * create_foreignscan_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, double rows, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer, Path *fdw_outerpath, List *fdw_restrictinfo, List *fdw_private)
 
ForeignPath * create_foreign_join_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, double rows, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer, Path *fdw_outerpath, List *fdw_restrictinfo, List *fdw_private)
 
ForeignPath * create_foreign_upper_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, double rows, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Path *fdw_outerpath, List *fdw_restrictinfo, List *fdw_private)
 
Relids calc_nestloop_required_outer (Relids outerrelids, Relids outer_paramrels, Relids innerrelids, Relids inner_paramrels)
 
Relids calc_non_nestloop_required_outer (Path *outer_path, Path *inner_path)
 
NestPath * create_nestloop_path (PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, JoinCostWorkspace *workspace, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, List *restrict_clauses, List *pathkeys, Relids required_outer)
 
MergePath * create_mergejoin_path (PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, JoinCostWorkspace *workspace, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, List *restrict_clauses, List *pathkeys, Relids required_outer, List *mergeclauses, List *outersortkeys, List *innersortkeys, int outer_presorted_keys)
 
HashPath * create_hashjoin_path (PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, JoinCostWorkspace *workspace, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, bool parallel_hash, List *restrict_clauses, Relids required_outer, List *hashclauses)
 
ProjectionPath * create_projection_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
 
Path * apply_projection_to_path (PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)
 
ProjectSetPath * create_set_projection_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
 
SortPath * create_sort_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
 
IncrementalSortPath * create_incremental_sort_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)
 
GroupPath * create_group_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *groupClause, List *qual, double numGroups)
 
UpperUniquePath * create_upper_unique_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)
 
AggPath * create_agg_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
 
GroupingSetsPath * create_groupingsets_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs)
 
MinMaxAggPath * create_minmaxagg_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *mmaggregates, List *quals)
 
WindowAggPath * create_windowagg_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, List *runCondition, WindowClause *winclause, List *qual, bool topwindow)
 
SetOpPath * create_setop_path (PlannerInfo *root, RelOptInfo *rel, Path *leftpath, Path *rightpath, SetOpCmd cmd, SetOpStrategy strategy, List *groupList, double numGroups, double outputRows)
 
RecursiveUnionPath * create_recursiveunion_path (PlannerInfo *root, RelOptInfo *rel, Path *leftpath, Path *rightpath, PathTarget *target, List *distinctList, int wtParam, double numGroups)
 
LockRowsPath * create_lockrows_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)
 
ModifyTablePath * create_modifytable_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, bool partColsUpdated, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, List *mergeJoinConditions, int epqParam)
 
LimitPath * create_limit_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)
 
void adjust_limit_rows_costs (double *rows, Cost *startup_cost, Cost *total_cost, int64 offset_est, int64 count_est)
 
Path * reparameterize_path (PlannerInfo *root, Path *path, Relids required_outer, double loop_count)
 
Path * reparameterize_path_by_child (PlannerInfo *root, Path *path, RelOptInfo *child_rel)
 
bool path_is_reparameterizable_by_child (Path *path, RelOptInfo *child_rel)
 
void setup_simple_rel_arrays (PlannerInfo *root)
 
void expand_planner_arrays (PlannerInfo *root, int add_size)
 
RelOptInfo * build_simple_rel (PlannerInfo *root, int relid, RelOptInfo *parent)
 
RelOptInfo * find_base_rel (PlannerInfo *root, int relid)
 
RelOptInfo * find_base_rel_noerr (PlannerInfo *root, int relid)
 
RelOptInfo * find_base_rel_ignore_join (PlannerInfo *root, int relid)
 
RelOptInfo * find_join_rel (PlannerInfo *root, Relids relids)
 
RelOptInfo * build_join_rel (PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *pushed_down_joins, List **restrictlist_ptr)
 
Relids min_join_parameterization (PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
 
RelOptInfo * fetch_upper_rel (PlannerInfo *root, UpperRelationKind kind, Relids relids)
 
Relids find_childrel_parents (PlannerInfo *root, RelOptInfo *rel)
 
ParamPathInfo * get_baserel_parampathinfo (PlannerInfo *root, RelOptInfo *baserel, Relids required_outer)
 
ParamPathInfo * get_joinrel_parampathinfo (PlannerInfo *root, RelOptInfo *joinrel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, Relids required_outer, List **restrict_clauses)
 
ParamPathInfo * get_appendrel_parampathinfo (RelOptInfo *appendrel, Relids required_outer)
 
ParamPathInfo * find_param_path_info (RelOptInfo *rel, Relids required_outer)
 
Bitmapset * get_param_path_clause_serials (Path *path)
 
RelOptInfo * build_child_join_rel (PlannerInfo *root, RelOptInfo *outer_rel, RelOptInfo *inner_rel, RelOptInfo *parent_joinrel, List *restrictlist, SpecialJoinInfo *sjinfo, int nappinfos, AppendRelInfo **appinfos)
 

Function Documentation

◆ add_partial_path()

void add_partial_path ( RelOptInfo * parent_rel,
Path * new_path 
)

Definition at line 795 of file pathnode.c.

796{
797 bool accept_new = true; /* unless we find a superior old path */
798 int insert_at = 0; /* where to insert new item */
799 ListCell *p1;
800
801 /* Check for query cancel. */
802 CHECK_FOR_INTERRUPTS();
803
804 /* Path to be added must be parallel safe. */
805 Assert(new_path->parallel_safe);
806
807 /* Relation should be OK for parallelism, too. */
808 Assert(parent_rel->consider_parallel);
809
810 /*
811 * As in add_path, throw out any paths which are dominated by the new
812 * path, but throw out the new path if some existing path dominates it.
813 */
814 foreach(p1, parent_rel->partial_pathlist)
815 {
816 Path *old_path = (Path *) lfirst(p1);
817 bool remove_old = false; /* unless new proves superior */
818 PathKeysComparison keyscmp;
819
820 /* Compare pathkeys. */
821 keyscmp = compare_pathkeys(new_path->pathkeys, old_path->pathkeys);
822
823 /* Unless pathkeys are incompatible, keep just one of the two paths. */
824 if (keyscmp != PATHKEYS_DIFFERENT)
825 {
826 if (unlikely(new_path->disabled_nodes != old_path->disabled_nodes))
827 {
828 if (new_path->disabled_nodes > old_path->disabled_nodes)
829 accept_new = false;
830 else
831 remove_old = true;
832 }
833 else if (new_path->total_cost > old_path->total_cost
834 * STD_FUZZ_FACTOR)
835 {
836 /* New path costs more; keep it only if pathkeys are better. */
837 if (keyscmp != PATHKEYS_BETTER1)
838 accept_new = false;
839 }
840 else if (old_path->total_cost > new_path->total_cost
841 * STD_FUZZ_FACTOR)
842 {
843 /* Old path costs more; keep it only if pathkeys are better. */
844 if (keyscmp != PATHKEYS_BETTER2)
845 remove_old = true;
846 }
847 else if (keyscmp == PATHKEYS_BETTER1)
848 {
849 /* Costs are about the same, new path has better pathkeys. */
850 remove_old = true;
851 }
852 else if (keyscmp == PATHKEYS_BETTER2)
853 {
854 /* Costs are about the same, old path has better pathkeys. */
855 accept_new = false;
856 }
857 else if (old_path->total_cost > new_path->total_cost * 1.0000000001)
858 {
859 /* Pathkeys are the same, and the old path costs more. */
860 remove_old = true;
861 }
862 else
863 {
864 /*
865 * Pathkeys are the same, and new path isn't materially
866 * cheaper.
867 */
868 accept_new = false;
869 }
870 }
871
872 /*
873 * Remove current element from partial_pathlist if dominated by new.
874 */
875 if (remove_old)
876 {
877 parent_rel->partial_pathlist =
878 foreach_delete_current(parent_rel->partial_pathlist, p1);
879 pfree(old_path);
880 }
881 else
882 {
883 /* new belongs after this old path if it has cost >= old's */
884 if (new_path->total_cost >= old_path->total_cost)
885 insert_at = foreach_current_index(p1) + 1;
886 }
887
888 /*
889 * If we found an old path that dominates new_path, we can quit
890 * scanning the partial_pathlist; we will not add new_path, and we
891 * assume new_path cannot dominate any later path.
892 */
893 if (!accept_new)
894 break;
895 }
896
897 if (accept_new)
898 {
899 /* Accept the new path: insert it at proper place */
900 parent_rel->partial_pathlist =
901 list_insert_nth(parent_rel->partial_pathlist, insert_at, new_path);
902 }
903 else
904 {
905 /* Reject and recycle the new path */
906 pfree(new_path);
907 }
908}
#define unlikely(x)
Definition: c.h:347
Assert(PointerIsAligned(start, uint64))
List * list_insert_nth(List *list, int pos, void *datum)
Definition: list.c:439
void pfree(void *pointer)
Definition: mcxt.c:2152
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:123
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition: pathkeys.c:304
#define STD_FUZZ_FACTOR
Definition: pathnode.c:47
PathKeysComparison
Definition: paths.h:212
@ PATHKEYS_BETTER2
Definition: paths.h:215
@ PATHKEYS_BETTER1
Definition: paths.h:214
@ PATHKEYS_DIFFERENT
Definition: paths.h:216
#define lfirst(lc)
Definition: pg_list.h:172
#define foreach_current_index(var_or_cell)
Definition: pg_list.h:403
#define foreach_delete_current(lst, var_or_cell)
Definition: pg_list.h:391
List * pathkeys
Definition: pathnodes.h:1799
int disabled_nodes
Definition: pathnodes.h:1794
Cost total_cost
Definition: pathnodes.h:1796
bool parallel_safe
Definition: pathnodes.h:1788
bool consider_parallel
Definition: pathnodes.h:911
List * partial_pathlist
Definition: pathnodes.h:924

References Assert(), CHECK_FOR_INTERRUPTS, compare_pathkeys(), RelOptInfo::consider_parallel, Path::disabled_nodes, foreach_current_index, foreach_delete_current, lfirst, list_insert_nth(), Path::parallel_safe, RelOptInfo::partial_pathlist, Path::pathkeys, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, pfree(), STD_FUZZ_FACTOR, Path::total_cost, and unlikely.

Referenced by add_paths_to_append_rel(), build_index_paths(), build_setop_child_paths(), create_partial_bitmap_paths(), create_partial_distinct_paths(), create_partial_grouping_paths(), create_plain_partial_paths(), grouping_planner(), set_subquery_pathlist(), try_partial_hashjoin_path(), try_partial_mergejoin_path(), and try_partial_nestloop_path().
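As an illustration of typical usage (not part of pathnode.c), a caller in the spirit of create_plain_partial_paths() builds a parallel-safe path and hands ownership to add_partial_path(). The helper name below is hypothetical; the relevant optimizer headers are assumed to be included.

static void
add_partial_seqscan_sketch(PlannerInfo *root, RelOptInfo *rel,
                           int parallel_workers)
{
    Path       *path;

    /* add_partial_path() asserts consider_parallel, so bail out early */
    if (!rel->consider_parallel || parallel_workers <= 0)
        return;

    /* Build an unparameterized partial sequential scan */
    path = create_seqscan_path(root, rel, NULL, parallel_workers);

    /* add_partial_path() keeps, rejects, or frees the path as it sees fit */
    add_partial_path(rel, path);
}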

◆ add_partial_path_precheck()

bool add_partial_path_precheck ( RelOptInfo * parent_rel,
int disabled_nodes,
Cost total_cost,
List * pathkeys 
)

Definition at line 921 of file pathnode.c.

923{
924 ListCell *p1;
925
926 /*
927 * Our goal here is twofold. First, we want to find out whether this path
928 * is clearly inferior to some existing partial path. If so, we want to
929 * reject it immediately. Second, we want to find out whether this path
930 * is clearly superior to some existing partial path -- at least, modulo
931 * final cost computations. If so, we definitely want to consider it.
932 *
933 * Unlike add_path(), we always compare pathkeys here. This is because we
934 * expect partial_pathlist to be very short, and getting a definitive
935 * answer at this stage avoids the need to call add_path_precheck.
936 */
937 foreach(p1, parent_rel->partial_pathlist)
938 {
939 Path *old_path = (Path *) lfirst(p1);
940 PathKeysComparison keyscmp;
941
942 keyscmp = compare_pathkeys(pathkeys, old_path->pathkeys);
943 if (keyscmp != PATHKEYS_DIFFERENT)
944 {
945 if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR &&
946 keyscmp != PATHKEYS_BETTER1)
947 return false;
948 if (old_path->total_cost > total_cost * STD_FUZZ_FACTOR &&
949 keyscmp != PATHKEYS_BETTER2)
950 return true;
951 }
952 }
953
954 /*
955 * This path is neither clearly inferior to an existing partial path nor
956 * clearly good enough that it might replace one. Compare it to
957 * non-parallel plans. If it loses even before accounting for the cost of
958 * the Gather node, we should definitely reject it.
959 *
960 * Note that we pass the total_cost to add_path_precheck twice. This is
961 * because it's never advantageous to consider the startup cost of a
962 * partial path; the resulting plans, if run in parallel, will be run to
963 * completion.
964 */
965 if (!add_path_precheck(parent_rel, disabled_nodes, total_cost, total_cost,
966 pathkeys, NULL))
967 return false;
968
969 return true;
970}
bool add_path_precheck(RelOptInfo *parent_rel, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer)
Definition: pathnode.c:688

References add_path_precheck(), compare_pathkeys(), lfirst, RelOptInfo::partial_pathlist, Path::pathkeys, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, STD_FUZZ_FACTOR, and Path::total_cost.

Referenced by try_partial_hashjoin_path(), try_partial_mergejoin_path(), and try_partial_nestloop_path().
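For illustration only: join-planning code such as try_partial_nestloop_path() runs this precheck on preliminary cost estimates before paying for full path construction. In the sketch below the function name is hypothetical and the disabled-node count and total cost are assumed to come from an initial_cost_* workspace.

static void
consider_partial_join_sketch(PlannerInfo *root, RelOptInfo *joinrel,
                             int disabled_nodes, Cost est_total_cost,
                             List *pathkeys)
{
    /* Cheap rejection test before building the real partial path */
    if (!add_partial_path_precheck(joinrel, disabled_nodes,
                                   est_total_cost, pathkeys))
        return;

    /*
     * ... build the complete partial join path here and submit it with
     * add_partial_path(joinrel, path) ...
     */
}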

◆ add_path()

void add_path ( RelOptInfo * parent_rel,
Path * new_path 
)

Definition at line 461 of file pathnode.c.

462{
463 bool accept_new = true; /* unless we find a superior old path */
464 int insert_at = 0; /* where to insert new item */
465 List *new_path_pathkeys;
466 ListCell *p1;
467
468 /*
469 * This is a convenient place to check for query cancel --- no part of the
470 * planner goes very long without calling add_path().
471 */
472 CHECK_FOR_INTERRUPTS();
473
474 /* Pretend parameterized paths have no pathkeys, per comment above */
475 new_path_pathkeys = new_path->param_info ? NIL : new_path->pathkeys;
476
477 /*
478 * Loop to check proposed new path against old paths. Note it is possible
479 * for more than one old path to be tossed out because new_path dominates
480 * it.
481 */
482 foreach(p1, parent_rel->pathlist)
483 {
484 Path *old_path = (Path *) lfirst(p1);
485 bool remove_old = false; /* unless new proves superior */
486 PathCostComparison costcmp;
487 PathKeysComparison keyscmp;
488 BMS_Comparison outercmp;
489
490 /*
491 * Do a fuzzy cost comparison with standard fuzziness limit.
492 */
493 costcmp = compare_path_costs_fuzzily(new_path, old_path,
494 STD_FUZZ_FACTOR);
495
496 /*
497 * If the two paths compare differently for startup and total cost,
498 * then we want to keep both, and we can skip comparing pathkeys and
499 * required_outer rels. If they compare the same, proceed with the
500 * other comparisons. Row count is checked last. (We make the tests
501 * in this order because the cost comparison is most likely to turn
502 * out "different", and the pathkeys comparison next most likely. As
503 * explained above, row count very seldom makes a difference, so even
504 * though it's cheap to compare there's not much point in checking it
505 * earlier.)
506 */
507 if (costcmp != COSTS_DIFFERENT)
508 {
509 /* Similarly check to see if either dominates on pathkeys */
510 List *old_path_pathkeys;
511
512 old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
513 keyscmp = compare_pathkeys(new_path_pathkeys,
514 old_path_pathkeys);
515 if (keyscmp != PATHKEYS_DIFFERENT)
516 {
517 switch (costcmp)
518 {
519 case COSTS_EQUAL:
520 outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
521 PATH_REQ_OUTER(old_path));
522 if (keyscmp == PATHKEYS_BETTER1)
523 {
524 if ((outercmp == BMS_EQUAL ||
525 outercmp == BMS_SUBSET1) &&
526 new_path->rows <= old_path->rows &&
527 new_path->parallel_safe >= old_path->parallel_safe)
528 remove_old = true; /* new dominates old */
529 }
530 else if (keyscmp == PATHKEYS_BETTER2)
531 {
532 if ((outercmp == BMS_EQUAL ||
533 outercmp == BMS_SUBSET2) &&
534 new_path->rows >= old_path->rows &&
535 new_path->parallel_safe <= old_path->parallel_safe)
536 accept_new = false; /* old dominates new */
537 }
538 else /* keyscmp == PATHKEYS_EQUAL */
539 {
540 if (outercmp == BMS_EQUAL)
541 {
542 /*
543 * Same pathkeys and outer rels, and fuzzily
544 * the same cost, so keep just one; to decide
545 * which, first check parallel-safety, then
546 * rows, then do a fuzzy cost comparison with
547 * very small fuzz limit. (We used to do an
548 * exact cost comparison, but that results in
549 * annoying platform-specific plan variations
550 * due to roundoff in the cost estimates.) If
551 * things are still tied, arbitrarily keep
552 * only the old path. Notice that we will
553 * keep only the old path even if the
554 * less-fuzzy comparison decides the startup
555 * and total costs compare differently.
556 */
557 if (new_path->parallel_safe >
558 old_path->parallel_safe)
559 remove_old = true; /* new dominates old */
560 else if (new_path->parallel_safe <
561 old_path->parallel_safe)
562 accept_new = false; /* old dominates new */
563 else if (new_path->rows < old_path->rows)
564 remove_old = true; /* new dominates old */
565 else if (new_path->rows > old_path->rows)
566 accept_new = false; /* old dominates new */
567 else if (compare_path_costs_fuzzily(new_path,
568 old_path,
569 1.0000000001) == COSTS_BETTER1)
570 remove_old = true; /* new dominates old */
571 else
572 accept_new = false; /* old equals or
573 * dominates new */
574 }
575 else if (outercmp == BMS_SUBSET1 &&
576 new_path->rows <= old_path->rows &&
577 new_path->parallel_safe >= old_path->parallel_safe)
578 remove_old = true; /* new dominates old */
579 else if (outercmp == BMS_SUBSET2 &&
580 new_path->rows >= old_path->rows &&
581 new_path->parallel_safe <= old_path->parallel_safe)
582 accept_new = false; /* old dominates new */
583 /* else different parameterizations, keep both */
584 }
585 break;
586 case COSTS_BETTER1:
587 if (keyscmp != PATHKEYS_BETTER2)
588 {
589 outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
590 PATH_REQ_OUTER(old_path));
591 if ((outercmp == BMS_EQUAL ||
592 outercmp == BMS_SUBSET1) &&
593 new_path->rows <= old_path->rows &&
594 new_path->parallel_safe >= old_path->parallel_safe)
595 remove_old = true; /* new dominates old */
596 }
597 break;
598 case COSTS_BETTER2:
599 if (keyscmp != PATHKEYS_BETTER1)
600 {
601 outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
602 PATH_REQ_OUTER(old_path));
603 if ((outercmp == BMS_EQUAL ||
604 outercmp == BMS_SUBSET2) &&
605 new_path->rows >= old_path->rows &&
606 new_path->parallel_safe <= old_path->parallel_safe)
607 accept_new = false; /* old dominates new */
608 }
609 break;
610 case COSTS_DIFFERENT:
611
612 /*
613 * can't get here, but keep this case to keep compiler
614 * quiet
615 */
616 break;
617 }
618 }
619 }
620
621 /*
622 * Remove current element from pathlist if dominated by new.
623 */
624 if (remove_old)
625 {
626 parent_rel->pathlist = foreach_delete_current(parent_rel->pathlist,
627 p1);
628
629 /*
630 * Delete the data pointed-to by the deleted cell, if possible
631 */
632 if (!IsA(old_path, IndexPath))
633 pfree(old_path);
634 }
635 else
636 {
637 /*
638 * new belongs after this old path if it has more disabled nodes
639 * or if it has the same number of nodes but a greater total cost
640 */
641 if (new_path->disabled_nodes > old_path->disabled_nodes ||
642 (new_path->disabled_nodes == old_path->disabled_nodes &&
643 new_path->total_cost >= old_path->total_cost))
644 insert_at = foreach_current_index(p1) + 1;
645 }
646
647 /*
648 * If we found an old path that dominates new_path, we can quit
649 * scanning the pathlist; we will not add new_path, and we assume
650 * new_path cannot dominate any other elements of the pathlist.
651 */
652 if (!accept_new)
653 break;
654 }
655
656 if (accept_new)
657 {
658 /* Accept the new path: insert it at proper place in pathlist */
659 parent_rel->pathlist =
660 list_insert_nth(parent_rel->pathlist, insert_at, new_path);
661 }
662 else
663 {
664 /* Reject and recycle the new path */
665 if (!IsA(new_path, IndexPath))
666 pfree(new_path);
667 }
668}
BMS_Comparison bms_subset_compare(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:445
BMS_Comparison
Definition: bitmapset.h:61
@ BMS_SUBSET1
Definition: bitmapset.h:63
@ BMS_EQUAL
Definition: bitmapset.h:62
@ BMS_SUBSET2
Definition: bitmapset.h:64
#define IsA(nodeptr, _type_)
Definition: nodes.h:164
static PathCostComparison compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor)
Definition: pathnode.c:182
PathCostComparison
Definition: pathnode.c:35
@ COSTS_EQUAL
Definition: pathnode.c:36
@ COSTS_BETTER1
Definition: pathnode.c:37
@ COSTS_BETTER2
Definition: pathnode.c:38
@ COSTS_DIFFERENT
Definition: pathnode.c:39
#define PATH_REQ_OUTER(path)
Definition: pathnodes.h:1803
#define NIL
Definition: pg_list.h:68
Definition: pg_list.h:54
Cardinality rows
Definition: pathnodes.h:1793
List * pathlist
Definition: pathnodes.h:922

References BMS_EQUAL, BMS_SUBSET1, BMS_SUBSET2, bms_subset_compare(), CHECK_FOR_INTERRUPTS, compare_path_costs_fuzzily(), compare_pathkeys(), COSTS_BETTER1, COSTS_BETTER2, COSTS_DIFFERENT, COSTS_EQUAL, Path::disabled_nodes, foreach_current_index, foreach_delete_current, IsA, lfirst, list_insert_nth(), NIL, Path::parallel_safe, PATH_REQ_OUTER, Path::pathkeys, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, RelOptInfo::pathlist, pfree(), Path::rows, STD_FUZZ_FACTOR, and Path::total_cost.

Referenced by add_foreign_final_paths(), add_foreign_grouping_paths(), add_foreign_ordered_paths(), add_paths_to_append_rel(), add_paths_to_grouping_rel(), add_paths_with_pathkeys_for_rel(), build_setop_child_paths(), BuildParameterizedTidPaths(), consider_groupingsets_paths(), create_degenerate_grouping_paths(), create_final_distinct_paths(), create_index_paths(), create_one_window_path(), create_ordered_paths(), create_partial_grouping_paths(), create_tidscan_paths(), fileGetForeignPaths(), gather_grouping_paths(), generate_gather_paths(), generate_nonunion_paths(), generate_orderedappend_paths(), generate_recursion_path(), generate_union_paths(), generate_useful_gather_paths(), get_index_paths(), grouping_planner(), mark_dummy_rel(), postgresGetForeignJoinPaths(), postgresGetForeignPaths(), preprocess_minmax_aggregates(), query_planner(), set_cte_pathlist(), set_dummy_rel_pathlist(), set_function_pathlist(), set_namedtuplestore_pathlist(), set_plain_rel_pathlist(), set_result_pathlist(), set_subquery_pathlist(), set_tablefunc_pathlist(), set_tablesample_rel_pathlist(), set_values_pathlist(), set_worktable_pathlist(), try_hashjoin_path(), try_mergejoin_path(), and try_nestloop_path().
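A minimal usage sketch (modeled loosely on set_plain_rel_pathlist() in allpaths.c, not part of pathnode.c): callers construct candidate paths and submit each one; add_path() decides which survive in rel->pathlist and frees the rest. The helper name is hypothetical.

static void
add_plain_rel_paths_sketch(PlannerInfo *root, RelOptInfo *rel)
{
    /* Paths for a baserel are parameterized only by lateral references */
    Relids      required_outer = rel->lateral_relids;

    /* Offer a sequential scan; add_path() takes ownership of the Path */
    add_path(rel, create_seqscan_path(root, rel, required_outer, 0));

    /*
     * Further candidates (index scans, TID scans, ...) are offered the same
     * way; dominated candidates are discarded automatically.
     */
}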

◆ add_path_precheck()

bool add_path_precheck ( RelOptInfo * parent_rel,
int disabled_nodes,
Cost startup_cost,
Cost total_cost,
List * pathkeys,
Relids required_outer 
)

Definition at line 688 of file pathnode.c.

691{
692 List *new_path_pathkeys;
693 bool consider_startup;
694 ListCell *p1;
695
696 /* Pretend parameterized paths have no pathkeys, per add_path policy */
697 new_path_pathkeys = required_outer ? NIL : pathkeys;
698
699 /* Decide whether new path's startup cost is interesting */
700 consider_startup = required_outer ? parent_rel->consider_param_startup : parent_rel->consider_startup;
701
702 foreach(p1, parent_rel->pathlist)
703 {
704 Path *old_path = (Path *) lfirst(p1);
705 PathKeysComparison keyscmp;
706
707 /*
708 * Since the pathlist is sorted by disabled_nodes and then by
709 * total_cost, we can stop looking once we reach a path with more
710 * disabled nodes, or the same number of disabled nodes plus a
711 * total_cost larger than the new path's.
712 */
713 if (unlikely(old_path->disabled_nodes != disabled_nodes))
714 {
715 if (disabled_nodes < old_path->disabled_nodes)
716 break;
717 }
718 else if (total_cost <= old_path->total_cost * STD_FUZZ_FACTOR)
719 break;
720
721 /*
722 * We are looking for an old_path with the same parameterization (and
723 * by assumption the same rowcount) that dominates the new path on
724 * pathkeys as well as both cost metrics. If we find one, we can
725 * reject the new path.
726 *
727 * Cost comparisons here should match compare_path_costs_fuzzily.
728 */
729 /* new path can win on startup cost only if consider_startup */
730 if (startup_cost > old_path->startup_cost * STD_FUZZ_FACTOR ||
731 !consider_startup)
732 {
733 /* new path loses on cost, so check pathkeys... */
734 List *old_path_pathkeys;
735
736 old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
737 keyscmp = compare_pathkeys(new_path_pathkeys,
738 old_path_pathkeys);
739 if (keyscmp == PATHKEYS_EQUAL ||
740 keyscmp == PATHKEYS_BETTER2)
741 {
742 /* new path does not win on pathkeys... */
743 if (bms_equal(required_outer, PATH_REQ_OUTER(old_path)))
744 {
745 /* Found an old path that dominates the new one */
746 return false;
747 }
748 }
749 }
750 }
751
752 return true;
753}
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:142
@ PATHKEYS_EQUAL
Definition: paths.h:213
Cost startup_cost
Definition: pathnodes.h:1795
bool consider_param_startup
Definition: pathnodes.h:909
bool consider_startup
Definition: pathnodes.h:907

References bms_equal(), compare_pathkeys(), RelOptInfo::consider_param_startup, RelOptInfo::consider_startup, Path::disabled_nodes, lfirst, NIL, PATH_REQ_OUTER, Path::pathkeys, PATHKEYS_BETTER2, PATHKEYS_EQUAL, RelOptInfo::pathlist, Path::startup_cost, STD_FUZZ_FACTOR, and unlikely.

Referenced by add_partial_path_precheck(), try_hashjoin_path(), try_mergejoin_path(), and try_nestloop_path().
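Illustrative sketch (not from pathnode.c): try_nestloop_path() and friends call this precheck with preliminary cost estimates so that clearly-dominated join paths are never built at all. The function name is hypothetical and the cost arguments are assumed to come from an initial_cost_* estimate.

static void
consider_join_path_sketch(PlannerInfo *root, RelOptInfo *joinrel,
                          int disabled_nodes,
                          Cost est_startup_cost, Cost est_total_cost,
                          List *pathkeys, Relids required_outer)
{
    if (!add_path_precheck(joinrel, disabled_nodes,
                           est_startup_cost, est_total_cost,
                           pathkeys, required_outer))
        return;             /* an existing path already dominates this one */

    /*
     * ... construct the join path (e.g. with create_nestloop_path()) and
     * submit it with add_path(joinrel, path) ...
     */
}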

◆ adjust_limit_rows_costs()

void adjust_limit_rows_costs ( double * rows,
Cost * startup_cost,
Cost * total_cost,
int64 offset_est,
int64 count_est 
)

Definition at line 4038 of file pathnode.c.

4043{
4044 double input_rows = *rows;
4045 Cost input_startup_cost = *startup_cost;
4046 Cost input_total_cost = *total_cost;
4047
4048 if (offset_est != 0)
4049 {
4050 double offset_rows;
4051
4052 if (offset_est > 0)
4053 offset_rows = (double) offset_est;
4054 else
4055 offset_rows = clamp_row_est(input_rows * 0.10);
4056 if (offset_rows > *rows)
4057 offset_rows = *rows;
4058 if (input_rows > 0)
4059 *startup_cost +=
4060 (input_total_cost - input_startup_cost)
4061 * offset_rows / input_rows;
4062 *rows -= offset_rows;
4063 if (*rows < 1)
4064 *rows = 1;
4065 }
4066
4067 if (count_est != 0)
4068 {
4069 double count_rows;
4070
4071 if (count_est > 0)
4072 count_rows = (double) count_est;
4073 else
4074 count_rows = clamp_row_est(input_rows * 0.10);
4075 if (count_rows > *rows)
4076 count_rows = *rows;
4077 if (input_rows > 0)
4078 *total_cost = *startup_cost +
4079 (input_total_cost - input_startup_cost)
4080 * count_rows / input_rows;
4081 *rows = count_rows;
4082 if (*rows < 1)
4083 *rows = 1;
4084 }
4085}
double clamp_row_est(double nrows)
Definition: costsize.c:213
double Cost
Definition: nodes.h:257

References clamp_row_est().

Referenced by create_limit_path(), and estimate_path_cost_size().
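A small worked example (not part of the source, wrapper function hypothetical): applying OFFSET 100 LIMIT 10 to a subpath estimated at 1000 rows, startup cost 0 and total cost 100.

static void
limit_cost_example(void)
{
    double  rows = 1000.0;
    Cost    startup_cost = 0.0;
    Cost    total_cost = 100.0;

    adjust_limit_rows_costs(&rows, &startup_cost, &total_cost,
                            100 /* offset_est */, 10 /* count_est */);

    /*
     * Afterwards: rows == 10 (only the limited rows are emitted),
     * startup_cost == 10 (the cost of skipping 100 of the 1000 input rows),
     * total_cost == 11 (startup plus the per-row cost of 10 more rows).
     */
}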

◆ apply_projection_to_path()

Path * apply_projection_to_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * path,
PathTarget * target 
)

Definition at line 2876 of file pathnode.c.

2880{
2881 QualCost oldcost;
2882
2883 /*
2884 * If given path can't project, we might need a Result node, so make a
2885 * separate ProjectionPath.
2886 */
2887 if (!is_projection_capable_path(path))
2888 return (Path *) create_projection_path(root, rel, path, target);
2889
2890 /*
2891 * We can just jam the desired tlist into the existing path, being sure to
2892 * update its cost estimates appropriately.
2893 */
2894 oldcost = path->pathtarget->cost;
2895 path->pathtarget = target;
2896
2897 path->startup_cost += target->cost.startup - oldcost.startup;
2898 path->total_cost += target->cost.startup - oldcost.startup +
2899 (target->cost.per_tuple - oldcost.per_tuple) * path->rows;
2900
2901 /*
2902 * If the path happens to be a Gather or GatherMerge path, we'd like to
2903 * arrange for the subpath to return the required target list so that
2904 * workers can help project. But if there is something that is not
2905 * parallel-safe in the target expressions, then we can't.
2906 */
2907 if ((IsA(path, GatherPath) || IsA(path, GatherMergePath)) &&
2908 is_parallel_safe(root, (Node *) target->exprs))
2909 {
2910 /*
2911 * We always use create_projection_path here, even if the subpath is
2912 * projection-capable, so as to avoid modifying the subpath in place.
2913 * It seems unlikely at present that there could be any other
2914 * references to the subpath, but better safe than sorry.
2915 *
2916 * Note that we don't change the parallel path's cost estimates; it
2917 * might be appropriate to do so, to reflect the fact that the bulk of
2918 * the target evaluation will happen in workers.
2919 */
2920 if (IsA(path, GatherPath))
2921 {
2922 GatherPath *gpath = (GatherPath *) path;
2923
2924 gpath->subpath = (Path *)
2925 create_projection_path(root,
2926 gpath->subpath->parent,
2927 gpath->subpath,
2928 target);
2929 }
2930 else
2931 {
2932 GatherMergePath *gmpath = (GatherMergePath *) path;
2933
2934 gmpath->subpath = (Path *)
2935 create_projection_path(root,
2936 gmpath->subpath->parent,
2937 gmpath->subpath,
2938 target);
2939 }
2940 }
2941 else if (path->parallel_safe &&
2942 !is_parallel_safe(root, (Node *) target->exprs))
2943 {
2944 /*
2945 * We're inserting a parallel-restricted target list into a path
2946 * currently marked parallel-safe, so we have to mark it as no longer
2947 * safe.
2948 */
2949 path->parallel_safe = false;
2950 }
2951
2952 return path;
2953}
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:754
bool is_projection_capable_path(Path *path)
Definition: createplan.c:7310
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:81
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2766
tree ctl root
Definition: radixtree.h:1857
Path * subpath
Definition: pathnodes.h:2176
Definition: nodes.h:135
List * exprs
Definition: pathnodes.h:1666
QualCost cost
Definition: pathnodes.h:1672
Cost per_tuple
Definition: pathnodes.h:48
Cost startup
Definition: pathnodes.h:47

References PathTarget::cost, create_projection_path(), PathTarget::exprs, if(), is_parallel_safe(), is_projection_capable_path(), IsA, Path::parallel_safe, QualCost::per_tuple, root, Path::rows, QualCost::startup, Path::startup_cost, GatherPath::subpath, GatherMergePath::subpath, and Path::total_cost.

Referenced by adjust_paths_for_srfs(), build_minmax_path(), create_ordered_paths(), and recurse_set_operations().
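Illustrative usage (not from pathnode.c): callers such as adjust_paths_for_srfs() push a new tlist onto an already-built path and must continue with the returned pointer, since a separate ProjectionPath may be substituted. The wrapper below is hypothetical and assumes final_target was built by the caller.

static Path *
project_cheapest_path_sketch(PlannerInfo *root, RelOptInfo *rel,
                             PathTarget *final_target)
{
    Path   *path = rel->cheapest_total_path;

    /* May adjust "path" in place or wrap it in a new ProjectionPath */
    path = apply_projection_to_path(root, rel, path, final_target);

    /* Only the returned pointer should be used from here on */
    return path;
}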

◆ build_child_join_rel()

RelOptInfo * build_child_join_rel ( PlannerInfo * root,
RelOptInfo * outer_rel,
RelOptInfo * inner_rel,
RelOptInfo * parent_joinrel,
List * restrictlist,
SpecialJoinInfo * sjinfo,
int nappinfos,
AppendRelInfo ** appinfos 
)

Definition at line 882 of file relnode.c.

886{
887 RelOptInfo *joinrel = makeNode(RelOptInfo);
888
889 /* Only joins between "other" relations land here. */
890 Assert(IS_OTHER_REL(outer_rel) && IS_OTHER_REL(inner_rel));
891
892 /* The parent joinrel should have consider_partitionwise_join set. */
893 Assert(parent_joinrel->consider_partitionwise_join);
894
895 joinrel->reloptkind = RELOPT_OTHER_JOINREL;
896 joinrel->relids = adjust_child_relids(parent_joinrel->relids,
897 nappinfos, appinfos);
898 joinrel->rows = 0;
899 /* cheap startup cost is interesting iff not all tuples to be retrieved */
900 joinrel->consider_startup = (root->tuple_fraction > 0);
901 joinrel->consider_param_startup = false;
902 joinrel->consider_parallel = false;
903 joinrel->reltarget = create_empty_pathtarget();
904 joinrel->pathlist = NIL;
905 joinrel->ppilist = NIL;
906 joinrel->partial_pathlist = NIL;
907 joinrel->cheapest_startup_path = NULL;
908 joinrel->cheapest_total_path = NULL;
909 joinrel->cheapest_unique_path = NULL;
910 joinrel->cheapest_parameterized_paths = NIL;
911 joinrel->direct_lateral_relids = NULL;
912 joinrel->lateral_relids = NULL;
913 joinrel->relid = 0; /* indicates not a baserel */
914 joinrel->rtekind = RTE_JOIN;
915 joinrel->min_attr = 0;
916 joinrel->max_attr = 0;
917 joinrel->attr_needed = NULL;
918 joinrel->attr_widths = NULL;
919 joinrel->notnullattnums = NULL;
920 joinrel->nulling_relids = NULL;
921 joinrel->lateral_vars = NIL;
922 joinrel->lateral_referencers = NULL;
923 joinrel->indexlist = NIL;
924 joinrel->pages = 0;
925 joinrel->tuples = 0;
926 joinrel->allvisfrac = 0;
927 joinrel->eclass_indexes = NULL;
928 joinrel->subroot = NULL;
929 joinrel->subplan_params = NIL;
930 joinrel->amflags = 0;
931 joinrel->serverid = InvalidOid;
932 joinrel->userid = InvalidOid;
933 joinrel->useridiscurrent = false;
934 joinrel->fdwroutine = NULL;
935 joinrel->fdw_private = NULL;
936 joinrel->baserestrictinfo = NIL;
937 joinrel->baserestrictcost.startup = 0;
938 joinrel->baserestrictcost.per_tuple = 0;
939 joinrel->joininfo = NIL;
940 joinrel->has_eclass_joins = false;
941 joinrel->consider_partitionwise_join = false; /* might get changed later */
942 joinrel->parent = parent_joinrel;
943 joinrel->top_parent = parent_joinrel->top_parent ? parent_joinrel->top_parent : parent_joinrel;
944 joinrel->top_parent_relids = joinrel->top_parent->relids;
945 joinrel->part_scheme = NULL;
946 joinrel->nparts = -1;
947 joinrel->boundinfo = NULL;
948 joinrel->partbounds_merged = false;
949 joinrel->partition_qual = NIL;
950 joinrel->part_rels = NULL;
951 joinrel->live_parts = NULL;
952 joinrel->all_partrels = NULL;
953 joinrel->partexprs = NULL;
954 joinrel->nullable_partexprs = NULL;
955
956 /* Compute information relevant to foreign relations. */
957 set_foreign_rel_properties(joinrel, outer_rel, inner_rel);
958
959 /* Set up reltarget struct */
960 build_child_join_reltarget(root, parent_joinrel, joinrel,
961 nappinfos, appinfos);
962
963 /* Construct joininfo list. */
964 joinrel->joininfo = (List *) adjust_appendrel_attrs(root,
965 (Node *) parent_joinrel->joininfo,
966 nappinfos,
967 appinfos);
968
969 /*
970 * Lateral relids referred in child join will be same as that referred in
971 * the parent relation.
972 */
973 joinrel->direct_lateral_relids = (Relids) bms_copy(parent_joinrel->direct_lateral_relids);
974 joinrel->lateral_relids = (Relids) bms_copy(parent_joinrel->lateral_relids);
975
976 /*
977 * If the parent joinrel has pending equivalence classes, so does the
978 * child.
979 */
980 joinrel->has_eclass_joins = parent_joinrel->has_eclass_joins;
981
982 /* Is the join between partitions itself partitioned? */
983 build_joinrel_partition_info(root, joinrel, outer_rel, inner_rel, sjinfo,
984 restrictlist);
985
986 /* Child joinrel is parallel safe if parent is parallel safe. */
987 joinrel->consider_parallel = parent_joinrel->consider_parallel;
988
989 /* Set estimates of the child-joinrel's size. */
990 set_joinrel_size_estimates(root, joinrel, outer_rel, inner_rel,
991 sjinfo, restrictlist);
992
993 /* We build the join only once. */
994 Assert(!find_join_rel(root, joinrel->relids));
995
996 /* Add the relation to the PlannerInfo. */
997 add_join_rel(root, joinrel);
998
999 /*
1000 * We might need EquivalenceClass members corresponding to the child join,
1001 * so that we can represent sort pathkeys for it. As with children of
1002 * baserels, we shouldn't need this unless there are relevant eclass joins
1003 * (implying that a merge join might be possible) or pathkeys to sort by.
1004 */
1005 if (joinrel->has_eclass_joins || has_useful_pathkeys(root, parent_joinrel))
1006 add_child_join_rel_equivalences(root,
1007 nappinfos, appinfos,
1008 parent_joinrel, joinrel);
1009
1010 return joinrel;
1011}
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:200
Relids adjust_child_relids(Relids relids, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:574
Bitmapset * bms_copy(const Bitmapset *a)
Definition: bitmapset.c:122
void set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *restrictlist)
Definition: costsize.c:5414
void add_child_join_rel_equivalences(PlannerInfo *root, int nappinfos, AppendRelInfo **appinfos, RelOptInfo *parent_joinrel, RelOptInfo *child_joinrel)
Definition: equivclass.c:2941
#define makeNode(_type_)
Definition: nodes.h:161
@ RTE_JOIN
Definition: parsenodes.h:1028
bool has_useful_pathkeys(PlannerInfo *root, RelOptInfo *rel)
Definition: pathkeys.c:2319
Bitmapset * Relids
Definition: pathnodes.h:30
@ RELOPT_OTHER_JOINREL
Definition: pathnodes.h:854
#define IS_OTHER_REL(rel)
Definition: pathnodes.h:878
#define InvalidOid
Definition: postgres_ext.h:35
static void build_joinrel_partition_info(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *restrictlist)
Definition: relnode.c:1991
RelOptInfo * find_join_rel(PlannerInfo *root, Relids relids)
Definition: relnode.c:527
static void build_child_join_reltarget(PlannerInfo *root, RelOptInfo *parentrel, RelOptInfo *childrel, int nappinfos, AppendRelInfo **appinfos)
Definition: relnode.c:2504
static void set_foreign_rel_properties(RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
Definition: relnode.c:589
static void add_join_rel(PlannerInfo *root, RelOptInfo *joinrel)
Definition: relnode.c:627
List * baserestrictinfo
Definition: pathnodes.h:1009
List * subplan_params
Definition: pathnodes.h:978
List * ppilist
Definition: pathnodes.h:923
bool useridiscurrent
Definition: pathnodes.h:992
uint32 amflags
Definition: pathnodes.h:982
List * joininfo
Definition: pathnodes.h:1015
Bitmapset * notnullattnums
Definition: pathnodes.h:960
List * partition_qual
Definition: pathnodes.h:1051
Relids relids
Definition: pathnodes.h:895
struct PathTarget * reltarget
Definition: pathnodes.h:917
Index relid
Definition: pathnodes.h:942
List * lateral_vars
Definition: pathnodes.h:964
Cardinality tuples
Definition: pathnodes.h:973
Relids top_parent_relids
Definition: pathnodes.h:1033
bool partbounds_merged
Definition: pathnodes.h:1049
BlockNumber pages
Definition: pathnodes.h:972
Relids lateral_relids
Definition: pathnodes.h:937
List * cheapest_parameterized_paths
Definition: pathnodes.h:928
RelOptKind reloptkind
Definition: pathnodes.h:889
List * indexlist
Definition: pathnodes.h:968
struct Path * cheapest_unique_path
Definition: pathnodes.h:927
Relids lateral_referencers
Definition: pathnodes.h:966
struct Path * cheapest_startup_path
Definition: pathnodes.h:925
QualCost baserestrictcost
Definition: pathnodes.h:1011
struct Path * cheapest_total_path
Definition: pathnodes.h:926
Oid userid
Definition: pathnodes.h:990
Bitmapset * eclass_indexes
Definition: pathnodes.h:976
Relids all_partrels
Definition: pathnodes.h:1065
Relids direct_lateral_relids
Definition: pathnodes.h:935
bool has_eclass_joins
Definition: pathnodes.h:1017
Oid serverid
Definition: pathnodes.h:988
Bitmapset * live_parts
Definition: pathnodes.h:1063
bool consider_partitionwise_join
Definition: pathnodes.h:1023
PlannerInfo * subroot
Definition: pathnodes.h:977
AttrNumber max_attr
Definition: pathnodes.h:950
Relids nulling_relids
Definition: pathnodes.h:962
double allvisfrac
Definition: pathnodes.h:974
Cardinality rows
Definition: pathnodes.h:901
AttrNumber min_attr
Definition: pathnodes.h:948
RTEKind rtekind
Definition: pathnodes.h:946
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:681

References add_child_join_rel_equivalences(), add_join_rel(), adjust_appendrel_attrs(), adjust_child_relids(), RelOptInfo::all_partrels, RelOptInfo::allvisfrac, RelOptInfo::amflags, Assert(), RelOptInfo::baserestrictcost, RelOptInfo::baserestrictinfo, bms_copy(), build_child_join_reltarget(), build_joinrel_partition_info(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_partitionwise_join, RelOptInfo::consider_startup, create_empty_pathtarget(), RelOptInfo::direct_lateral_relids, RelOptInfo::eclass_indexes, find_join_rel(), RelOptInfo::has_eclass_joins, has_useful_pathkeys(), RelOptInfo::indexlist, InvalidOid, IS_OTHER_REL, RelOptInfo::joininfo, RelOptInfo::lateral_referencers, RelOptInfo::lateral_relids, RelOptInfo::lateral_vars, RelOptInfo::live_parts, makeNode, RelOptInfo::max_attr, RelOptInfo::min_attr, NIL, RelOptInfo::notnullattnums, RelOptInfo::nparts, RelOptInfo::nulling_relids, RelOptInfo::pages, RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, QualCost::per_tuple, RelOptInfo::ppilist, RelOptInfo::relid, RelOptInfo::relids, RELOPT_OTHER_JOINREL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::rows, RTE_JOIN, RelOptInfo::rtekind, RelOptInfo::serverid, set_foreign_rel_properties(), set_joinrel_size_estimates(), QualCost::startup, RelOptInfo::subplan_params, RelOptInfo::subroot, RelOptInfo::top_parent_relids, RelOptInfo::tuples, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by try_partitionwise_join().
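A rough sketch (hypothetical helper, not the actual try_partitionwise_join() code) of how a child joinrel is obtained: the child relid set is translated via its AppendRelInfos and then handed to build_child_join_rel(). The child_* arguments and the translated sjinfo/restrictlist are assumed to have been prepared by the caller; optimizer/appendinfo.h is assumed included for find_appinfos_by_relids().

static RelOptInfo *
make_child_joinrel_sketch(PlannerInfo *root, RelOptInfo *parent_joinrel,
                          RelOptInfo *child_outer, RelOptInfo *child_inner,
                          List *child_restrictlist,
                          SpecialJoinInfo *child_sjinfo)
{
    Relids          child_joinrelids = bms_union(child_outer->relids,
                                                 child_inner->relids);
    int             nappinfos;
    AppendRelInfo **appinfos = find_appinfos_by_relids(root,
                                                       child_joinrelids,
                                                       &nappinfos);

    /* Build (and register) the child joinrel for this pair of child rels */
    return build_child_join_rel(root, child_outer, child_inner,
                                parent_joinrel, child_restrictlist,
                                child_sjinfo, nappinfos, appinfos);
}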

◆ build_join_rel()

RelOptInfo * build_join_rel ( PlannerInfo * root,
Relids joinrelids,
RelOptInfo * outer_rel,
RelOptInfo * inner_rel,
SpecialJoinInfo * sjinfo,
List * pushed_down_joins,
List ** restrictlist_ptr 
)

Definition at line 665 of file relnode.c.

672{
673 RelOptInfo *joinrel;
674 List *restrictlist;
675
676 /* This function should be used only for join between parents. */
677 Assert(!IS_OTHER_REL(outer_rel) && !IS_OTHER_REL(inner_rel));
678
679 /*
680 * See if we already have a joinrel for this set of base rels.
681 */
682 joinrel = find_join_rel(root, joinrelids);
683
684 if (joinrel)
685 {
686 /*
687 * Yes, so we only need to figure the restrictlist for this particular
688 * pair of component relations.
689 */
690 if (restrictlist_ptr)
691 *restrictlist_ptr = build_joinrel_restrictlist(root,
692 joinrel,
693 outer_rel,
694 inner_rel,
695 sjinfo);
696 return joinrel;
697 }
698
699 /*
700 * Nope, so make one.
701 */
702 joinrel = makeNode(RelOptInfo);
703 joinrel->reloptkind = RELOPT_JOINREL;
704 joinrel->relids = bms_copy(joinrelids);
705 joinrel->rows = 0;
706 /* cheap startup cost is interesting iff not all tuples to be retrieved */
707 joinrel->consider_startup = (root->tuple_fraction > 0);
708 joinrel->consider_param_startup = false;
709 joinrel->consider_parallel = false;
710 joinrel->reltarget = create_empty_pathtarget();
711 joinrel->pathlist = NIL;
712 joinrel->ppilist = NIL;
713 joinrel->partial_pathlist = NIL;
714 joinrel->cheapest_startup_path = NULL;
715 joinrel->cheapest_total_path = NULL;
716 joinrel->cheapest_unique_path = NULL;
717 joinrel->cheapest_parameterized_paths = NIL;
718 /* init direct_lateral_relids from children; we'll finish it up below */
719 joinrel->direct_lateral_relids =
720 bms_union(outer_rel->direct_lateral_relids,
721 inner_rel->direct_lateral_relids);
722 joinrel->lateral_relids = min_join_parameterization(root, joinrelids,
723 outer_rel, inner_rel);
724 joinrel->relid = 0; /* indicates not a baserel */
725 joinrel->rtekind = RTE_JOIN;
726 joinrel->min_attr = 0;
727 joinrel->max_attr = 0;
728 joinrel->attr_needed = NULL;
729 joinrel->attr_widths = NULL;
730 joinrel->notnullattnums = NULL;
731 joinrel->nulling_relids = NULL;
732 joinrel->lateral_vars = NIL;
733 joinrel->lateral_referencers = NULL;
734 joinrel->indexlist = NIL;
735 joinrel->statlist = NIL;
736 joinrel->pages = 0;
737 joinrel->tuples = 0;
738 joinrel->allvisfrac = 0;
739 joinrel->eclass_indexes = NULL;
740 joinrel->subroot = NULL;
741 joinrel->subplan_params = NIL;
742 joinrel->rel_parallel_workers = -1;
743 joinrel->amflags = 0;
744 joinrel->serverid = InvalidOid;
745 joinrel->userid = InvalidOid;
746 joinrel->useridiscurrent = false;
747 joinrel->fdwroutine = NULL;
748 joinrel->fdw_private = NULL;
749 joinrel->unique_for_rels = NIL;
750 joinrel->non_unique_for_rels = NIL;
751 joinrel->baserestrictinfo = NIL;
752 joinrel->baserestrictcost.startup = 0;
753 joinrel->baserestrictcost.per_tuple = 0;
754 joinrel->baserestrict_min_security = UINT_MAX;
755 joinrel->joininfo = NIL;
756 joinrel->has_eclass_joins = false;
757 joinrel->consider_partitionwise_join = false; /* might get changed later */
758 joinrel->parent = NULL;
759 joinrel->top_parent = NULL;
760 joinrel->top_parent_relids = NULL;
761 joinrel->part_scheme = NULL;
762 joinrel->nparts = -1;
763 joinrel->boundinfo = NULL;
764 joinrel->partbounds_merged = false;
765 joinrel->partition_qual = NIL;
766 joinrel->part_rels = NULL;
767 joinrel->live_parts = NULL;
768 joinrel->all_partrels = NULL;
769 joinrel->partexprs = NULL;
770 joinrel->nullable_partexprs = NULL;
771
772 /* Compute information relevant to the foreign relations. */
773 set_foreign_rel_properties(joinrel, outer_rel, inner_rel);
774
775 /*
776 * Fill the joinrel's tlist with just the Vars and PHVs that need to be
777 * output from this join (ie, are needed for higher joinclauses or final
778 * output).
779 *
780 * NOTE: the tlist order for a join rel will depend on which pair of outer
781 * and inner rels we first try to build it from. But the contents should
782 * be the same regardless.
783 */
784 build_joinrel_tlist(root, joinrel, outer_rel, sjinfo, pushed_down_joins,
785 (sjinfo->jointype == JOIN_FULL));
786 build_joinrel_tlist(root, joinrel, inner_rel, sjinfo, pushed_down_joins,
787 (sjinfo->jointype != JOIN_INNER));
788 add_placeholders_to_joinrel(root, joinrel, outer_rel, inner_rel, sjinfo);
789
790 /*
791 * add_placeholders_to_joinrel also took care of adding the ph_lateral
792 * sets of any PlaceHolderVars computed here to direct_lateral_relids, so
793 * now we can finish computing that. This is much like the computation of
794 * the transitively-closed lateral_relids in min_join_parameterization,
795 * except that here we *do* have to consider the added PHVs.
796 */
797 joinrel->direct_lateral_relids =
798 bms_del_members(joinrel->direct_lateral_relids, joinrel->relids);
799
800 /*
801 * Construct restrict and join clause lists for the new joinrel. (The
802 * caller might or might not need the restrictlist, but I need it anyway
803 * for set_joinrel_size_estimates().)
804 */
805 restrictlist = build_joinrel_restrictlist(root, joinrel,
806 outer_rel, inner_rel,
807 sjinfo);
808 if (restrictlist_ptr)
809 *restrictlist_ptr = restrictlist;
810 build_joinrel_joinlist(joinrel, outer_rel, inner_rel);
811
812 /*
813 * This is also the right place to check whether the joinrel has any
814 * pending EquivalenceClass joins.
815 */
816 joinrel->has_eclass_joins = has_relevant_eclass_joinclause(root, joinrel);
817
818 /* Store the partition information. */
819 build_joinrel_partition_info(root, joinrel, outer_rel, inner_rel, sjinfo,
820 restrictlist);
821
822 /*
823 * Set estimates of the joinrel's size.
824 */
825 set_joinrel_size_estimates(root, joinrel, outer_rel, inner_rel,
826 sjinfo, restrictlist);
827
828 /*
829 * Set the consider_parallel flag if this joinrel could potentially be
830 * scanned within a parallel worker. If this flag is false for either
831 * inner_rel or outer_rel, then it must be false for the joinrel also.
832 * Even if both are true, there might be parallel-restricted expressions
833 * in the targetlist or quals.
834 *
835 * Note that if there are more than two rels in this relation, they could
836 * be divided between inner_rel and outer_rel in any arbitrary way. We
837 * assume this doesn't matter, because we should hit all the same baserels
838 * and joinclauses while building up to this joinrel no matter which we
839 * take; therefore, we should make the same decision here however we get
840 * here.
841 */
842 if (inner_rel->consider_parallel && outer_rel->consider_parallel &&
843 is_parallel_safe(root, (Node *) restrictlist) &&
844 is_parallel_safe(root, (Node *) joinrel->reltarget->exprs))
845 joinrel->consider_parallel = true;
846
847 /* Add the joinrel to the PlannerInfo. */
848 add_join_rel(root, joinrel);
849
850 /*
851 * Also, if dynamic-programming join search is active, add the new joinrel
852 * to the appropriate sublist. Note: you might think the Assert on number
853 * of members should be for equality, but some of the level 1 rels might
854 * have been joinrels already, so we can only assert <=.
855 */
856 if (root->join_rel_level)
857 {
858 Assert(root->join_cur_level > 0);
859 Assert(root->join_cur_level <= bms_num_members(joinrel->relids));
860 root->join_rel_level[root->join_cur_level] =
861 lappend(root->join_rel_level[root->join_cur_level], joinrel);
862 }
863
864 return joinrel;
865}
Bitmapset * bms_del_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1161
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:751
Bitmapset * bms_union(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:251
bool has_relevant_eclass_joinclause(PlannerInfo *root, RelOptInfo *rel1)
Definition: equivclass.c:3446
List * lappend(List *list, void *datum)
Definition: list.c:339
@ JOIN_FULL
Definition: nodes.h:301
@ JOIN_INNER
Definition: nodes.h:299
@ RELOPT_JOINREL
Definition: pathnodes.h:852
void add_placeholders_to_joinrel(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo)
Definition: placeholder.c:400
static void build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *input_rel, SpecialJoinInfo *sjinfo, List *pushed_down_joins, bool can_null)
Definition: relnode.c:1100
Relids min_join_parameterization(PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
Definition: relnode.c:1022
static List * build_joinrel_restrictlist(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo)
Definition: relnode.c:1285
static void build_joinrel_joinlist(RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
Definition: relnode.c:1322
List * statlist
Definition: pathnodes.h:970
List * unique_for_rels
Definition: pathnodes.h:1001
List * non_unique_for_rels
Definition: pathnodes.h:1003
int rel_parallel_workers
Definition: pathnodes.h:980
Index baserestrict_min_security
Definition: pathnodes.h:1013
JoinType jointype
Definition: pathnodes.h:3039

References add_join_rel(), add_placeholders_to_joinrel(), RelOptInfo::all_partrels, RelOptInfo::allvisfrac, RelOptInfo::amflags, Assert(), RelOptInfo::baserestrict_min_security, RelOptInfo::baserestrictcost, RelOptInfo::baserestrictinfo, bms_copy(), bms_del_members(), bms_num_members(), bms_union(), build_joinrel_joinlist(), build_joinrel_partition_info(), build_joinrel_restrictlist(), build_joinrel_tlist(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_partitionwise_join, RelOptInfo::consider_startup, create_empty_pathtarget(), RelOptInfo::direct_lateral_relids, RelOptInfo::eclass_indexes, PathTarget::exprs, find_join_rel(), RelOptInfo::has_eclass_joins, has_relevant_eclass_joinclause(), RelOptInfo::indexlist, InvalidOid, IS_OTHER_REL, is_parallel_safe(), JOIN_FULL, JOIN_INNER, RelOptInfo::joininfo, SpecialJoinInfo::jointype, lappend(), RelOptInfo::lateral_referencers, RelOptInfo::lateral_relids, RelOptInfo::lateral_vars, RelOptInfo::live_parts, makeNode, RelOptInfo::max_attr, RelOptInfo::min_attr, min_join_parameterization(), NIL, RelOptInfo::non_unique_for_rels, RelOptInfo::notnullattnums, RelOptInfo::nparts, RelOptInfo::nulling_relids, RelOptInfo::pages, RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, QualCost::per_tuple, RelOptInfo::ppilist, RelOptInfo::rel_parallel_workers, RelOptInfo::relid, RelOptInfo::relids, RELOPT_JOINREL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::rows, RTE_JOIN, RelOptInfo::rtekind, RelOptInfo::serverid, set_foreign_rel_properties(), set_joinrel_size_estimates(), QualCost::startup, RelOptInfo::statlist, RelOptInfo::subplan_params, RelOptInfo::subroot, RelOptInfo::top_parent_relids, RelOptInfo::tuples, RelOptInfo::unique_for_rels, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by make_join_rel().
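Illustrative sketch of the call made by make_join_rel() (simplified, hypothetical wrapper; sjinfo and pushed_down_joins are assumed to have been determined beforehand, and joinrelids is the union of the two input relid sets).

static RelOptInfo *
make_joinrel_sketch(PlannerInfo *root, Relids joinrelids,
                    RelOptInfo *rel1, RelOptInfo *rel2,
                    SpecialJoinInfo *sjinfo, List *pushed_down_joins)
{
    List       *restrictlist;
    RelOptInfo *joinrel;

    /* Create or fetch the joinrel; also get the clauses to apply here */
    joinrel = build_join_rel(root, joinrelids, rel1, rel2,
                             sjinfo, pushed_down_joins, &restrictlist);

    /* restrictlist would then be used when generating paths for joinrel */
    return joinrel;
}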

◆ build_simple_rel()

RelOptInfo * build_simple_rel ( PlannerInfo * root,
int relid,
RelOptInfo * parent 
)

Definition at line 192 of file relnode.c.

193{
194 RelOptInfo *rel;
195 RangeTblEntry *rte;
196
197 /* Rel should not exist already */
198 Assert(relid > 0 && relid < root->simple_rel_array_size);
199 if (root->simple_rel_array[relid] != NULL)
200 elog(ERROR, "rel %d already exists", relid);
201
202 /* Fetch RTE for relation */
203 rte = root->simple_rte_array[relid];
204 Assert(rte != NULL);
205
206 rel = makeNode(RelOptInfo);
207 rel->reloptkind = parent ? RELOPT_OTHER_MEMBER_REL : RELOPT_BASEREL;
208 rel->relids = bms_make_singleton(relid);
209 rel->rows = 0;
210 /* cheap startup cost is interesting iff not all tuples to be retrieved */
211 rel->consider_startup = (root->tuple_fraction > 0);
212 rel->consider_param_startup = false; /* might get changed later */
213 rel->consider_parallel = false; /* might get changed later */
214 rel->reltarget = create_empty_pathtarget();
215 rel->pathlist = NIL;
216 rel->ppilist = NIL;
217 rel->partial_pathlist = NIL;
218 rel->cheapest_startup_path = NULL;
219 rel->cheapest_total_path = NULL;
220 rel->cheapest_unique_path = NULL;
221 rel->cheapest_parameterized_paths = NIL;
222 rel->relid = relid;
223 rel->rtekind = rte->rtekind;
224 /* min_attr, max_attr, attr_needed, attr_widths are set below */
225 rel->notnullattnums = NULL;
226 rel->lateral_vars = NIL;
227 rel->indexlist = NIL;
228 rel->statlist = NIL;
229 rel->pages = 0;
230 rel->tuples = 0;
231 rel->allvisfrac = 0;
232 rel->eclass_indexes = NULL;
233 rel->subroot = NULL;
234 rel->subplan_params = NIL;
235 rel->rel_parallel_workers = -1; /* set up in get_relation_info */
236 rel->amflags = 0;
237 rel->serverid = InvalidOid;
238 if (rte->rtekind == RTE_RELATION)
239 {
240 Assert(parent == NULL ||
241 parent->rtekind == RTE_RELATION ||
242 parent->rtekind == RTE_SUBQUERY);
243
244 /*
245 * For any RELATION rte, we need a userid with which to check
246 * permission access. Baserels simply use their own
247 * RTEPermissionInfo's checkAsUser.
248 *
249 * For otherrels normally there's no RTEPermissionInfo, so we use the
250 * parent's, which normally has one. The exceptional case is that the
251 * parent is a subquery, in which case the otherrel will have its own.
252 */
253 if (rel->reloptkind == RELOPT_BASEREL ||
254 (rel->reloptkind == RELOPT_OTHER_MEMBER_REL &&
255 parent->rtekind == RTE_SUBQUERY))
256 {
257 RTEPermissionInfo *perminfo;
258
259 perminfo = getRTEPermissionInfo(root->parse->rteperminfos, rte);
260 rel->userid = perminfo->checkAsUser;
261 }
262 else
263 rel->userid = parent->userid;
264 }
265 else
266 rel->userid = InvalidOid;
267 rel->useridiscurrent = false;
268 rel->fdwroutine = NULL;
269 rel->fdw_private = NULL;
270 rel->unique_for_rels = NIL;
271 rel->non_unique_for_rels = NIL;
272 rel->baserestrictinfo = NIL;
273 rel->baserestrictcost.startup = 0;
274 rel->baserestrictcost.per_tuple = 0;
275 rel->baserestrict_min_security = UINT_MAX;
276 rel->joininfo = NIL;
277 rel->has_eclass_joins = false;
278 rel->consider_partitionwise_join = false; /* might get changed later */
279 rel->part_scheme = NULL;
280 rel->nparts = -1;
281 rel->boundinfo = NULL;
282 rel->partbounds_merged = false;
283 rel->partition_qual = NIL;
284 rel->part_rels = NULL;
285 rel->live_parts = NULL;
286 rel->all_partrels = NULL;
287 rel->partexprs = NULL;
288 rel->nullable_partexprs = NULL;
289
290 /*
291 * Pass assorted information down the inheritance hierarchy.
292 */
293 if (parent)
294 {
295 /* We keep back-links to immediate parent and topmost parent. */
296 rel->parent = parent;
297 rel->top_parent = parent->top_parent ? parent->top_parent : parent;
298 rel->top_parent_relids = rel->top_parent->relids;
299
300 /*
301 * A child rel is below the same outer joins as its parent. (We
302 * presume this info was already calculated for the parent.)
303 */
304 rel->nulling_relids = parent->nulling_relids;
305
306 /*
307 * Also propagate lateral-reference information from appendrel parent
308 * rels to their child rels. We intentionally give each child rel the
309 * same minimum parameterization, even though it's quite possible that
310 * some don't reference all the lateral rels. This is because any
311 * append path for the parent will have to have the same
312 * parameterization for every child anyway, and there's no value in
313 * forcing extra reparameterize_path() calls. Similarly, a lateral
314 * reference to the parent prevents use of otherwise-movable join rels
315 * for each child.
316 *
317 * It's possible for child rels to have their own children, in which
318 * case the topmost parent's lateral info propagates all the way down.
319 */
320 rel->direct_lateral_relids = parent->direct_lateral_relids;
321 rel->lateral_relids = parent->lateral_relids;
322 rel->lateral_referencers = parent->lateral_referencers;
323 }
324 else
325 {
326 rel->parent = NULL;
327 rel->top_parent = NULL;
328 rel->top_parent_relids = NULL;
329 rel->nulling_relids = NULL;
330 rel->direct_lateral_relids = NULL;
331 rel->lateral_relids = NULL;
332 rel->lateral_referencers = NULL;
333 }
334
335 /* Check type of rtable entry */
336 switch (rte->rtekind)
337 {
338 case RTE_RELATION:
339 /* Table --- retrieve statistics from the system catalogs */
340 get_relation_info(root, rte->relid, rte->inh, rel);
341 break;
342 case RTE_SUBQUERY:
343 case RTE_FUNCTION:
344 case RTE_TABLEFUNC:
345 case RTE_VALUES:
346 case RTE_CTE:
347 case RTE_NAMEDTUPLESTORE:
348
349 /*
350 * Subquery, function, tablefunc, values list, CTE, or ENR --- set
351 * up attr range and arrays
352 *
353 * Note: 0 is included in range to support whole-row Vars
354 */
355 rel->min_attr = 0;
356 rel->max_attr = list_length(rte->eref->colnames);
357 rel->attr_needed = (Relids *)
358 palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(Relids));
359 rel->attr_widths = (int32 *)
360 palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(int32));
361 break;
362 case RTE_RESULT:
363 /* RTE_RESULT has no columns, nor could it have whole-row Var */
364 rel->min_attr = 0;
365 rel->max_attr = -1;
366 rel->attr_needed = NULL;
367 rel->attr_widths = NULL;
368 break;
369 default:
370 elog(ERROR, "unrecognized RTE kind: %d",
371 (int) rte->rtekind);
372 break;
373 }
374
375 /*
376 * We must apply the partially filled in RelOptInfo before calling
377 * apply_child_basequals due to some transformations within that function
378 * which require the RelOptInfo to be available in the simple_rel_array.
379 */
380 root->simple_rel_array[relid] = rel;
381
382 /*
383 * Apply the parent's quals to the child, with appropriate substitution of
384 * variables. If the resulting clause is constant-FALSE or NULL after
385 * applying transformations, apply_child_basequals returns false to
386 * indicate that scanning this relation won't yield any rows. In this
387 * case, we mark the child as dummy right away. (We must do this
388 * immediately so that pruning works correctly when recursing in
389 * expand_partitioned_rtentry.)
390 */
391 if (parent)
392 {
393 AppendRelInfo *appinfo = root->append_rel_array[relid];
394
395 Assert(appinfo != NULL);
396 if (!apply_child_basequals(root, parent, rel, rte, appinfo))
397 {
398 /*
399 * Restriction clause reduced to constant FALSE or NULL. Mark as
400 * dummy so we won't scan this relation.
401 */
402 mark_dummy_rel(rel);
403 }
404 }
405
406 return rel;
407}
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:216
int32_t int32
Definition: c.h:498
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:225
bool apply_child_basequals(PlannerInfo *root, RelOptInfo *parentrel, RelOptInfo *childrel, RangeTblEntry *childRTE, AppendRelInfo *appinfo)
Definition: inherit.c:843
void mark_dummy_rel(RelOptInfo *rel)
Definition: joinrels.c:1385
void * palloc0(Size size)
Definition: mcxt.c:1975
RTEPermissionInfo * getRTEPermissionInfo(List *rteperminfos, RangeTblEntry *rte)
@ RTE_CTE
Definition: parsenodes.h:1032
@ RTE_NAMEDTUPLESTORE
Definition: parsenodes.h:1033
@ RTE_VALUES
Definition: parsenodes.h:1031
@ RTE_SUBQUERY
Definition: parsenodes.h:1027
@ RTE_RESULT
Definition: parsenodes.h:1034
@ RTE_FUNCTION
Definition: parsenodes.h:1029
@ RTE_TABLEFUNC
Definition: parsenodes.h:1030
@ RTE_RELATION
Definition: parsenodes.h:1026
@ RELOPT_BASEREL
Definition: pathnodes.h:851
@ RELOPT_OTHER_MEMBER_REL
Definition: pathnodes.h:853
static int list_length(const List *l)
Definition: pg_list.h:152
void get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, RelOptInfo *rel)
Definition: plancat.c:116
RTEKind rtekind
Definition: parsenodes.h:1061

References RelOptInfo::all_partrels, RelOptInfo::allvisfrac, RelOptInfo::amflags, apply_child_basequals(), Assert(), RelOptInfo::baserestrict_min_security, RelOptInfo::baserestrictcost, RelOptInfo::baserestrictinfo, bms_make_singleton(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RTEPermissionInfo::checkAsUser, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_partitionwise_join, RelOptInfo::consider_startup, create_empty_pathtarget(), RelOptInfo::direct_lateral_relids, RelOptInfo::eclass_indexes, elog, ERROR, get_relation_info(), getRTEPermissionInfo(), RelOptInfo::has_eclass_joins, RelOptInfo::indexlist, RangeTblEntry::inh, InvalidOid, RelOptInfo::joininfo, RelOptInfo::lateral_referencers, RelOptInfo::lateral_relids, RelOptInfo::lateral_vars, list_length(), RelOptInfo::live_parts, makeNode, mark_dummy_rel(), RelOptInfo::max_attr, RelOptInfo::min_attr, NIL, RelOptInfo::non_unique_for_rels, RelOptInfo::notnullattnums, RelOptInfo::nparts, RelOptInfo::nulling_relids, RelOptInfo::pages, palloc0(), RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, QualCost::per_tuple, RelOptInfo::ppilist, RelOptInfo::rel_parallel_workers, RelOptInfo::relid, RelOptInfo::relids, RELOPT_BASEREL, RELOPT_OTHER_MEMBER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::rows, RTE_CTE, RTE_FUNCTION, RTE_NAMEDTUPLESTORE, RTE_RELATION, RTE_RESULT, RTE_SUBQUERY, RTE_TABLEFUNC, RTE_VALUES, RangeTblEntry::rtekind, RelOptInfo::rtekind, RelOptInfo::serverid, QualCost::startup, RelOptInfo::statlist, RelOptInfo::subplan_params, RelOptInfo::subroot, RelOptInfo::top_parent_relids, RelOptInfo::tuples, RelOptInfo::unique_for_rels, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by add_base_rels_to_query(), expand_appendrel_subquery(), expand_inherited_rtentry(), expand_partitioned_rtentry(), plan_cluster_use_sort(), plan_create_index_workers(), query_planner(), and recurse_set_operations().
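
A minimal usage sketch (illustrative only, not taken from the PostgreSQL sources): a caller in the spirit of add_base_rels_to_query() builds one RelOptInfo per plain range-table index, passing a NULL parent so each rel becomes a RELOPT_BASEREL. The helper name and the rtindexes list are hypothetical.

static void
make_base_rels(PlannerInfo *root, List *rtindexes)
{
    ListCell   *lc;

    foreach(lc, rtindexes)
    {
        int         varno = lfirst_int(lc);

        /* NULL parent => RELOPT_BASEREL; passing an appendrel parent
         * instead would create a RELOPT_OTHER_MEMBER_REL child rel. */
        (void) build_simple_rel(root, varno, NULL);
    }
}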

◆ calc_nestloop_required_outer()

Relids calc_nestloop_required_outer ( Relids  outerrelids,
Relids  outer_paramrels,
Relids  innerrelids,
Relids  inner_paramrels 
)

Definition at line 2456 of file pathnode.c.

2460{
2461 Relids required_outer;
2462
2463 /* inner_path can require rels from outer path, but not vice versa */
2464 Assert(!bms_overlap(outer_paramrels, innerrelids));
2465 /* easy case if inner path is not parameterized */
2466 if (!inner_paramrels)
2467 return bms_copy(outer_paramrels);
2468 /* else, form the union ... */
2469 required_outer = bms_union(outer_paramrels, inner_paramrels);
2470 /* ... and remove any mention of now-satisfied outer rels */
2471 required_outer = bms_del_members(required_outer,
2472 outerrelids);
2473 return required_outer;
2474}
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:582

References Assert(), bms_copy(), bms_del_members(), bms_overlap(), and bms_union().

Referenced by try_nestloop_path().
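
A small illustration of the bitmapset arithmetic, using hypothetical relid numbers rather than anything from the sources: if the inner path is parameterized by rels {1, 3} and the outer side of the nestloop scans rel 1, only rel 3 must still be supplied from above.

static Relids
example_nestloop_required_outer(void)
{
    Relids      outerrelids = bms_make_singleton(1);    /* outer side scans rel 1 */
    Relids      outer_paramrels = NULL;                 /* outer path not parameterized */
    Relids      innerrelids = bms_make_singleton(2);    /* inner side scans rel 2 */
    Relids      inner_paramrels = bms_add_member(bms_make_singleton(1), 3);

    /* Union of the parameterizations minus rels supplied by the outer side:
     * {} U {1,3} - {1} = {3}. */
    return calc_nestloop_required_outer(outerrelids, outer_paramrels,
                                        innerrelids, inner_paramrels);
}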

◆ calc_non_nestloop_required_outer()

Relids calc_non_nestloop_required_outer ( Path * outer_path,
Path * inner_path 
)

Definition at line 2483 of file pathnode.c.

2484{
2485 Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
2486 Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
2487 Relids innerrelids PG_USED_FOR_ASSERTS_ONLY;
2488 Relids outerrelids PG_USED_FOR_ASSERTS_ONLY;
2489 Relids required_outer;
2490
2491 /*
2492 * Any parameterization of the input paths refers to topmost parents of
2493 * the relevant relations, because reparameterize_path_by_child() hasn't
2494 * been called yet. So we must consider topmost parents of the relations
2495 * being joined, too, while checking for disallowed parameterization
2496 * cases.
2497 */
2498 if (inner_path->parent->top_parent_relids)
2499 innerrelids = inner_path->parent->top_parent_relids;
2500 else
2501 innerrelids = inner_path->parent->relids;
2502
2503 if (outer_path->parent->top_parent_relids)
2504 outerrelids = outer_path->parent->top_parent_relids;
2505 else
2506 outerrelids = outer_path->parent->relids;
2507
2508 /* neither path can require rels from the other */
2509 Assert(!bms_overlap(outer_paramrels, innerrelids));
2510 Assert(!bms_overlap(inner_paramrels, outerrelids));
2511 /* form the union ... */
2512 required_outer = bms_union(outer_paramrels, inner_paramrels);
2513 /* we do not need an explicit test for empty; bms_union gets it right */
2514 return required_outer;
2515}
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:224

References Assert(), bms_overlap(), bms_union(), PATH_REQ_OUTER, and PG_USED_FOR_ASSERTS_ONLY.

Referenced by try_hashjoin_path(), and try_mergejoin_path().

◆ compare_fractional_path_costs()

int compare_fractional_path_costs ( Path * path1,
Path * path2,
double  fraction 
)

Definition at line 124 of file pathnode.c.

126{
127 Cost cost1,
128 cost2;
129
130 /* Number of disabled nodes, if different, trumps all else. */
131 if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
132 {
133 if (path1->disabled_nodes < path2->disabled_nodes)
134 return -1;
135 else
136 return +1;
137 }
138
139 if (fraction <= 0.0 || fraction >= 1.0)
140 return compare_path_costs(path1, path2, TOTAL_COST);
141 cost1 = path1->startup_cost +
142 fraction * (path1->total_cost - path1->startup_cost);
143 cost2 = path2->startup_cost +
144 fraction * (path2->total_cost - path2->startup_cost);
145 if (cost1 < cost2)
146 return -1;
147 if (cost1 > cost2)
148 return +1;
149 return 0;
150}
int compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
Definition: pathnode.c:69
@ TOTAL_COST
Definition: pathnodes.h:38

References compare_path_costs(), Path::disabled_nodes, Path::startup_cost, TOTAL_COST, Path::total_cost, and unlikely.

Referenced by get_cheapest_fractional_path(), and get_cheapest_fractional_path_for_pathkeys().
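
A worked example with hypothetical cost numbers, showing how the interpolated comparison can prefer a fast-start path for small fractions even though its total cost is higher:

/*
 * fraction = 0.1, so each path is scored as
 *     startup_cost + 0.1 * (total_cost - startup_cost)
 *
 *   path1: startup 0,  total 1000  ->   0 + 0.1 * 1000 = 100
 *   path2: startup 90, total  200  ->  90 + 0.1 *  110 = 101
 *
 * compare_fractional_path_costs(path1, path2, 0.1) returns -1: path1 is
 * cheaper for fetching the first 10% of the rows.  With fraction >= 1.0
 * the function falls back to compare_path_costs(..., TOTAL_COST), and
 * path2 wins instead.
 */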

◆ compare_path_costs()

int compare_path_costs ( Path * path1,
Path * path2,
CostSelector  criterion 
)

Definition at line 69 of file pathnode.c.

70{
71 /* Number of disabled nodes, if different, trumps all else. */
72 if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
73 {
74 if (path1->disabled_nodes < path2->disabled_nodes)
75 return -1;
76 else
77 return +1;
78 }
79
80 if (criterion == STARTUP_COST)
81 {
82 if (path1->startup_cost < path2->startup_cost)
83 return -1;
84 if (path1->startup_cost > path2->startup_cost)
85 return +1;
86
87 /*
88 * If paths have the same startup cost (not at all unlikely), order
89 * them by total cost.
90 */
91 if (path1->total_cost < path2->total_cost)
92 return -1;
93 if (path1->total_cost > path2->total_cost)
94 return +1;
95 }
96 else
97 {
98 if (path1->total_cost < path2->total_cost)
99 return -1;
100 if (path1->total_cost > path2->total_cost)
101 return +1;
102
103 /*
104 * If paths have the same total cost, order them by startup cost.
105 */
106 if (path1->startup_cost < path2->startup_cost)
107 return -1;
108 if (path1->startup_cost > path2->startup_cost)
109 return +1;
110 }
111 return 0;
112}
@ STARTUP_COST
Definition: pathnodes.h:38

References Path::disabled_nodes, STARTUP_COST, Path::startup_cost, Path::total_cost, and unlikely.

Referenced by append_startup_cost_compare(), append_total_cost_compare(), compare_fractional_path_costs(), generate_mergejoin_paths(), get_cheapest_parameterized_child_path(), get_cheapest_path_for_pathkeys(), and set_cheapest().
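
A minimal sketch (hypothetical helper, not from the sources) of the typical comparator-style use: scan a path list for the path that is cheapest on startup cost, with ties broken by total cost, which is roughly what set_cheapest() does when filling cheapest_startup_path.

static Path *
cheapest_startup(List *pathlist)
{
    Path       *best = NULL;
    ListCell   *lc;

    foreach(lc, pathlist)
    {
        Path       *path = (Path *) lfirst(lc);

        if (best == NULL ||
            compare_path_costs(path, best, STARTUP_COST) < 0)
            best = path;
    }
    return best;
}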

◆ create_agg_path()

AggPath * create_agg_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
PathTarget * target,
AggStrategy  aggstrategy,
AggSplit  aggsplit,
List * groupClause,
List * qual,
const AggClauseCosts * aggcosts,
double  numGroups 
)

Definition at line 3243 of file pathnode.c.

3253{
3254 AggPath *pathnode = makeNode(AggPath);
3255
3256 pathnode->path.pathtype = T_Agg;
3257 pathnode->path.parent = rel;
3258 pathnode->path.pathtarget = target;
3259 /* For now, assume we are above any joins, so no parameterization */
3260 pathnode->path.param_info = NULL;
3261 pathnode->path.parallel_aware = false;
3262 pathnode->path.parallel_safe = rel->consider_parallel &&
3263 subpath->parallel_safe;
3264 pathnode->path.parallel_workers = subpath->parallel_workers;
3265
3266 if (aggstrategy == AGG_SORTED)
3267 {
3268 /*
3269 * Attempt to preserve the order of the subpath. Additional pathkeys
3270 * may have been added in adjust_group_pathkeys_for_groupagg() to
3271 * support ORDER BY / DISTINCT aggregates. Pathkeys added there
3272 * belong to columns within the aggregate function, so we must strip
3273 * these additional pathkeys off as those columns are unavailable
3274 * above the aggregate node.
3275 */
3276 if (list_length(subpath->pathkeys) > root->num_groupby_pathkeys)
3277 pathnode->path.pathkeys = list_copy_head(subpath->pathkeys,
3278 root->num_groupby_pathkeys);
3279 else
3280 pathnode->path.pathkeys = subpath->pathkeys; /* preserves order */
3281 }
3282 else
3283 pathnode->path.pathkeys = NIL; /* output is unordered */
3284
3285 pathnode->subpath = subpath;
3286
3287 pathnode->aggstrategy = aggstrategy;
3288 pathnode->aggsplit = aggsplit;
3289 pathnode->numGroups = numGroups;
3290 pathnode->transitionSpace = aggcosts ? aggcosts->transitionSpace : 0;
3291 pathnode->groupClause = groupClause;
3292 pathnode->qual = qual;
3293
3294 cost_agg(&pathnode->path, root,
3295 aggstrategy, aggcosts,
3296 list_length(groupClause), numGroups,
3297 qual,
3298 subpath->disabled_nodes,
3299 subpath->startup_cost, subpath->total_cost,
3300 subpath->rows, subpath->pathtarget->width);
3301
3302 /* add tlist eval cost for each output row */
3303 pathnode->path.startup_cost += target->cost.startup;
3304 pathnode->path.total_cost += target->cost.startup +
3305 target->cost.per_tuple * pathnode->path.rows;
3306
3307 return pathnode;
3308}
void cost_agg(Path *path, PlannerInfo *root, AggStrategy aggstrategy, const AggClauseCosts *aggcosts, int numGroupCols, double numGroups, List *quals, int disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples, double input_width)
Definition: costsize.c:2682
List * list_copy_head(const List *oldlist, int len)
Definition: list.c:1593
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:311
@ AGG_SORTED
Definition: nodes.h:361
Size transitionSpace
Definition: pathnodes.h:62
Path * subpath
Definition: pathnodes.h:2396
Cardinality numGroups
Definition: pathnodes.h:2399
AggSplit aggsplit
Definition: pathnodes.h:2398
List * groupClause
Definition: pathnodes.h:2401
uint64 transitionSpace
Definition: pathnodes.h:2400
AggStrategy aggstrategy
Definition: pathnodes.h:2397
Path path
Definition: pathnodes.h:2395
List * qual
Definition: pathnodes.h:2402
NodeTag pathtype
Definition: pathnodes.h:1759
int parallel_workers
Definition: pathnodes.h:1790
bool parallel_aware
Definition: pathnodes.h:1786

References AGG_SORTED, AggPath::aggsplit, AggPath::aggstrategy, RelOptInfo::consider_parallel, PathTarget::cost, cost_agg(), AggPath::groupClause, list_copy_head(), list_length(), makeNode, NIL, AggPath::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, AggPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, AggPath::qual, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), AggPath::subpath, Path::total_cost, AggClauseCosts::transitionSpace, and AggPath::transitionSpace.

Referenced by add_paths_to_grouping_rel(), create_final_distinct_paths(), create_partial_distinct_paths(), create_partial_grouping_paths(), and generate_union_paths().
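
A trimmed-down sketch in the spirit of add_paths_to_grouping_rel(): build a hashed-aggregate path over an already-chosen input path. Everything shown as a plain variable (grouped_rel, cheapest_path, target, groupClause, havingQual, agg_costs, dNumGroups) is assumed to have been computed by the caller.

/* Hashed aggregation: input order is irrelevant, output is unordered. */
add_path(grouped_rel, (Path *)
         create_agg_path(root,
                         grouped_rel,
                         cheapest_path,
                         target,
                         AGG_HASHED,
                         AGGSPLIT_SIMPLE,       /* non-split aggregation */
                         groupClause,           /* GROUP BY clause list */
                         havingQual,            /* HAVING quals, or NIL */
                         agg_costs,
                         dNumGroups));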

◆ create_append_path()

AppendPath * create_append_path ( PlannerInfo * root,
RelOptInfo * rel,
List * subpaths,
List * partial_subpaths,
List * pathkeys,
Relids  required_outer,
int  parallel_workers,
bool  parallel_aware,
double  rows 
)

Definition at line 1300 of file pathnode.c.

1306{
1307 AppendPath *pathnode = makeNode(AppendPath);
1308 ListCell *l;
1309
1310 Assert(!parallel_aware || parallel_workers > 0);
1311
1312 pathnode->path.pathtype = T_Append;
1313 pathnode->path.parent = rel;
1314 pathnode->path.pathtarget = rel->reltarget;
1315
1316 /*
1317 * If this is for a baserel (not a join or non-leaf partition), we prefer
1318 * to apply get_baserel_parampathinfo to construct a full ParamPathInfo
1319 * for the path. This supports building a Memoize path atop this path,
1320 * and if this is a partitioned table the info may be useful for run-time
1321 * pruning (cf make_partition_pruneinfo()).
1322 *
1323 * However, if we don't have "root" then that won't work and we fall back
1324 * on the simpler get_appendrel_parampathinfo. There's no point in doing
1325 * the more expensive thing for a dummy path, either.
1326 */
1327 if (rel->reloptkind == RELOPT_BASEREL && root && subpaths != NIL)
1328 pathnode->path.param_info = get_baserel_parampathinfo(root,
1329 rel,
1330 required_outer);
1331 else
1332 pathnode->path.param_info = get_appendrel_parampathinfo(rel,
1333 required_outer);
1334
1335 pathnode->path.parallel_aware = parallel_aware;
1336 pathnode->path.parallel_safe = rel->consider_parallel;
1337 pathnode->path.parallel_workers = parallel_workers;
1338 pathnode->path.pathkeys = pathkeys;
1339
1340 /*
1341 * For parallel append, non-partial paths are sorted by descending total
1342 * costs. That way, the total time to finish all non-partial paths is
1343 * minimized. Also, the partial paths are sorted by descending startup
1344 * costs. There may be some paths that require to do startup work by a
1345 * single worker. In such case, it's better for workers to choose the
1346 * expensive ones first, whereas the leader should choose the cheapest
1347 * startup plan.
1348 */
1349 if (pathnode->path.parallel_aware)
1350 {
1351 /*
1352 * We mustn't fiddle with the order of subpaths when the Append has
1353 * pathkeys. The order they're listed in is critical to keeping the
1354 * pathkeys valid.
1355 */
1356 Assert(pathkeys == NIL);
1357
1358 list_sort(subpaths, append_total_cost_compare);
1359 list_sort(partial_subpaths, append_startup_cost_compare);
1360 }
1361 pathnode->first_partial_path = list_length(subpaths);
1362 pathnode->subpaths = list_concat(subpaths, partial_subpaths);
1363
1364 /*
1365 * Apply query-wide LIMIT if known and path is for sole base relation.
1366 * (Handling this at this low level is a bit klugy.)
1367 */
1368 if (root != NULL && bms_equal(rel->relids, root->all_query_rels))
1369 pathnode->limit_tuples = root->limit_tuples;
1370 else
1371 pathnode->limit_tuples = -1.0;
1372
1373 foreach(l, pathnode->subpaths)
1374 {
1375 Path *subpath = (Path *) lfirst(l);
1376
1377 pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1378 subpath->parallel_safe;
1379
1380 /* All child paths must have same parameterization */
1381 Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
1382 }
1383
1384 Assert(!parallel_aware || pathnode->path.parallel_safe);
1385
1386 /*
1387 * If there's exactly one child path then the output of the Append is
1388 * necessarily ordered the same as the child's, so we can inherit the
1389 * child's pathkeys if any, overriding whatever the caller might've said.
1390 * Furthermore, if the child's parallel awareness matches the Append's,
1391 * then the Append is a no-op and will be discarded later (in setrefs.c).
1392 * Then we can inherit the child's size and cost too, effectively charging
1393 * zero for the Append. Otherwise, we must do the normal costsize
1394 * calculation.
1395 */
1396 if (list_length(pathnode->subpaths) == 1)
1397 {
1398 Path *child = (Path *) linitial(pathnode->subpaths);
1399
1400 if (child->parallel_aware == parallel_aware)
1401 {
1402 pathnode->path.rows = child->rows;
1403 pathnode->path.startup_cost = child->startup_cost;
1404 pathnode->path.total_cost = child->total_cost;
1405 }
1406 else
1407 cost_append(pathnode);
1408 /* Must do this last, else cost_append complains */
1409 pathnode->path.pathkeys = child->pathkeys;
1410 }
1411 else
1412 cost_append(pathnode);
1413
1414 /* If the caller provided a row estimate, override the computed value. */
1415 if (rows >= 0)
1416 pathnode->path.rows = rows;
1417
1418 return pathnode;
1419}
void cost_append(AppendPath *apath)
Definition: costsize.c:2250
void list_sort(List *list, list_sort_comparator cmp)
Definition: list.c:1674
List * list_concat(List *list1, const List *list2)
Definition: list.c:561
static int append_startup_cost_compare(const ListCell *a, const ListCell *b)
Definition: pathnode.c:1453
static int append_total_cost_compare(const ListCell *a, const ListCell *b)
Definition: pathnode.c:1431
#define linitial(l)
Definition: pg_list.h:178
ParamPathInfo * get_appendrel_parampathinfo(RelOptInfo *appendrel, Relids required_outer)
Definition: relnode.c:1856
ParamPathInfo * get_baserel_parampathinfo(PlannerInfo *root, RelOptInfo *baserel, Relids required_outer)
Definition: relnode.c:1545
int first_partial_path
Definition: pathnodes.h:2068
Cardinality limit_tuples
Definition: pathnodes.h:2069
List * subpaths
Definition: pathnodes.h:2066

References append_startup_cost_compare(), append_total_cost_compare(), Assert(), bms_equal(), RelOptInfo::consider_parallel, cost_append(), AppendPath::first_partial_path, get_appendrel_parampathinfo(), get_baserel_parampathinfo(), lfirst, AppendPath::limit_tuples, linitial, list_concat(), list_length(), list_sort(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, AppendPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, RelOptInfo::relids, RELOPT_BASEREL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, Path::rows, Path::startup_cost, subpath(), AppendPath::subpaths, and Path::total_cost.

Referenced by add_paths_to_append_rel(), create_degenerate_grouping_paths(), generate_orderedappend_paths(), generate_union_paths(), mark_dummy_rel(), reparameterize_path(), and set_dummy_rel_pathlist().
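
The simplest use is building a "dummy" path for a relation that is provably empty; the sketch below is roughly what mark_dummy_rel() and set_dummy_rel_pathlist() (both listed above) do. An Append with no subpaths yields zero rows, and NULL is passed for root because the full parameterization machinery is pointless for a dummy path.

rel->rows = 0;
rel->pathlist = NIL;
rel->partial_pathlist = NIL;
add_path(rel, (Path *) create_append_path(NULL, rel,
                                          NIL, NIL,     /* no subpaths at all */
                                          NIL,          /* no pathkeys */
                                          NULL,         /* no required outer rels */
                                          0, false,     /* not parallel */
                                          -1));         /* let cost_append set rows */
set_cheapest(rel);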

◆ create_bitmap_and_path()

BitmapAndPath * create_bitmap_and_path ( PlannerInfo * root,
RelOptInfo * rel,
List * bitmapquals 
)

Definition at line 1131 of file pathnode.c.

1134{
1135 BitmapAndPath *pathnode = makeNode(BitmapAndPath);
1136 Relids required_outer = NULL;
1137 ListCell *lc;
1138
1139 pathnode->path.pathtype = T_BitmapAnd;
1140 pathnode->path.parent = rel;
1141 pathnode->path.pathtarget = rel->reltarget;
1142
1143 /*
1144 * Identify the required outer rels as the union of what the child paths
1145 * depend on. (Alternatively, we could insist that the caller pass this
1146 * in, but it's more convenient and reliable to compute it here.)
1147 */
1148 foreach(lc, bitmapquals)
1149 {
1150 Path *bitmapqual = (Path *) lfirst(lc);
1151
1152 required_outer = bms_add_members(required_outer,
1153 PATH_REQ_OUTER(bitmapqual));
1154 }
1155 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1156 required_outer);
1157
1158 /*
1159 * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1160 * parallel-safe if and only if rel->consider_parallel is set. So, we can
1161 * set the flag for this path based only on the relation-level flag,
1162 * without actually iterating over the list of children.
1163 */
1164 pathnode->path.parallel_aware = false;
1165 pathnode->path.parallel_safe = rel->consider_parallel;
1166 pathnode->path.parallel_workers = 0;
1167
1168 pathnode->path.pathkeys = NIL; /* always unordered */
1169
1170 pathnode->bitmapquals = bitmapquals;
1171
1172 /* this sets bitmapselectivity as well as the regular cost fields: */
1173 cost_bitmap_and_node(pathnode, root);
1174
1175 return pathnode;
1176}
Bitmapset * bms_add_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:917
void cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
Definition: costsize.c:1165
List * bitmapquals
Definition: pathnodes.h:1931

References BitmapAndPath::bitmapquals, bms_add_members(), RelOptInfo::consider_parallel, cost_bitmap_and_node(), get_baserel_parampathinfo(), lfirst, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, BitmapAndPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by bitmap_and_cost_est(), and choose_bitmap_and().

◆ create_bitmap_heap_path()

BitmapHeapPath * create_bitmap_heap_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * bitmapqual,
Relids  required_outer,
double  loop_count,
int  parallel_degree 
)

Definition at line 1098 of file pathnode.c.

1104{
1105 BitmapHeapPath *pathnode = makeNode(BitmapHeapPath);
1106
1107 pathnode->path.pathtype = T_BitmapHeapScan;
1108 pathnode->path.parent = rel;
1109 pathnode->path.pathtarget = rel->reltarget;
1110 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1111 required_outer);
1112 pathnode->path.parallel_aware = (parallel_degree > 0);
1113 pathnode->path.parallel_safe = rel->consider_parallel;
1114 pathnode->path.parallel_workers = parallel_degree;
1115 pathnode->path.pathkeys = NIL; /* always unordered */
1116
1117 pathnode->bitmapqual = bitmapqual;
1118
1119 cost_bitmap_heap_scan(&pathnode->path, root, rel,
1120 pathnode->path.param_info,
1121 bitmapqual, loop_count);
1122
1123 return pathnode;
1124}
void cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info, Path *bitmapqual, double loop_count)
Definition: costsize.c:1023
Path * bitmapqual
Definition: pathnodes.h:1919

References BitmapHeapPath::bitmapqual, RelOptInfo::consider_parallel, cost_bitmap_heap_scan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, BitmapHeapPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by create_index_paths(), create_partial_bitmap_paths(), and reparameterize_path().
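
A hedged sketch of how the bitmap constructors fit together, roughly the shape of what choose_bitmap_and() and create_index_paths() produce: two index paths are ANDed into one combined bitmap, which is then wrapped in a BitmapHeapPath that actually visits the heap. ipath1, ipath2 and loop_count are assumed to come from the surrounding index-path generation code.

BitmapAndPath *andpath;
BitmapHeapPath *bpath;

/* Combine the two index conditions with a bitmap AND ... */
andpath = create_bitmap_and_path(root, rel, list_make2(ipath1, ipath2));

/* ... then fetch the qualifying heap tuples. */
bpath = create_bitmap_heap_path(root, rel,
                                (Path *) andpath,
                                rel->lateral_relids,   /* required_outer */
                                loop_count,
                                0);                    /* no parallel workers */
add_path(rel, (Path *) bpath);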

◆ create_bitmap_or_path()

BitmapOrPath * create_bitmap_or_path ( PlannerInfo * root,
RelOptInfo * rel,
List * bitmapquals 
)

Definition at line 1183 of file pathnode.c.

1186{
1187 BitmapOrPath *pathnode = makeNode(BitmapOrPath);
1188 Relids required_outer = NULL;
1189 ListCell *lc;
1190
1191 pathnode->path.pathtype = T_BitmapOr;
1192 pathnode->path.parent = rel;
1193 pathnode->path.pathtarget = rel->reltarget;
1194
1195 /*
1196 * Identify the required outer rels as the union of what the child paths
1197 * depend on. (Alternatively, we could insist that the caller pass this
1198 * in, but it's more convenient and reliable to compute it here.)
1199 */
1200 foreach(lc, bitmapquals)
1201 {
1202 Path *bitmapqual = (Path *) lfirst(lc);
1203
1204 required_outer = bms_add_members(required_outer,
1205 PATH_REQ_OUTER(bitmapqual));
1206 }
1207 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1208 required_outer);
1209
1210 /*
1211 * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1212 * parallel-safe if and only if rel->consider_parallel is set. So, we can
1213 * set the flag for this path based only on the relation-level flag,
1214 * without actually iterating over the list of children.
1215 */
1216 pathnode->path.parallel_aware = false;
1217 pathnode->path.parallel_safe = rel->consider_parallel;
1218 pathnode->path.parallel_workers = 0;
1219
1220 pathnode->path.pathkeys = NIL; /* always unordered */
1221
1222 pathnode->bitmapquals = bitmapquals;
1223
1224 /* this sets bitmapselectivity as well as the regular cost fields: */
1225 cost_bitmap_or_node(pathnode, root);
1226
1227 return pathnode;
1228}
void cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
Definition: costsize.c:1210
List * bitmapquals
Definition: pathnodes.h:1944

References BitmapOrPath::bitmapquals, bms_add_members(), RelOptInfo::consider_parallel, cost_bitmap_or_node(), get_baserel_parampathinfo(), lfirst, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, BitmapOrPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by generate_bitmap_or_paths().

◆ create_ctescan_path()

Path * create_ctescan_path ( PlannerInfo * root,
RelOptInfo * rel,
List * pathkeys,
Relids  required_outer 
)

Definition at line 2196 of file pathnode.c.

2198{
2199 Path *pathnode = makeNode(Path);
2200
2201 pathnode->pathtype = T_CteScan;
2202 pathnode->parent = rel;
2203 pathnode->pathtarget = rel->reltarget;
2204 pathnode->param_info = get_baserel_parampathinfo(root, rel,
2205 required_outer);
2206 pathnode->parallel_aware = false;
2207 pathnode->parallel_safe = rel->consider_parallel;
2208 pathnode->parallel_workers = 0;
2209 pathnode->pathkeys = pathkeys;
2210
2211 cost_ctescan(pathnode, root, rel, pathnode->param_info);
2212
2213 return pathnode;
2214}
void cost_ctescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1708

References RelOptInfo::consider_parallel, cost_ctescan(), get_baserel_parampathinfo(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_cte_pathlist().

◆ create_foreign_join_path()

ForeignPath * create_foreign_join_path ( PlannerInfo * root,
RelOptInfo * rel,
PathTarget * target,
double  rows,
int  disabled_nodes,
Cost  startup_cost,
Cost  total_cost,
List * pathkeys,
Relids  required_outer,
Path * fdw_outerpath,
List * fdw_restrictinfo,
List * fdw_private 
)

Definition at line 2355 of file pathnode.c.

2364{
2365 ForeignPath *pathnode = makeNode(ForeignPath);
2366
2367 /*
2368 * We should use get_joinrel_parampathinfo to handle parameterized paths,
2369 * but the API of this function doesn't support it, and existing
2370 * extensions aren't yet trying to build such paths anyway. For the
2371 * moment just throw an error if someone tries it; eventually we should
2372 * revisit this.
2373 */
2374 if (!bms_is_empty(required_outer) || !bms_is_empty(rel->lateral_relids))
2375 elog(ERROR, "parameterized foreign joins are not supported yet");
2376
2377 pathnode->path.pathtype = T_ForeignScan;
2378 pathnode->path.parent = rel;
2379 pathnode->path.pathtarget = target ? target : rel->reltarget;
2380 pathnode->path.param_info = NULL; /* XXX see above */
2381 pathnode->path.parallel_aware = false;
2382 pathnode->path.parallel_safe = rel->consider_parallel;
2383 pathnode->path.parallel_workers = 0;
2384 pathnode->path.rows = rows;
2385 pathnode->path.disabled_nodes = disabled_nodes;
2386 pathnode->path.startup_cost = startup_cost;
2387 pathnode->path.total_cost = total_cost;
2388 pathnode->path.pathkeys = pathkeys;
2389
2390 pathnode->fdw_outerpath = fdw_outerpath;
2391 pathnode->fdw_restrictinfo = fdw_restrictinfo;
2392 pathnode->fdw_private = fdw_private;
2393
2394 return pathnode;
2395}
#define bms_is_empty(a)
Definition: bitmapset.h:118
Path * fdw_outerpath
Definition: pathnodes.h:2004
List * fdw_restrictinfo
Definition: pathnodes.h:2005
List * fdw_private
Definition: pathnodes.h:2006

References bms_is_empty, RelOptInfo::consider_parallel, Path::disabled_nodes, elog, ERROR, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, RelOptInfo::lateral_relids, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ForeignPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, Path::rows, Path::startup_cost, and Path::total_cost.

Referenced by add_paths_with_pathkeys_for_rel(), and postgresGetForeignJoinPaths().

◆ create_foreign_upper_path()

ForeignPath * create_foreign_upper_path ( PlannerInfo * root,
RelOptInfo * rel,
PathTarget * target,
double  rows,
int  disabled_nodes,
Cost  startup_cost,
Cost  total_cost,
List * pathkeys,
Path * fdw_outerpath,
List * fdw_restrictinfo,
List * fdw_private 
)

Definition at line 2409 of file pathnode.c.

2417{
2418 ForeignPath *pathnode = makeNode(ForeignPath);
2419
2420 /*
2421 * Upper relations should never have any lateral references, since joining
2422 * is complete.
2423 */
2424 Assert(bms_is_empty(rel->lateral_relids));
2425
2426 pathnode->path.pathtype = T_ForeignScan;
2427 pathnode->path.parent = rel;
2428 pathnode->path.pathtarget = target ? target : rel->reltarget;
2429 pathnode->path.param_info = NULL;
2430 pathnode->path.parallel_aware = false;
2431 pathnode->path.parallel_safe = rel->consider_parallel;
2432 pathnode->path.parallel_workers = 0;
2433 pathnode->path.rows = rows;
2434 pathnode->path.disabled_nodes = disabled_nodes;
2435 pathnode->path.startup_cost = startup_cost;
2436 pathnode->path.total_cost = total_cost;
2437 pathnode->path.pathkeys = pathkeys;
2438
2439 pathnode->fdw_outerpath = fdw_outerpath;
2440 pathnode->fdw_restrictinfo = fdw_restrictinfo;
2441 pathnode->fdw_private = fdw_private;
2442
2443 return pathnode;
2444}

References Assert(), bms_is_empty, RelOptInfo::consider_parallel, Path::disabled_nodes, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, RelOptInfo::lateral_relids, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ForeignPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, Path::rows, Path::startup_cost, and Path::total_cost.

Referenced by add_foreign_final_paths(), add_foreign_grouping_paths(), and add_foreign_ordered_paths().
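
A hedged sketch in the spirit of add_foreign_grouping_paths(): an FDW that can evaluate the whole aggregation remotely adds a ForeignPath on the upper (grouped) relation. The row and cost figures are assumed to come from the FDW's own remote estimation; there is no required_outer parameter here because upper rels carry no lateral references.

ForeignPath *grouppath;

grouppath = create_foreign_upper_path(root,
                                      grouped_rel,
                                      grouped_rel->reltarget,
                                      rows,             /* estimated result rows */
                                      0,                /* disabled_nodes */
                                      startup_cost,
                                      total_cost,
                                      NIL,              /* result ordering unknown */
                                      NULL,             /* no fdw_outerpath */
                                      NIL,              /* no fdw_restrictinfo */
                                      NIL);             /* no fdw_private */
add_path(grouped_rel, (Path *) grouppath);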

◆ create_foreignscan_path()

ForeignPath * create_foreignscan_path ( PlannerInfo * root,
RelOptInfo * rel,
PathTarget * target,
double  rows,
int  disabled_nodes,
Cost  startup_cost,
Cost  total_cost,
List * pathkeys,
Relids  required_outer,
Path * fdw_outerpath,
List * fdw_restrictinfo,
List * fdw_private 
)

Definition at line 2307 of file pathnode.c.

2316{
2317 ForeignPath *pathnode = makeNode(ForeignPath);
2318
2319 /* Historically some FDWs were confused about when to use this */
2320 Assert(IS_SIMPLE_REL(rel));
2321
2322 pathnode->path.pathtype = T_ForeignScan;
2323 pathnode->path.parent = rel;
2324 pathnode->path.pathtarget = target ? target : rel->reltarget;
2325 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2326 required_outer);
2327 pathnode->path.parallel_aware = false;
2328 pathnode->path.parallel_safe = rel->consider_parallel;
2329 pathnode->path.parallel_workers = 0;
2330 pathnode->path.rows = rows;
2331 pathnode->path.disabled_nodes = disabled_nodes;
2332 pathnode->path.startup_cost = startup_cost;
2333 pathnode->path.total_cost = total_cost;
2334 pathnode->path.pathkeys = pathkeys;
2335
2336 pathnode->fdw_outerpath = fdw_outerpath;
2337 pathnode->fdw_restrictinfo = fdw_restrictinfo;
2338 pathnode->fdw_private = fdw_private;
2339
2340 return pathnode;
2341}
#define IS_SIMPLE_REL(rel)
Definition: pathnodes.h:863

References Assert(), RelOptInfo::consider_parallel, Path::disabled_nodes, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, get_baserel_parampathinfo(), IS_SIMPLE_REL, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ForeignPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, Path::rows, Path::startup_cost, and Path::total_cost.

Referenced by add_paths_with_pathkeys_for_rel(), fileGetForeignPaths(), and postgresGetForeignPaths().
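
A minimal GetForeignPaths-style sketch in the spirit of fileGetForeignPaths(): the FDW estimates costs however it likes and adds one unparameterized, unordered foreign-scan path. The callback name and the cost formula are placeholders.

static void
exampleGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid)
{
    Cost        startup_cost = 10;
    Cost        total_cost = startup_cost + baserel->rows;  /* crude placeholder */

    add_path(baserel, (Path *)
             create_foreignscan_path(root, baserel,
                                     NULL,              /* default pathtarget */
                                     baserel->rows,
                                     0,                 /* disabled_nodes */
                                     startup_cost,
                                     total_cost,
                                     NIL,               /* no pathkeys */
                                     NULL,              /* no outer rel */
                                     NULL,              /* no fdw_outerpath */
                                     NIL,               /* no fdw_restrictinfo */
                                     NIL));             /* no fdw_private */
}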

◆ create_functionscan_path()

Path * create_functionscan_path ( PlannerInfo * root,
RelOptInfo * rel,
List * pathkeys,
Relids  required_outer 
)

Definition at line 2118 of file pathnode.c.

2120{
2121 Path *pathnode = makeNode(Path);
2122
2123 pathnode->pathtype = T_FunctionScan;
2124 pathnode->parent = rel;
2125 pathnode->pathtarget = rel->reltarget;
2126 pathnode->param_info = get_baserel_parampathinfo(root, rel,
2127 required_outer);
2128 pathnode->parallel_aware = false;
2129 pathnode->parallel_safe = rel->consider_parallel;
2130 pathnode->parallel_workers = 0;
2131 pathnode->pathkeys = pathkeys;
2132
2133 cost_functionscan(pathnode, root, rel, pathnode->param_info);
2134
2135 return pathnode;
2136}
void cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1538

References RelOptInfo::consider_parallel, cost_functionscan(), get_baserel_parampathinfo(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_function_pathlist().

◆ create_gather_merge_path()

GatherMergePath * create_gather_merge_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
PathTarget * target,
List * pathkeys,
Relids  required_outer,
double *  rows 
)

Definition at line 1962 of file pathnode.c.

1965{
1966 GatherMergePath *pathnode = makeNode(GatherMergePath);
1967 int input_disabled_nodes = 0;
1968 Cost input_startup_cost = 0;
1969 Cost input_total_cost = 0;
1970
1971 Assert(subpath->parallel_safe);
1972 Assert(pathkeys);
1973
1974 /*
1975 * The subpath should guarantee that it is adequately ordered either by
1976 * adding an explicit sort node or by using presorted input. We cannot
1977 * add an explicit Sort node for the subpath in createplan.c on additional
1978 * pathkeys, because we can't guarantee the sort would be safe. For
1979 * example, expressions may be volatile or otherwise parallel unsafe.
1980 */
1981 if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
1982 elog(ERROR, "gather merge input not sufficiently sorted");
1983
1984 pathnode->path.pathtype = T_GatherMerge;
1985 pathnode->path.parent = rel;
1986 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1987 required_outer);
1988 pathnode->path.parallel_aware = false;
1989
1990 pathnode->subpath = subpath;
1991 pathnode->num_workers = subpath->parallel_workers;
1992 pathnode->path.pathkeys = pathkeys;
1993 pathnode->path.pathtarget = target ? target : rel->reltarget;
1994
1995 input_disabled_nodes += subpath->disabled_nodes;
1996 input_startup_cost += subpath->startup_cost;
1997 input_total_cost += subpath->total_cost;
1998
1999 cost_gather_merge(pathnode, root, rel, pathnode->path.param_info,
2000 input_disabled_nodes, input_startup_cost,
2001 input_total_cost, rows);
2002
2003 return pathnode;
2004}
void cost_gather_merge(GatherMergePath *path, PlannerInfo *root, RelOptInfo *rel, ParamPathInfo *param_info, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double *rows)
Definition: costsize.c:485
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:343

References Assert(), cost_gather_merge(), elog, ERROR, get_baserel_parampathinfo(), makeNode, GatherMergePath::num_workers, Path::parallel_aware, GatherMergePath::path, Path::pathkeys, pathkeys_contained_in(), Path::pathtype, RelOptInfo::reltarget, root, subpath(), and GatherMergePath::subpath.

Referenced by create_ordered_paths(), gather_grouping_paths(), generate_gather_paths(), and generate_useful_gather_paths().
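
A hedged sketch in the spirit of generate_useful_gather_paths(): a partial path that is already sorted on some useful pathkeys is wrapped in a Gather Merge so the ordering survives the transition out of the parallel portion of the plan. subpath and useful_pathkeys are assumed inputs, and subpath->pathkeys must already contain useful_pathkeys; the total-row estimate shown is a simple scale-up by the number of workers.

double      total_rows = subpath->rows * subpath->parallel_workers;
GatherMergePath *gmpath;

gmpath = create_gather_merge_path(root, rel, subpath,
                                  rel->reltarget,
                                  useful_pathkeys,
                                  NULL,             /* required_outer */
                                  &total_rows);
add_path(rel, (Path *) gmpath);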

◆ create_gather_path()

GatherPath * create_gather_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
PathTarget * target,
Relids  required_outer,
double *  rows 
)

Definition at line 2044 of file pathnode.c.

2046{
2047 GatherPath *pathnode = makeNode(GatherPath);
2048
2049 Assert(subpath->parallel_safe);
2050
2051 pathnode->path.pathtype = T_Gather;
2052 pathnode->path.parent = rel;
2053 pathnode->path.pathtarget = target;
2054 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2055 required_outer);
2056 pathnode->path.parallel_aware = false;
2057 pathnode->path.parallel_safe = false;
2058 pathnode->path.parallel_workers = 0;
2059 pathnode->path.pathkeys = NIL; /* Gather has unordered result */
2060
2061 pathnode->subpath = subpath;
2062 pathnode->num_workers = subpath->parallel_workers;
2063 pathnode->single_copy = false;
2064
2065 if (pathnode->num_workers == 0)
2066 {
2067 pathnode->path.pathkeys = subpath->pathkeys;
2068 pathnode->num_workers = 1;
2069 pathnode->single_copy = true;
2070 }
2071
2072 cost_gather(pathnode, root, rel, pathnode->path.param_info, rows);
2073
2074 return pathnode;
2075}
void cost_gather(GatherPath *path, PlannerInfo *root, RelOptInfo *rel, ParamPathInfo *param_info, double *rows)
Definition: costsize.c:446
bool single_copy
Definition: pathnodes.h:2177
int num_workers
Definition: pathnodes.h:2178

References Assert(), cost_gather(), get_baserel_parampathinfo(), makeNode, NIL, GatherPath::num_workers, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GatherPath::path, Path::pathkeys, Path::pathtype, root, GatherPath::single_copy, subpath(), and GatherPath::subpath.

Referenced by generate_gather_paths(), and generate_union_paths().
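
A minimal sketch of the core pattern in generate_gather_paths(): take the cheapest partial path and gather it into a complete, non-partial path for the relation. It assumes rel->partial_pathlist is non-empty.

Path       *cheapest_partial_path = (Path *) linitial(rel->partial_pathlist);
double      rows = cheapest_partial_path->rows *
    cheapest_partial_path->parallel_workers;
GatherPath *gpath;

gpath = create_gather_path(root, rel,
                           cheapest_partial_path,
                           rel->reltarget,
                           NULL,        /* required_outer */
                           &rows);      /* override the row estimate */
add_path(rel, (Path *) gpath);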

◆ create_group_path()

GroupPath * create_group_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
List * groupClause,
List * qual,
double  numGroups 
)

Definition at line 3130 of file pathnode.c.

3136{
3137 GroupPath *pathnode = makeNode(GroupPath);
3138 PathTarget *target = rel->reltarget;
3139
3140 pathnode->path.pathtype = T_Group;
3141 pathnode->path.parent = rel;
3142 pathnode->path.pathtarget = target;
3143 /* For now, assume we are above any joins, so no parameterization */
3144 pathnode->path.param_info = NULL;
3145 pathnode->path.parallel_aware = false;
3146 pathnode->path.parallel_safe = rel->consider_parallel &&
3147 subpath->parallel_safe;
3148 pathnode->path.parallel_workers = subpath->parallel_workers;
3149 /* Group doesn't change sort ordering */
3150 pathnode->path.pathkeys = subpath->pathkeys;
3151
3152 pathnode->subpath = subpath;
3153
3154 pathnode->groupClause = groupClause;
3155 pathnode->qual = qual;
3156
3157 cost_group(&pathnode->path, root,
3158 list_length(groupClause),
3159 numGroups,
3160 qual,
3161 subpath->disabled_nodes,
3162 subpath->startup_cost, subpath->total_cost,
3163 subpath->rows);
3164
3165 /* add tlist eval cost for each output row */
3166 pathnode->path.startup_cost += target->cost.startup;
3167 pathnode->path.total_cost += target->cost.startup +
3168 target->cost.per_tuple * pathnode->path.rows;
3169
3170 return pathnode;
3171}
void cost_group(Path *path, PlannerInfo *root, int numGroupCols, double numGroups, List *quals, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
Definition: costsize.c:3195
List * qual
Definition: pathnodes.h:2370
List * groupClause
Definition: pathnodes.h:2369
Path * subpath
Definition: pathnodes.h:2368
Path path
Definition: pathnodes.h:2367

References RelOptInfo::consider_parallel, PathTarget::cost, cost_group(), GroupPath::groupClause, list_length(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GroupPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, GroupPath::qual, RelOptInfo::reltarget, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), GroupPath::subpath, and Path::total_cost.

Referenced by add_paths_to_grouping_rel(), and create_partial_grouping_paths().

◆ create_group_result_path()

GroupResultPath * create_group_result_path ( PlannerInfo * root,
RelOptInfo * rel,
PathTarget * target,
List * havingqual 
)

Definition at line 1586 of file pathnode.c.

1588{
1589 GroupResultPath *pathnode = makeNode(GroupResultPath);
1590
1591 pathnode->path.pathtype = T_Result;
1592 pathnode->path.parent = rel;
1593 pathnode->path.pathtarget = target;
1594 pathnode->path.param_info = NULL; /* there are no other rels... */
1595 pathnode->path.parallel_aware = false;
1596 pathnode->path.parallel_safe = rel->consider_parallel;
1597 pathnode->path.parallel_workers = 0;
1598 pathnode->path.pathkeys = NIL;
1599 pathnode->quals = havingqual;
1600
1601 /*
1602 * We can't quite use cost_resultscan() because the quals we want to
1603 * account for are not baserestrict quals of the rel. Might as well just
1604 * hack it here.
1605 */
1606 pathnode->path.rows = 1;
1607 pathnode->path.startup_cost = target->cost.startup;
1608 pathnode->path.total_cost = target->cost.startup +
1609 cpu_tuple_cost + target->cost.per_tuple;
1610
1611 /*
1612 * Add cost of qual, if any --- but we ignore its selectivity, since our
1613 * rowcount estimate should be 1 no matter what the qual is.
1614 */
1615 if (havingqual)
1616 {
1617 QualCost qual_cost;
1618
1619 cost_qual_eval(&qual_cost, havingqual, root);
1620 /* havingqual is evaluated once at startup */
1621 pathnode->path.startup_cost += qual_cost.startup + qual_cost.per_tuple;
1622 pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
1623 }
1624
1625 return pathnode;
1626}
double cpu_tuple_cost
Definition: costsize.c:132
void cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
Definition: costsize.c:4742

References RelOptInfo::consider_parallel, PathTarget::cost, cost_qual_eval(), cpu_tuple_cost, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GroupResultPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, GroupResultPath::quals, root, Path::rows, QualCost::startup, Path::startup_cost, and Path::total_cost.

Referenced by create_degenerate_grouping_paths(), and query_planner().

◆ create_groupingsets_path()

GroupingSetsPath * create_groupingsets_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
List * having_qual,
AggStrategy  aggstrategy,
List * rollups,
const AggClauseCosts * agg_costs 
)

Definition at line 3326 of file pathnode.c.

3333{
3334 GroupingSetsPath *pathnode = makeNode(GroupingSetsPath);
3335 PathTarget *target = rel->reltarget;
3336 ListCell *lc;
3337 bool is_first = true;
3338 bool is_first_sort = true;
3339
3340 /* The topmost generated Plan node will be an Agg */
3341 pathnode->path.pathtype = T_Agg;
3342 pathnode->path.parent = rel;
3343 pathnode->path.pathtarget = target;
3344 pathnode->path.param_info = subpath->param_info;
3345 pathnode->path.parallel_aware = false;
3346 pathnode->path.parallel_safe = rel->consider_parallel &&
3347 subpath->parallel_safe;
3348 pathnode->path.parallel_workers = subpath->parallel_workers;
3349 pathnode->subpath = subpath;
3350
3351 /*
3352 * Simplify callers by downgrading AGG_SORTED to AGG_PLAIN, and AGG_MIXED
3353 * to AGG_HASHED, here if possible.
3354 */
3355 if (aggstrategy == AGG_SORTED &&
3356 list_length(rollups) == 1 &&
3357 ((RollupData *) linitial(rollups))->groupClause == NIL)
3358 aggstrategy = AGG_PLAIN;
3359
3360 if (aggstrategy == AGG_MIXED &&
3361 list_length(rollups) == 1)
3362 aggstrategy = AGG_HASHED;
3363
3364 /*
3365 * Output will be in sorted order by group_pathkeys if, and only if, there
3366 * is a single rollup operation on a non-empty list of grouping
3367 * expressions.
3368 */
3369 if (aggstrategy == AGG_SORTED && list_length(rollups) == 1)
3370 pathnode->path.pathkeys = root->group_pathkeys;
3371 else
3372 pathnode->path.pathkeys = NIL;
3373
3374 pathnode->aggstrategy = aggstrategy;
3375 pathnode->rollups = rollups;
3376 pathnode->qual = having_qual;
3377 pathnode->transitionSpace = agg_costs ? agg_costs->transitionSpace : 0;
3378
3379 Assert(rollups != NIL);
3380 Assert(aggstrategy != AGG_PLAIN || list_length(rollups) == 1);
3381 Assert(aggstrategy != AGG_MIXED || list_length(rollups) > 1);
3382
3383 foreach(lc, rollups)
3384 {
3385 RollupData *rollup = lfirst(lc);
3386 List *gsets = rollup->gsets;
3387 int numGroupCols = list_length(linitial(gsets));
3388
3389 /*
3390 * In AGG_SORTED or AGG_PLAIN mode, the first rollup takes the
3391 * (already-sorted) input, and following ones do their own sort.
3392 *
3393 * In AGG_HASHED mode, there is one rollup for each grouping set.
3394 *
3395 * In AGG_MIXED mode, the first rollups are hashed, the first
3396 * non-hashed one takes the (already-sorted) input, and following ones
3397 * do their own sort.
3398 */
3399 if (is_first)
3400 {
3401 cost_agg(&pathnode->path, root,
3402 aggstrategy,
3403 agg_costs,
3404 numGroupCols,
3405 rollup->numGroups,
3406 having_qual,
3407 subpath->disabled_nodes,
3408 subpath->startup_cost,
3409 subpath->total_cost,
3410 subpath->rows,
3411 subpath->pathtarget->width);
3412 is_first = false;
3413 if (!rollup->is_hashed)
3414 is_first_sort = false;
3415 }
3416 else
3417 {
3418 Path sort_path; /* dummy for result of cost_sort */
3419 Path agg_path; /* dummy for result of cost_agg */
3420
3421 if (rollup->is_hashed || is_first_sort)
3422 {
3423 /*
3424 * Account for cost of aggregation, but don't charge input
3425 * cost again
3426 */
3427 cost_agg(&agg_path, root,
3428 rollup->is_hashed ? AGG_HASHED : AGG_SORTED,
3429 agg_costs,
3430 numGroupCols,
3431 rollup->numGroups,
3432 having_qual,
3433 0, 0.0, 0.0,
3434 subpath->rows,
3435 subpath->pathtarget->width);
3436 if (!rollup->is_hashed)
3437 is_first_sort = false;
3438 }
3439 else
3440 {
3441 /* Account for cost of sort, but don't charge input cost again */
3442 cost_sort(&sort_path, root, NIL, 0,
3443 0.0,
3444 subpath->rows,
3445 subpath->pathtarget->width,
3446 0.0,
3447 work_mem,
3448 -1.0);
3449
3450 /* Account for cost of aggregation */
3451
3452 cost_agg(&agg_path, root,
3453 AGG_SORTED,
3454 agg_costs,
3455 numGroupCols,
3456 rollup->numGroups,
3457 having_qual,
3458 sort_path.disabled_nodes,
3459 sort_path.startup_cost,
3460 sort_path.total_cost,
3461 sort_path.rows,
3462 subpath->pathtarget->width);
3463 }
3464
3465 pathnode->path.disabled_nodes += agg_path.disabled_nodes;
3466 pathnode->path.total_cost += agg_path.total_cost;
3467 pathnode->path.rows += agg_path.rows;
3468 }
3469 }
3470
3471 /* add tlist eval cost for each output row */
3472 pathnode->path.startup_cost += target->cost.startup;
3473 pathnode->path.total_cost += target->cost.startup +
3474 target->cost.per_tuple * pathnode->path.rows;
3475
3476 return pathnode;
3477}
void cost_sort(Path *path, PlannerInfo *root, List *pathkeys, int input_disabled_nodes, Cost input_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition: costsize.c:2144
int work_mem
Definition: globals.c:132
@ AGG_HASHED
Definition: nodes.h:362
@ AGG_MIXED
Definition: nodes.h:363
@ AGG_PLAIN
Definition: nodes.h:360
uint64 transitionSpace
Definition: pathnodes.h:2442
AggStrategy aggstrategy
Definition: pathnodes.h:2439
Cardinality numGroups
Definition: pathnodes.h:2426
List * gsets
Definition: pathnodes.h:2424
bool is_hashed
Definition: pathnodes.h:2428

References AGG_HASHED, AGG_MIXED, AGG_PLAIN, AGG_SORTED, GroupingSetsPath::aggstrategy, Assert(), RelOptInfo::consider_parallel, PathTarget::cost, cost_agg(), cost_sort(), Path::disabled_nodes, RollupData::gsets, RollupData::is_hashed, lfirst, linitial, list_length(), makeNode, NIL, RollupData::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GroupingSetsPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, GroupingSetsPath::qual, RelOptInfo::reltarget, GroupingSetsPath::rollups, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), GroupingSetsPath::subpath, Path::total_cost, AggClauseCosts::transitionSpace, GroupingSetsPath::transitionSpace, and work_mem.

Referenced by consider_groupingsets_paths().

◆ create_hashjoin_path()

HashPath * create_hashjoin_path ( PlannerInfo * root,
RelOptInfo * joinrel,
JoinType  jointype,
JoinCostWorkspace * workspace,
JoinPathExtraData * extra,
Path * outer_path,
Path * inner_path,
bool  parallel_hash,
List * restrict_clauses,
Relids  required_outer,
List * hashclauses 
)

Definition at line 2700 of file pathnode.c.

2711{
2712 HashPath *pathnode = makeNode(HashPath);
2713
2714 pathnode->jpath.path.pathtype = T_HashJoin;
2715 pathnode->jpath.path.parent = joinrel;
2716 pathnode->jpath.path.pathtarget = joinrel->reltarget;
2717 pathnode->jpath.path.param_info =
2718 get_joinrel_parampathinfo(root,
2719 joinrel,
2720 outer_path,
2721 inner_path,
2722 extra->sjinfo,
2723 required_outer,
2724 &restrict_clauses);
2725 pathnode->jpath.path.parallel_aware =
2726 joinrel->consider_parallel && parallel_hash;
2727 pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2728 outer_path->parallel_safe && inner_path->parallel_safe;
2729 /* This is a foolish way to estimate parallel_workers, but for now... */
2730 pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2731
2732 /*
2733 * A hashjoin never has pathkeys, since its output ordering is
2734 * unpredictable due to possible batching. XXX If the inner relation is
2735 * small enough, we could instruct the executor that it must not batch,
2736 * and then we could assume that the output inherits the outer relation's
2737 * ordering, which might save a sort step. However there is considerable
2738 * downside if our estimate of the inner relation size is badly off. For
2739 * the moment we don't risk it. (Note also that if we wanted to take this
2740 * seriously, joinpath.c would have to consider many more paths for the
2741 * outer rel than it does now.)
2742 */
2743 pathnode->jpath.path.pathkeys = NIL;
2744 pathnode->jpath.jointype = jointype;
2745 pathnode->jpath.inner_unique = extra->inner_unique;
2746 pathnode->jpath.outerjoinpath = outer_path;
2747 pathnode->jpath.innerjoinpath = inner_path;
2748 pathnode->jpath.joinrestrictinfo = restrict_clauses;
2749 pathnode->path_hashclauses = hashclauses;
2750 /* final_cost_hashjoin will fill in pathnode->num_batches */
2751
2752 final_cost_hashjoin(root, pathnode, workspace, extra);
2753
2754 return pathnode;
2755}
void final_cost_hashjoin(PlannerInfo *root, HashPath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition: costsize.c:4275
ParamPathInfo * get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, Relids required_outer, List **restrict_clauses)
Definition: relnode.c:1659
List * path_hashclauses
Definition: pathnodes.h:2294
JoinPath jpath
Definition: pathnodes.h:2293
SpecialJoinInfo * sjinfo
Definition: pathnodes.h:3374
Path * outerjoinpath
Definition: pathnodes.h:2208
Path * innerjoinpath
Definition: pathnodes.h:2209
JoinType jointype
Definition: pathnodes.h:2203
bool inner_unique
Definition: pathnodes.h:2205
List * joinrestrictinfo
Definition: pathnodes.h:2211

References RelOptInfo::consider_parallel, final_cost_hashjoin(), get_joinrel_parampathinfo(), JoinPath::inner_unique, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, JoinPath::joinrestrictinfo, JoinPath::jointype, HashPath::jpath, makeNode, NIL, JoinPath::outerjoinpath, Path::parallel_safe, Path::parallel_workers, HashPath::path_hashclauses, RelOptInfo::reltarget, root, and JoinPathExtraData::sjinfo.

Referenced by try_hashjoin_path(), and try_partial_hashjoin_path().

◆ create_incremental_sort_path()

IncrementalSortPath * create_incremental_sort_path ( PlannerInfo root,
RelOptInfo rel,
Path subpath,
List pathkeys,
int  presorted_keys,
double  limit_tuples 
)

Definition at line 3035 of file pathnode.c.

3041{
3042 IncrementalSortPath *sort = makeNode(IncrementalSortPath);
3043 SortPath *pathnode = &sort->spath;
3044
3045 pathnode->path.pathtype = T_IncrementalSort;
3046 pathnode->path.parent = rel;
3047 /* Sort doesn't project, so use source path's pathtarget */
3048 pathnode->path.pathtarget = subpath->pathtarget;
3049 /* For now, assume we are above any joins, so no parameterization */
3050 pathnode->path.param_info = NULL;
3051 pathnode->path.parallel_aware = false;
3052 pathnode->path.parallel_safe = rel->consider_parallel &&
3053 subpath->parallel_safe;
3054 pathnode->path.parallel_workers = subpath->parallel_workers;
3055 pathnode->path.pathkeys = pathkeys;
3056
3057 pathnode->subpath = subpath;
3058
3059 cost_incremental_sort(&pathnode->path,
3060 root, pathkeys, presorted_keys,
3061 subpath->disabled_nodes,
3062 subpath->startup_cost,
3063 subpath->total_cost,
3064 subpath->rows,
3065 subpath->pathtarget->width,
3066 0.0, /* XXX comparison_cost shouldn't be 0? */
3067 work_mem, limit_tuples);
3068
3069 sort->nPresortedCols = presorted_keys;
3070
3071 return sort;
3072}
Datum sort(PG_FUNCTION_ARGS)
Definition: _int_op.c:198
void cost_incremental_sort(Path *path, PlannerInfo *root, List *pathkeys, int presorted_keys, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition: costsize.c:2000
Path path
Definition: pathnodes.h:2341
Path * subpath
Definition: pathnodes.h:2342

References RelOptInfo::consider_parallel, cost_incremental_sort(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SortPath::path, Path::pathkeys, Path::pathtype, root, sort(), subpath(), SortPath::subpath, and work_mem.

Referenced by build_setop_child_paths(), create_one_window_path(), create_ordered_paths(), gather_grouping_paths(), generate_useful_gather_paths(), and make_ordered_path().
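
A hedged sketch of the pattern its callers (for example make_ordered_path()) follow: fall back to a full sort only when the input shares no leading pathkeys with the desired ordering. The helper name make_sorted() is illustrative, not part of the planner API.

static Path *
make_sorted(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
            List *pathkeys, double limit_tuples)
{
    int         presorted_keys;

    /* Already delivers the desired ordering? */
    if (pathkeys_count_contained_in(pathkeys, subpath->pathkeys,
                                    &presorted_keys))
        return subpath;

    /* Some leading keys are presorted: an incremental sort is enough. */
    if (presorted_keys > 0)
        return (Path *) create_incremental_sort_path(root, rel, subpath,
                                                     pathkeys, presorted_keys,
                                                     limit_tuples);

    /* No useful prefix: full sort. */
    return (Path *) create_sort_path(root, rel, subpath,
                                     pathkeys, limit_tuples);
}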

◆ create_index_path()

IndexPath * create_index_path ( PlannerInfo root,
IndexOptInfo index,
List indexclauses,
List indexorderbys,
List indexorderbycols,
List pathkeys,
ScanDirection  indexscandir,
bool  indexonly,
Relids  required_outer,
double  loop_count,
bool  partial_path 
)

Definition at line 1049 of file pathnode.c.

1060{
1061 IndexPath *pathnode = makeNode(IndexPath);
1062 RelOptInfo *rel = index->rel;
1063
1064 pathnode->path.pathtype = indexonly ? T_IndexOnlyScan : T_IndexScan;
1065 pathnode->path.parent = rel;
1066 pathnode->path.pathtarget = rel->reltarget;
1067 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1068 required_outer);
1069 pathnode->path.parallel_aware = false;
1070 pathnode->path.parallel_safe = rel->consider_parallel;
1071 pathnode->path.parallel_workers = 0;
1072 pathnode->path.pathkeys = pathkeys;
1073
1074 pathnode->indexinfo = index;
1075 pathnode->indexclauses = indexclauses;
1076 pathnode->indexorderbys = indexorderbys;
1077 pathnode->indexorderbycols = indexorderbycols;
1078 pathnode->indexscandir = indexscandir;
1079
1080 cost_index(pathnode, root, loop_count, partial_path);
1081
1082 return pathnode;
1083}
void cost_index(IndexPath *path, PlannerInfo *root, double loop_count, bool partial_path)
Definition: costsize.c:560
List * indexclauses
Definition: pathnodes.h:1845
ScanDirection indexscandir
Definition: pathnodes.h:1848
Path path
Definition: pathnodes.h:1843
List * indexorderbycols
Definition: pathnodes.h:1847
List * indexorderbys
Definition: pathnodes.h:1846
IndexOptInfo * indexinfo
Definition: pathnodes.h:1844

References RelOptInfo::consider_parallel, cost_index(), get_baserel_parampathinfo(), IndexPath::indexclauses, IndexPath::indexinfo, IndexPath::indexorderbycols, IndexPath::indexorderbys, IndexPath::indexscandir, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, IndexPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by build_index_paths(), and plan_cluster_use_sort().

◆ create_limit_path()

LimitPath * create_limit_path ( PlannerInfo root,
RelOptInfo rel,
Path subpath,
Node limitOffset,
Node limitCount,
LimitOption  limitOption,
int64  offset_est,
int64  count_est 
)

Definition at line 3982 of file pathnode.c.

3987{
3988 LimitPath *pathnode = makeNode(LimitPath);
3989
3990 pathnode->path.pathtype = T_Limit;
3991 pathnode->path.parent = rel;
3992 /* Limit doesn't project, so use source path's pathtarget */
3993 pathnode->path.pathtarget = subpath->pathtarget;
3994 /* For now, assume we are above any joins, so no parameterization */
3995 pathnode->path.param_info = NULL;
3996 pathnode->path.parallel_aware = false;
3997 pathnode->path.parallel_safe = rel->consider_parallel &&
3998 subpath->parallel_safe;
3999 pathnode->path.parallel_workers = subpath->parallel_workers;
4000 pathnode->path.rows = subpath->rows;
4001 pathnode->path.disabled_nodes = subpath->disabled_nodes;
4002 pathnode->path.startup_cost = subpath->startup_cost;
4003 pathnode->path.total_cost = subpath->total_cost;
4004 pathnode->path.pathkeys = subpath->pathkeys;
4005 pathnode->subpath = subpath;
4006 pathnode->limitOffset = limitOffset;
4007 pathnode->limitCount = limitCount;
4008 pathnode->limitOption = limitOption;
4009
4010 /*
4011 * Adjust the output rows count and costs according to the offset/limit.
4012 */
4013 adjust_limit_rows_costs(&pathnode->path.rows,
4014 &pathnode->path.startup_cost,
4015 &pathnode->path.total_cost,
4016 offset_est, count_est);
4017
4018 return pathnode;
4019}
void adjust_limit_rows_costs(double *rows, Cost *startup_cost, Cost *total_cost, int64 offset_est, int64 count_est)
Definition: pathnode.c:4038
Path path
Definition: pathnodes.h:2541
Path * subpath
Definition: pathnodes.h:2542
LimitOption limitOption
Definition: pathnodes.h:2545
Node * limitOffset
Definition: pathnodes.h:2543
Node * limitCount
Definition: pathnodes.h:2544

References adjust_limit_rows_costs(), RelOptInfo::consider_parallel, Path::disabled_nodes, LimitPath::limitCount, LimitPath::limitOffset, LimitPath::limitOption, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, LimitPath::path, Path::pathkeys, Path::pathtype, Path::rows, Path::startup_cost, subpath(), LimitPath::subpath, and Path::total_cost.

Referenced by create_final_distinct_paths(), create_partial_distinct_paths(), and grouping_planner().
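
A minimal usage sketch, assuming the final upper relation and its best path are already chosen and that offset_est/count_est were estimated from the LIMIT/OFFSET expressions beforehand (as preprocess_limit() does in grouping_planner()):

Query      *parse = root->parse;
LimitPath  *lpath;

lpath = create_limit_path(root, final_rel, best_path,
                          parse->limitOffset,   /* OFFSET expression or NULL */
                          parse->limitCount,    /* LIMIT expression or NULL */
                          parse->limitOption,
                          offset_est, count_est);
add_path(final_rel, (Path *) lpath);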

◆ create_lockrows_path()

LockRowsPath * create_lockrows_path ( PlannerInfo root,
RelOptInfo rel,
Path subpath,
List rowMarks,
int  epqParam 
)

Definition at line 3816 of file pathnode.c.

3818{
3819 LockRowsPath *pathnode = makeNode(LockRowsPath);
3820
3821 pathnode->path.pathtype = T_LockRows;
3822 pathnode->path.parent = rel;
3823 /* LockRows doesn't project, so use source path's pathtarget */
3824 pathnode->path.pathtarget = subpath->pathtarget;
3825 /* For now, assume we are above any joins, so no parameterization */
3826 pathnode->path.param_info = NULL;
3827 pathnode->path.parallel_aware = false;
3828 pathnode->path.parallel_safe = false;
3829 pathnode->path.parallel_workers = 0;
3830 pathnode->path.rows = subpath->rows;
3831
3832 /*
3833 * The result cannot be assumed sorted, since locking might cause the sort
3834 * key columns to be replaced with new values.
3835 */
3836 pathnode->path.pathkeys = NIL;
3837
3838 pathnode->subpath = subpath;
3839 pathnode->rowMarks = rowMarks;
3840 pathnode->epqParam = epqParam;
3841
3842 /*
3843 * We should charge something extra for the costs of row locking and
3844 * possible refetches, but it's hard to say how much. For now, use
3845 * cpu_tuple_cost per row.
3846 */
3847 pathnode->path.disabled_nodes = subpath->disabled_nodes;
3848 pathnode->path.startup_cost = subpath->startup_cost;
3849 pathnode->path.total_cost = subpath->total_cost +
3850 cpu_tuple_cost * subpath->rows;
3851
3852 return pathnode;
3853}
Path * subpath
Definition: pathnodes.h:2502
List * rowMarks
Definition: pathnodes.h:2503

References cpu_tuple_cost, Path::disabled_nodes, LockRowsPath::epqParam, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, LockRowsPath::path, Path::pathkeys, Path::pathtype, LockRowsPath::rowMarks, Path::rows, Path::startup_cost, subpath(), LockRowsPath::subpath, and Path::total_cost.

Referenced by grouping_planner().

◆ create_material_path()

MaterialPath * create_material_path ( RelOptInfo rel,
Path subpath 
)

Definition at line 1634 of file pathnode.c.

1635{
1636 MaterialPath *pathnode = makeNode(MaterialPath);
1637
1638 Assert(subpath->parent == rel);
1639
1640 pathnode->path.pathtype = T_Material;
1641 pathnode->path.parent = rel;
1642 pathnode->path.pathtarget = rel->reltarget;
1643 pathnode->path.param_info = subpath->param_info;
1644 pathnode->path.parallel_aware = false;
1645 pathnode->path.parallel_safe = rel->consider_parallel &&
1646 subpath->parallel_safe;
1647 pathnode->path.parallel_workers = subpath->parallel_workers;
1648 pathnode->path.pathkeys = subpath->pathkeys;
1649
1650 pathnode->subpath = subpath;
1651
1652 cost_material(&pathnode->path,
1653 subpath->disabled_nodes,
1654 subpath->startup_cost,
1655 subpath->total_cost,
1656 subpath->rows,
1657 subpath->pathtarget->width);
1658
1659 return pathnode;
1660}
void cost_material(Path *path, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double tuples, int width)
Definition: costsize.c:2483
Path * subpath
Definition: pathnodes.h:2116

References Assert(), RelOptInfo::consider_parallel, cost_material(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MaterialPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, subpath(), and MaterialPath::subpath.

Referenced by consider_parallel_nestloop(), match_unsorted_outer(), reparameterize_path(), and set_tablesample_rel_pathlist().
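
A short sketch of the typical call, assuming the caller (as match_unsorted_outer() does for the inner side of a nestloop) has decided that buffering the tuples is worth the extra copy; innerrel is an assumed RelOptInfo whose cheapest path is already set:

Path   *matpath;

matpath = (Path *) create_material_path(innerrel,
                                        innerrel->cheapest_total_path);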

◆ create_memoize_path()

MemoizePath * create_memoize_path ( PlannerInfo root,
RelOptInfo rel,
Path subpath,
List param_exprs,
List hash_operators,
bool  singlerow,
bool  binary_mode,
double  calls 
)

Definition at line 1667 of file pathnode.c.

1670{
1671 MemoizePath *pathnode = makeNode(MemoizePath);
1672
1673 Assert(subpath->parent == rel);
1674
1675 pathnode->path.pathtype = T_Memoize;
1676 pathnode->path.parent = rel;
1677 pathnode->path.pathtarget = rel->reltarget;
1678 pathnode->path.param_info = subpath->param_info;
1679 pathnode->path.parallel_aware = false;
1680 pathnode->path.parallel_safe = rel->consider_parallel &&
1681 subpath->parallel_safe;
1682 pathnode->path.parallel_workers = subpath->parallel_workers;
1683 pathnode->path.pathkeys = subpath->pathkeys;
1684
1685 pathnode->subpath = subpath;
1686 pathnode->hash_operators = hash_operators;
1687 pathnode->param_exprs = param_exprs;
1688 pathnode->singlerow = singlerow;
1689 pathnode->binary_mode = binary_mode;
1690 pathnode->calls = clamp_row_est(calls);
1691
1692 /*
1693 * For now we set est_entries to 0. cost_memoize_rescan() does all the
1694 * hard work to determine how many cache entries there are likely to be,
1695 * so it seems best to leave it up to that function to fill this field in.
1696 * If left at 0, the executor will make a guess at a good value.
1697 */
1698 pathnode->est_entries = 0;
1699
1700 /* we should not generate this path type when enable_memoize=false */
1701 Assert(enable_memoize);
1702 pathnode->path.disabled_nodes = subpath->disabled_nodes;
1703
1704 /*
1705 * Add a small additional charge for caching the first entry. All the
1706 * harder calculations for rescans are performed in cost_memoize_rescan().
1707 */
1708 pathnode->path.startup_cost = subpath->startup_cost + cpu_tuple_cost;
1709 pathnode->path.total_cost = subpath->total_cost + cpu_tuple_cost;
1710 pathnode->path.rows = subpath->rows;
1711
1712 return pathnode;
1713}
bool enable_memoize
Definition: costsize.c:155
bool singlerow
Definition: pathnodes.h:2130
List * hash_operators
Definition: pathnodes.h:2128
uint32 est_entries
Definition: pathnodes.h:2135
bool binary_mode
Definition: pathnodes.h:2132
Cardinality calls
Definition: pathnodes.h:2134
Path * subpath
Definition: pathnodes.h:2127
List * param_exprs
Definition: pathnodes.h:2129

References Assert(), MemoizePath::binary_mode, MemoizePath::calls, clamp_row_est(), RelOptInfo::consider_parallel, cpu_tuple_cost, Path::disabled_nodes, enable_memoize, MemoizePath::est_entries, MemoizePath::hash_operators, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MemoizePath::param_exprs, MemoizePath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, Path::rows, MemoizePath::singlerow, Path::startup_cost, subpath(), MemoizePath::subpath, and Path::total_cost.

Referenced by get_memoize_path(), and reparameterize_path().
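
A hedged sketch of how a caller such as get_memoize_path() might wrap a parameterized inner path; param_exprs (the outer-side expressions used as cache keys) and hash_operators (their hashable equality operators) are assumed to have been collected already from the inner path's parameterization:

MemoizePath *mpath;

mpath = create_memoize_path(root, innerrel, inner_path,
                            param_exprs, hash_operators,
                            false,              /* singlerow */
                            false,              /* binary_mode */
                            outer_path->rows);  /* expected number of calls */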

◆ create_merge_append_path()

MergeAppendPath * create_merge_append_path ( PlannerInfo root,
RelOptInfo rel,
List subpaths,
List pathkeys,
Relids  required_outer 
)

Definition at line 1471 of file pathnode.c.

1476{
1477 MergeAppendPath *pathnode = makeNode(MergeAppendPath);
1478 int input_disabled_nodes;
1479 Cost input_startup_cost;
1480 Cost input_total_cost;
1481 ListCell *l;
1482
1483 /*
1484 * We don't currently support parameterized MergeAppend paths, as
1485 * explained in the comments for generate_orderedappend_paths.
1486 */
1487 Assert(bms_is_empty(rel->lateral_relids) && bms_is_empty(required_outer));
1488
1489 pathnode->path.pathtype = T_MergeAppend;
1490 pathnode->path.parent = rel;
1491 pathnode->path.pathtarget = rel->reltarget;
1492 pathnode->path.param_info = NULL;
1493 pathnode->path.parallel_aware = false;
1494 pathnode->path.parallel_safe = rel->consider_parallel;
1495 pathnode->path.parallel_workers = 0;
1496 pathnode->path.pathkeys = pathkeys;
1497 pathnode->subpaths = subpaths;
1498
1499 /*
1500 * Apply query-wide LIMIT if known and path is for sole base relation.
1501 * (Handling this at this low level is a bit klugy.)
1502 */
1503 if (bms_equal(rel->relids, root->all_query_rels))
1504 pathnode->limit_tuples = root->limit_tuples;
1505 else
1506 pathnode->limit_tuples = -1.0;
1507
1508 /*
1509 * Add up the sizes and costs of the input paths.
1510 */
1511 pathnode->path.rows = 0;
1512 input_disabled_nodes = 0;
1513 input_startup_cost = 0;
1514 input_total_cost = 0;
1515 foreach(l, subpaths)
1516 {
1517 Path *subpath = (Path *) lfirst(l);
1518
1519 /* All child paths should be unparameterized */
1520 Assert(bms_is_empty(PATH_REQ_OUTER(subpath)));
1521
1522 pathnode->path.rows += subpath->rows;
1523 pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1524 subpath->parallel_safe;
1525
1526 if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
1527 {
1528 /* Subpath is adequately ordered, we won't need to sort it */
1529 input_disabled_nodes += subpath->disabled_nodes;
1530 input_startup_cost += subpath->startup_cost;
1531 input_total_cost += subpath->total_cost;
1532 }
1533 else
1534 {
1535 /* We'll need to insert a Sort node, so include cost for that */
1536 Path sort_path; /* dummy for result of cost_sort */
1537
1538 cost_sort(&sort_path,
1539 root,
1540 pathkeys,
1541 subpath->disabled_nodes,
1542 subpath->total_cost,
1543 subpath->rows,
1544 subpath->pathtarget->width,
1545 0.0,
1546 work_mem,
1547 pathnode->limit_tuples);
1548 input_disabled_nodes += sort_path.disabled_nodes;
1549 input_startup_cost += sort_path.startup_cost;
1550 input_total_cost += sort_path.total_cost;
1551 }
1552 }
1553
1554 /*
1555 * Now we can compute total costs of the MergeAppend. If there's exactly
1556 * one child path and its parallel awareness matches that of the
1557 * MergeAppend, then the MergeAppend is a no-op and will be discarded
1558 * later (in setrefs.c); otherwise we do the normal cost calculation.
1559 */
1560 if (list_length(subpaths) == 1 &&
1561 ((Path *) linitial(subpaths))->parallel_aware ==
1562 pathnode->path.parallel_aware)
1563 {
1564 pathnode->path.disabled_nodes = input_disabled_nodes;
1565 pathnode->path.startup_cost = input_startup_cost;
1566 pathnode->path.total_cost = input_total_cost;
1567 }
1568 else
1569 cost_merge_append(&pathnode->path, root,
1570 pathkeys, list_length(subpaths),
1571 input_disabled_nodes,
1572 input_startup_cost, input_total_cost,
1573 pathnode->path.rows);
1574
1575 return pathnode;
1576}
void cost_merge_append(Path *path, PlannerInfo *root, List *pathkeys, int n_streams, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double tuples)
Definition: costsize.c:2432
Cardinality limit_tuples
Definition: pathnodes.h:2091

References Assert(), bms_equal(), bms_is_empty, RelOptInfo::consider_parallel, cost_merge_append(), cost_sort(), Path::disabled_nodes, RelOptInfo::lateral_relids, lfirst, MergeAppendPath::limit_tuples, linitial, list_length(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MergeAppendPath::path, PATH_REQ_OUTER, Path::pathkeys, pathkeys_contained_in(), Path::pathtype, RelOptInfo::relids, RelOptInfo::reltarget, root, Path::rows, Path::startup_cost, subpath(), MergeAppendPath::subpaths, Path::total_cost, and work_mem.

Referenced by generate_orderedappend_paths(), and generate_union_paths().
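
A sketch of the generate_orderedappend_paths() usage: gather one path per child relation and let create_merge_append_path() charge for any Sort nodes the children still need; child_paths and pathkeys are assumed to have been assembled by the caller:

MergeAppendPath *mpath;

mpath = create_merge_append_path(root, parent_rel, child_paths,
                                 pathkeys, NULL /* required_outer */);
add_path(parent_rel, (Path *) mpath);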

◆ create_mergejoin_path()

MergePath * create_mergejoin_path ( PlannerInfo root,
RelOptInfo joinrel,
JoinType  jointype,
JoinCostWorkspace workspace,
JoinPathExtraData extra,
Path outer_path,
Path inner_path,
List restrict_clauses,
List pathkeys,
Relids  required_outer,
List mergeclauses,
List outersortkeys,
List innersortkeys,
int  outer_presorted_keys 
)

Definition at line 2632 of file pathnode.c.

2646{
2647 MergePath *pathnode = makeNode(MergePath);
2648
2649 pathnode->jpath.path.pathtype = T_MergeJoin;
2650 pathnode->jpath.path.parent = joinrel;
2651 pathnode->jpath.path.pathtarget = joinrel->reltarget;
2652 pathnode->jpath.path.param_info =
2653 get_joinrel_parampathinfo(root,
2654 joinrel,
2655 outer_path,
2656 inner_path,
2657 extra->sjinfo,
2658 required_outer,
2659 &restrict_clauses);
2660 pathnode->jpath.path.parallel_aware = false;
2661 pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2662 outer_path->parallel_safe && inner_path->parallel_safe;
2663 /* This is a foolish way to estimate parallel_workers, but for now... */
2664 pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2665 pathnode->jpath.path.pathkeys = pathkeys;
2666 pathnode->jpath.jointype = jointype;
2667 pathnode->jpath.inner_unique = extra->inner_unique;
2668 pathnode->jpath.outerjoinpath = outer_path;
2669 pathnode->jpath.innerjoinpath = inner_path;
2670 pathnode->jpath.joinrestrictinfo = restrict_clauses;
2671 pathnode->path_mergeclauses = mergeclauses;
2672 pathnode->outersortkeys = outersortkeys;
2673 pathnode->innersortkeys = innersortkeys;
2674 pathnode->outer_presorted_keys = outer_presorted_keys;
2675 /* pathnode->skip_mark_restore will be set by final_cost_mergejoin */
2676 /* pathnode->materialize_inner will be set by final_cost_mergejoin */
2677
2678 final_cost_mergejoin(root, pathnode, workspace, extra);
2679
2680 return pathnode;
2681}
void final_cost_mergejoin(PlannerInfo *root, MergePath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition: costsize.c:3837
List * outersortkeys
Definition: pathnodes.h:2274
List * innersortkeys
Definition: pathnodes.h:2275
JoinPath jpath
Definition: pathnodes.h:2272
int outer_presorted_keys
Definition: pathnodes.h:2276
List * path_mergeclauses
Definition: pathnodes.h:2273

References RelOptInfo::consider_parallel, final_cost_mergejoin(), get_joinrel_parampathinfo(), JoinPath::inner_unique, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, MergePath::innersortkeys, JoinPath::joinrestrictinfo, JoinPath::jointype, MergePath::jpath, makeNode, MergePath::outer_presorted_keys, JoinPath::outerjoinpath, MergePath::outersortkeys, Path::parallel_safe, Path::parallel_workers, MergePath::path_mergeclauses, RelOptInfo::reltarget, root, and JoinPathExtraData::sjinfo.

Referenced by try_mergejoin_path(), and try_partial_mergejoin_path().

◆ create_minmaxagg_path()

MinMaxAggPath * create_minmaxagg_path ( PlannerInfo root,
RelOptInfo rel,
PathTarget target,
List mmaggregates,
List quals 
)

Definition at line 3489 of file pathnode.c.

3494{
3495 MinMaxAggPath *pathnode = makeNode(MinMaxAggPath);
3496 Cost initplan_cost;
3497 int initplan_disabled_nodes = 0;
3498 ListCell *lc;
3499
3500 /* The topmost generated Plan node will be a Result */
3501 pathnode->path.pathtype = T_Result;
3502 pathnode->path.parent = rel;
3503 pathnode->path.pathtarget = target;
3504 /* For now, assume we are above any joins, so no parameterization */
3505 pathnode->path.param_info = NULL;
3506 pathnode->path.parallel_aware = false;
3507 pathnode->path.parallel_safe = true; /* might change below */
3508 pathnode->path.parallel_workers = 0;
3509 /* Result is one unordered row */
3510 pathnode->path.rows = 1;
3511 pathnode->path.pathkeys = NIL;
3512
3513 pathnode->mmaggregates = mmaggregates;
3514 pathnode->quals = quals;
3515
3516 /* Calculate cost of all the initplans, and check parallel safety */
3517 initplan_cost = 0;
3518 foreach(lc, mmaggregates)
3519 {
3520 MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
3521
3522 initplan_disabled_nodes += mminfo->path->disabled_nodes;
3523 initplan_cost += mminfo->pathcost;
3524 if (!mminfo->path->parallel_safe)
3525 pathnode->path.parallel_safe = false;
3526 }
3527
3528 /* add tlist eval cost for each output row, plus cpu_tuple_cost */
3529 pathnode->path.disabled_nodes = initplan_disabled_nodes;
3530 pathnode->path.startup_cost = initplan_cost + target->cost.startup;
3531 pathnode->path.total_cost = initplan_cost + target->cost.startup +
3532 target->cost.per_tuple + cpu_tuple_cost;
3533
3534 /*
3535 * Add cost of qual, if any --- but we ignore its selectivity, since our
3536 * rowcount estimate should be 1 no matter what the qual is.
3537 */
3538 if (quals)
3539 {
3540 QualCost qual_cost;
3541
3542 cost_qual_eval(&qual_cost, quals, root);
3543 pathnode->path.startup_cost += qual_cost.startup;
3544 pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
3545 }
3546
3547 /*
3548 * If the initplans were all parallel-safe, also check safety of the
3549 * target and quals. (The Result node itself isn't parallelizable, but if
3550 * we are in a subquery then it can be useful for the outer query to know
3551 * that this one is parallel-safe.)
3552 */
3553 if (pathnode->path.parallel_safe)
3554 pathnode->path.parallel_safe =
3555 is_parallel_safe(root, (Node *) target->exprs) &&
3556 is_parallel_safe(root, (Node *) quals);
3557
3558 return pathnode;
3559}
List * quals
Definition: pathnodes.h:2452
List * mmaggregates
Definition: pathnodes.h:2451

References PathTarget::cost, cost_qual_eval(), cpu_tuple_cost, Path::disabled_nodes, PathTarget::exprs, is_parallel_safe(), lfirst, makeNode, MinMaxAggPath::mmaggregates, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MinMaxAggPath::path, MinMaxAggInfo::path, MinMaxAggInfo::pathcost, Path::pathkeys, Path::pathtype, QualCost::per_tuple, MinMaxAggPath::quals, root, Path::rows, QualCost::startup, Path::startup_cost, and Path::total_cost.

Referenced by preprocess_minmax_aggregates().

◆ create_modifytable_path()

ModifyTablePath * create_modifytable_path ( PlannerInfo root,
RelOptInfo rel,
Path subpath,
CmdType  operation,
bool  canSetTag,
Index  nominalRelation,
Index  rootRelation,
bool  partColsUpdated,
List resultRelations,
List updateColnosLists,
List withCheckOptionLists,
List returningLists,
List rowMarks,
OnConflictExpr onconflict,
List mergeActionLists,
List mergeJoinConditions,
int  epqParam 
)

Definition at line 3880 of file pathnode.c.

3891{
3892 ModifyTablePath *pathnode = makeNode(ModifyTablePath);
3893
3894 Assert(operation == CMD_MERGE ||
3895 (operation == CMD_UPDATE ?
3896 list_length(resultRelations) == list_length(updateColnosLists) :
3897 updateColnosLists == NIL));
3898 Assert(withCheckOptionLists == NIL ||
3899 list_length(resultRelations) == list_length(withCheckOptionLists));
3900 Assert(returningLists == NIL ||
3901 list_length(resultRelations) == list_length(returningLists));
3902
3903 pathnode->path.pathtype = T_ModifyTable;
3904 pathnode->path.parent = rel;
3905 /* pathtarget is not interesting, just make it minimally valid */
3906 pathnode->path.pathtarget = rel->reltarget;
3907 /* For now, assume we are above any joins, so no parameterization */
3908 pathnode->path.param_info = NULL;
3909 pathnode->path.parallel_aware = false;
3910 pathnode->path.parallel_safe = false;
3911 pathnode->path.parallel_workers = 0;
3912 pathnode->path.pathkeys = NIL;
3913
3914 /*
3915 * Compute cost & rowcount as subpath cost & rowcount (if RETURNING)
3916 *
3917 * Currently, we don't charge anything extra for the actual table
3918 * modification work, nor for the WITH CHECK OPTIONS or RETURNING
3919 * expressions if any. It would only be window dressing, since
3920 * ModifyTable is always a top-level node and there is no way for the
3921 * costs to change any higher-level planning choices. But we might want
3922 * to make it look better sometime.
3923 */
3924 pathnode->path.disabled_nodes = subpath->disabled_nodes;
3925 pathnode->path.startup_cost = subpath->startup_cost;
3926 pathnode->path.total_cost = subpath->total_cost;
3927 if (returningLists != NIL)
3928 {
3929 pathnode->path.rows = subpath->rows;
3930
3931 /*
3932 * Set width to match the subpath output. XXX this is totally wrong:
3933 * we should return an average of the RETURNING tlist widths. But
3934 * it's what happened historically, and improving it is a task for
3935 * another day. (Again, it's mostly window dressing.)
3936 */
3937 pathnode->path.pathtarget->width = subpath->pathtarget->width;
3938 }
3939 else
3940 {
3941 pathnode->path.rows = 0;
3942 pathnode->path.pathtarget->width = 0;
3943 }
3944
3945 pathnode->subpath = subpath;
3946 pathnode->operation = operation;
3947 pathnode->canSetTag = canSetTag;
3948 pathnode->nominalRelation = nominalRelation;
3949 pathnode->rootRelation = rootRelation;
3950 pathnode->partColsUpdated = partColsUpdated;
3951 pathnode->resultRelations = resultRelations;
3952 pathnode->updateColnosLists = updateColnosLists;
3953 pathnode->withCheckOptionLists = withCheckOptionLists;
3954 pathnode->returningLists = returningLists;
3955 pathnode->rowMarks = rowMarks;
3956 pathnode->onconflict = onconflict;
3957 pathnode->epqParam = epqParam;
3958 pathnode->mergeActionLists = mergeActionLists;
3959 pathnode->mergeJoinConditions = mergeJoinConditions;
3960
3961 return pathnode;
3962}
@ CMD_MERGE
Definition: nodes.h:275
@ CMD_UPDATE
Definition: nodes.h:272
bool partColsUpdated
Definition: pathnodes.h:2522
List * returningLists
Definition: pathnodes.h:2526
List * resultRelations
Definition: pathnodes.h:2523
List * withCheckOptionLists
Definition: pathnodes.h:2525
List * mergeJoinConditions
Definition: pathnodes.h:2532
List * updateColnosLists
Definition: pathnodes.h:2524
OnConflictExpr * onconflict
Definition: pathnodes.h:2528
CmdType operation
Definition: pathnodes.h:2518
Index rootRelation
Definition: pathnodes.h:2521
Index nominalRelation
Definition: pathnodes.h:2520
List * mergeActionLists
Definition: pathnodes.h:2530

References Assert(), ModifyTablePath::canSetTag, CMD_MERGE, CMD_UPDATE, Path::disabled_nodes, ModifyTablePath::epqParam, list_length(), makeNode, ModifyTablePath::mergeActionLists, ModifyTablePath::mergeJoinConditions, NIL, ModifyTablePath::nominalRelation, ModifyTablePath::onconflict, ModifyTablePath::operation, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ModifyTablePath::partColsUpdated, ModifyTablePath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, ModifyTablePath::resultRelations, ModifyTablePath::returningLists, ModifyTablePath::rootRelation, ModifyTablePath::rowMarks, Path::rows, Path::startup_cost, subpath(), ModifyTablePath::subpath, Path::total_cost, ModifyTablePath::updateColnosLists, and ModifyTablePath::withCheckOptionLists.

Referenced by grouping_planner().

◆ create_namedtuplestorescan_path()

Path * create_namedtuplestorescan_path ( PlannerInfo root,
RelOptInfo rel,
Relids  required_outer 
)

Definition at line 2222 of file pathnode.c.

2224{
2225 Path *pathnode = makeNode(Path);
2226
2227 pathnode->pathtype = T_NamedTuplestoreScan;
2228 pathnode->parent = rel;
2229 pathnode->pathtarget = rel->reltarget;
2230 pathnode->param_info = get_baserel_parampathinfo(root, rel,
2231 required_outer);
2232 pathnode->parallel_aware = false;
2233 pathnode->parallel_safe = rel->consider_parallel;
2234 pathnode->parallel_workers = 0;
2235 pathnode->pathkeys = NIL; /* result is always unordered */
2236
2237 cost_namedtuplestorescan(pathnode, root, rel, pathnode->param_info);
2238
2239 return pathnode;
2240}
void cost_namedtuplestorescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1750

References RelOptInfo::consider_parallel, cost_namedtuplestorescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_namedtuplestore_pathlist().

◆ create_nestloop_path()

NestPath * create_nestloop_path ( PlannerInfo root,
RelOptInfo joinrel,
JoinType  jointype,
JoinCostWorkspace workspace,
JoinPathExtraData extra,
Path outer_path,
Path inner_path,
List restrict_clauses,
List pathkeys,
Relids  required_outer 
)

Definition at line 2535 of file pathnode.c.

2545{
2546 NestPath *pathnode = makeNode(NestPath);
2547 Relids inner_req_outer = PATH_REQ_OUTER(inner_path);
2548 Relids outerrelids;
2549
2550 /*
2551 * Paths are parameterized by top-level parents, so run parameterization
2552 * tests on the parent relids.
2553 */
2554 if (outer_path->parent->top_parent_relids)
2555 outerrelids = outer_path->parent->top_parent_relids;
2556 else
2557 outerrelids = outer_path->parent->relids;
2558
2559 /*
2560 * If the inner path is parameterized by the outer, we must drop any
2561 * restrict_clauses that are due to be moved into the inner path. We have
2562 * to do this now, rather than postpone the work till createplan time,
2563 * because the restrict_clauses list can affect the size and cost
2564 * estimates for this path. We detect such clauses by checking for serial
2565 * number match to clauses already enforced in the inner path.
2566 */
2567 if (bms_overlap(inner_req_outer, outerrelids))
2568 {
2569 Bitmapset *enforced_serials = get_param_path_clause_serials(inner_path);
2570 List *jclauses = NIL;
2571 ListCell *lc;
2572
2573 foreach(lc, restrict_clauses)
2574 {
2575 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
2576
2577 if (!bms_is_member(rinfo->rinfo_serial, enforced_serials))
2578 jclauses = lappend(jclauses, rinfo);
2579 }
2580 restrict_clauses = jclauses;
2581 }
2582
2583 pathnode->jpath.path.pathtype = T_NestLoop;
2584 pathnode->jpath.path.parent = joinrel;
2585 pathnode->jpath.path.pathtarget = joinrel->reltarget;
2586 pathnode->jpath.path.param_info =
2587 get_joinrel_parampathinfo(root,
2588 joinrel,
2589 outer_path,
2590 inner_path,
2591 extra->sjinfo,
2592 required_outer,
2593 &restrict_clauses);
2594 pathnode->jpath.path.parallel_aware = false;
2595 pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2596 outer_path->parallel_safe && inner_path->parallel_safe;
2597 /* This is a foolish way to estimate parallel_workers, but for now... */
2598 pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2599 pathnode->jpath.path.pathkeys = pathkeys;
2600 pathnode->jpath.jointype = jointype;
2601 pathnode->jpath.inner_unique = extra->inner_unique;
2602 pathnode->jpath.outerjoinpath = outer_path;
2603 pathnode->jpath.innerjoinpath = inner_path;
2604 pathnode->jpath.joinrestrictinfo = restrict_clauses;
2605
2606 final_cost_nestloop(root, pathnode, workspace, extra);
2607
2608 return pathnode;
2609}
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510
void final_cost_nestloop(PlannerInfo *root, NestPath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition: costsize.c:3349
Bitmapset * get_param_path_clause_serials(Path *path)
Definition: relnode.c:1910
JoinPath jpath
Definition: pathnodes.h:2226
int rinfo_serial
Definition: pathnodes.h:2777

References bms_is_member(), bms_overlap(), RelOptInfo::consider_parallel, final_cost_nestloop(), get_joinrel_parampathinfo(), get_param_path_clause_serials(), JoinPath::inner_unique, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, JoinPath::joinrestrictinfo, JoinPath::jointype, NestPath::jpath, lappend(), lfirst, makeNode, NIL, JoinPath::outerjoinpath, Path::parallel_safe, Path::parallel_workers, PATH_REQ_OUTER, RelOptInfo::reltarget, RestrictInfo::rinfo_serial, root, and JoinPathExtraData::sjinfo.

Referenced by try_nestloop_path(), and try_partial_nestloop_path().

◆ create_projection_path()

ProjectionPath * create_projection_path ( PlannerInfo root,
RelOptInfo rel,
Path subpath,
PathTarget target 
)

Definition at line 2766 of file pathnode.c.

2770{
2771 ProjectionPath *pathnode = makeNode(ProjectionPath);
2772 PathTarget *oldtarget;
2773
2774 /*
2775 * We mustn't put a ProjectionPath directly above another; it's useless
2776 * and will confuse create_projection_plan. Rather than making sure all
2777 * callers handle that, let's implement it here, by stripping off any
2778 * ProjectionPath in what we're given. Given this rule, there won't be
2779 * more than one.
2780 */
2781 if (IsA(subpath, ProjectionPath))
2782 {
2783 ProjectionPath *subpp = (ProjectionPath *) subpath;
2784
2785 Assert(subpp->path.parent == rel);
2786 subpath = subpp->subpath;
2787 Assert(!IsA(subpath, ProjectionPath));
2788 }
2789
2790 pathnode->path.pathtype = T_Result;
2791 pathnode->path.parent = rel;
2792 pathnode->path.pathtarget = target;
2793 /* For now, assume we are above any joins, so no parameterization */
2794 pathnode->path.param_info = NULL;
2795 pathnode->path.parallel_aware = false;
2796 pathnode->path.parallel_safe = rel->consider_parallel &&
2797 subpath->parallel_safe &&
2798 is_parallel_safe(root, (Node *) target->exprs);
2799 pathnode->path.parallel_workers = subpath->parallel_workers;
2800 /* Projection does not change the sort order */
2801 pathnode->path.pathkeys = subpath->pathkeys;
2802
2803 pathnode->subpath = subpath;
2804
2805 /*
2806 * We might not need a separate Result node. If the input plan node type
2807 * can project, we can just tell it to project something else. Or, if it
2808 * can't project but the desired target has the same expression list as
2809 * what the input will produce anyway, we can still give it the desired
2810 * tlist (possibly changing its ressortgroupref labels, but nothing else).
2811 * Note: in the latter case, create_projection_plan has to recheck our
2812 * conclusion; see comments therein.
2813 */
2814 oldtarget = subpath->pathtarget;
2815 if (is_projection_capable_path(subpath) ||
2816 equal(oldtarget->exprs, target->exprs))
2817 {
2818 /* No separate Result node needed */
2819 pathnode->dummypp = true;
2820
2821 /*
2822 * Set cost of plan as subpath's cost, adjusted for tlist replacement.
2823 */
2824 pathnode->path.rows = subpath->rows;
2825 pathnode->path.disabled_nodes = subpath->disabled_nodes;
2826 pathnode->path.startup_cost = subpath->startup_cost +
2827 (target->cost.startup - oldtarget->cost.startup);
2828 pathnode->path.total_cost = subpath->total_cost +
2829 (target->cost.startup - oldtarget->cost.startup) +
2830 (target->cost.per_tuple - oldtarget->cost.per_tuple) * subpath->rows;
2831 }
2832 else
2833 {
2834 /* We really do need the Result node */
2835 pathnode->dummypp = false;
2836
2837 /*
2838 * The Result node's cost is cpu_tuple_cost per row, plus the cost of
2839 * evaluating the tlist. There is no qual to worry about.
2840 */
2841 pathnode->path.rows = subpath->rows;
2842 pathnode->path.disabled_nodes = subpath->disabled_nodes;
2843 pathnode->path.startup_cost = subpath->startup_cost +
2844 target->cost.startup;
2845 pathnode->path.total_cost = subpath->total_cost +
2846 target->cost.startup +
2847 (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows;
2848 }
2849
2850 return pathnode;
2851}
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:223
Path * subpath
Definition: pathnodes.h:2316

References Assert(), RelOptInfo::consider_parallel, PathTarget::cost, cpu_tuple_cost, Path::disabled_nodes, ProjectionPath::dummypp, equal(), PathTarget::exprs, is_parallel_safe(), is_projection_capable_path(), IsA, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ProjectionPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), ProjectionPath::subpath, and Path::total_cost.

Referenced by add_paths_with_pathkeys_for_rel(), adjust_paths_for_srfs(), apply_projection_to_path(), apply_scanjoin_target_to_paths(), and recurse_set_operations().
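
A minimal sketch: attach a new tlist to an existing path. If the input node type can project, or already produces an equal() expression list, the path merely adjusts costs (dummypp) and no Result node is emitted at plan time. The PathTarget (here called scanjoin_target) is assumed to have been built elsewhere, e.g. with create_pathtarget():

ProjectionPath *ppath;

ppath = create_projection_path(root, rel, subpath, scanjoin_target);
add_path(rel, (Path *) ppath);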

◆ create_recursiveunion_path()

RecursiveUnionPath * create_recursiveunion_path ( PlannerInfo root,
RelOptInfo rel,
Path leftpath,
Path rightpath,
PathTarget target,
List distinctList,
int  wtParam,
double  numGroups 
)

Definition at line 3771 of file pathnode.c.

3779{
3780 RecursiveUnionPath *pathnode = makeNode(RecursiveUnionPath);
3781
3782 pathnode->path.pathtype = T_RecursiveUnion;
3783 pathnode->path.parent = rel;
3784 pathnode->path.pathtarget = target;
3785 /* For now, assume we are above any joins, so no parameterization */
3786 pathnode->path.param_info = NULL;
3787 pathnode->path.parallel_aware = false;
3788 pathnode->path.parallel_safe = rel->consider_parallel &&
3789 leftpath->parallel_safe && rightpath->parallel_safe;
3790 /* Foolish, but we'll do it like joins for now: */
3791 pathnode->path.parallel_workers = leftpath->parallel_workers;
3792 /* RecursiveUnion result is always unsorted */
3793 pathnode->path.pathkeys = NIL;
3794
3795 pathnode->leftpath = leftpath;
3796 pathnode->rightpath = rightpath;
3797 pathnode->distinctList = distinctList;
3798 pathnode->wtParam = wtParam;
3799 pathnode->numGroups = numGroups;
3800
3801 cost_recursive_union(&pathnode->path, leftpath, rightpath);
3802
3803 return pathnode;
3804}
void cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
Definition: costsize.c:1826
Cardinality numGroups
Definition: pathnodes.h:2493

References RelOptInfo::consider_parallel, cost_recursive_union(), RecursiveUnionPath::distinctList, RecursiveUnionPath::leftpath, makeNode, NIL, RecursiveUnionPath::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, RecursiveUnionPath::path, Path::pathkeys, Path::pathtype, RecursiveUnionPath::rightpath, and RecursiveUnionPath::wtParam.

Referenced by generate_recursion_path().

◆ create_resultscan_path()

Path * create_resultscan_path ( PlannerInfo root,
RelOptInfo rel,
Relids  required_outer 
)

Definition at line 2248 of file pathnode.c.

2250{
2251 Path *pathnode = makeNode(Path);
2252
2253 pathnode->pathtype = T_Result;
2254 pathnode->parent = rel;
2255 pathnode->pathtarget = rel->reltarget;
2256 pathnode->param_info = get_baserel_parampathinfo(root, rel,
2257 required_outer);
2258 pathnode->parallel_aware = false;
2259 pathnode->parallel_safe = rel->consider_parallel;
2260 pathnode->parallel_workers = 0;
2261 pathnode->pathkeys = NIL; /* result is always unordered */
2262
2263 cost_resultscan(pathnode, root, rel, pathnode->param_info);
2264
2265 return pathnode;
2266}
void cost_resultscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1788

References RelOptInfo::consider_parallel, cost_resultscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by reparameterize_path(), and set_result_pathlist().

◆ create_samplescan_path()

Path * create_samplescan_path ( PlannerInfo root,
RelOptInfo rel,
Relids  required_outer 
)

Definition at line 1008 of file pathnode.c.

1009{
1010 Path *pathnode = makeNode(Path);
1011
1012 pathnode->pathtype = T_SampleScan;
1013 pathnode->parent = rel;
1014 pathnode->pathtarget = rel->reltarget;
1015 pathnode->param_info = get_baserel_parampathinfo(root, rel,
1016 required_outer);
1017 pathnode->parallel_aware = false;
1018 pathnode->parallel_safe = rel->consider_parallel;
1019 pathnode->parallel_workers = 0;
1020 pathnode->pathkeys = NIL; /* samplescan has unordered result */
1021
1022 cost_samplescan(pathnode, root, rel, pathnode->param_info);
1023
1024 return pathnode;
1025}
void cost_samplescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:370

References RelOptInfo::consider_parallel, cost_samplescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by reparameterize_path(), and set_tablesample_rel_pathlist().

◆ create_seqscan_path()

Path * create_seqscan_path ( PlannerInfo root,
RelOptInfo rel,
Relids  required_outer,
int  parallel_workers 
)

Definition at line 983 of file pathnode.c.

985{
986 Path *pathnode = makeNode(Path);
987
988 pathnode->pathtype = T_SeqScan;
989 pathnode->parent = rel;
990 pathnode->pathtarget = rel->reltarget;
991 pathnode->param_info = get_baserel_parampathinfo(root, rel,
992 required_outer);
993 pathnode->parallel_aware = (parallel_workers > 0);
994 pathnode->parallel_safe = rel->consider_parallel;
995 pathnode->parallel_workers = parallel_workers;
996 pathnode->pathkeys = NIL; /* seqscan has unordered result */
997
998 cost_seqscan(pathnode, root, rel, pathnode->param_info);
999
1000 return pathnode;
1001}
void cost_seqscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:295

References RelOptInfo::consider_parallel, cost_seqscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by create_plain_partial_paths(), plan_cluster_use_sort(), reparameterize_path(), and set_plain_rel_pathlist().
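
The canonical pattern, mirroring set_plain_rel_pathlist(): add a non-parallel sequential scan for a base relation, parameterized only by the rel's lateral references:

Relids  required_outer = rel->lateral_relids;

add_path(rel, create_seqscan_path(root, rel, required_outer, 0));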

◆ create_set_projection_path()

ProjectSetPath * create_set_projection_path ( PlannerInfo root,
RelOptInfo rel,
Path subpath,
PathTarget target 
)

Definition at line 2965 of file pathnode.c.

2969{
2970 ProjectSetPath *pathnode = makeNode(ProjectSetPath);
2971 double tlist_rows;
2972 ListCell *lc;
2973
2974 pathnode->path.pathtype = T_ProjectSet;
2975 pathnode->path.parent = rel;
2976 pathnode->path.pathtarget = target;
2977 /* For now, assume we are above any joins, so no parameterization */
2978 pathnode->path.param_info = NULL;
2979 pathnode->path.parallel_aware = false;
2980 pathnode->path.parallel_safe = rel->consider_parallel &&
2981 subpath->parallel_safe &&
2982 is_parallel_safe(root, (Node *) target->exprs);
2983 pathnode->path.parallel_workers = subpath->parallel_workers;
2984 /* Projection does not change the sort order XXX? */
2985 pathnode->path.pathkeys = subpath->pathkeys;
2986
2987 pathnode->subpath = subpath;
2988
2989 /*
2990 * Estimate number of rows produced by SRFs for each row of input; if
2991 * there's more than one in this node, use the maximum.
2992 */
2993 tlist_rows = 1;
2994 foreach(lc, target->exprs)
2995 {
2996 Node *node = (Node *) lfirst(lc);
2997 double itemrows;
2998
2999 itemrows = expression_returns_set_rows(root, node);
3000 if (tlist_rows < itemrows)
3001 tlist_rows = itemrows;
3002 }
3003
3004 /*
3005 * In addition to the cost of evaluating the tlist, charge cpu_tuple_cost
3006 * per input row, and half of cpu_tuple_cost for each added output row.
3007 * This is slightly bizarre maybe, but it's what 9.6 did; we may revisit
3008 * this estimate later.
3009 */
3010 pathnode->path.disabled_nodes = subpath->disabled_nodes;
3011 pathnode->path.rows = subpath->rows * tlist_rows;
3012 pathnode->path.startup_cost = subpath->startup_cost +
3013 target->cost.startup;
3014 pathnode->path.total_cost = subpath->total_cost +
3015 target->cost.startup +
3016 (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows +
3017 (pathnode->path.rows - subpath->rows) * cpu_tuple_cost / 2;
3018
3019 return pathnode;
3020}
double expression_returns_set_rows(PlannerInfo *root, Node *clause)
Definition: clauses.c:290
Path * subpath
Definition: pathnodes.h:2328

References RelOptInfo::consider_parallel, PathTarget::cost, cpu_tuple_cost, Path::disabled_nodes, expression_returns_set_rows(), PathTarget::exprs, is_parallel_safe(), lfirst, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ProjectSetPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), ProjectSetPath::subpath, and Path::total_cost.

Referenced by adjust_paths_for_srfs().
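
A sketch of the per-level choice adjust_paths_for_srfs() makes: a ProjectSet path is only needed when the target actually computes set-returning functions; contains_srfs is assumed to have been determined earlier (the planner derives it via split_pathtarget_at_srfs()):

Path   *newpath;

if (contains_srfs)
    newpath = (Path *) create_set_projection_path(root, rel,
                                                  subpath, target);
else
    newpath = (Path *) create_projection_path(root, rel,
                                              subpath, target);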

◆ create_setop_path()

SetOpPath * create_setop_path ( PlannerInfo root,
RelOptInfo rel,
Path leftpath,
Path rightpath,
SetOpCmd  cmd,
SetOpStrategy  strategy,
List groupList,
double  numGroups,
double  outputRows 
)

Definition at line 3653 of file pathnode.c.

3662{
3663 SetOpPath *pathnode = makeNode(SetOpPath);
3664
3665 pathnode->path.pathtype = T_SetOp;
3666 pathnode->path.parent = rel;
3667 pathnode->path.pathtarget = rel->reltarget;
3668 /* For now, assume we are above any joins, so no parameterization */
3669 pathnode->path.param_info = NULL;
3670 pathnode->path.parallel_aware = false;
3671 pathnode->path.parallel_safe = rel->consider_parallel &&
3672 leftpath->parallel_safe && rightpath->parallel_safe;
3673 pathnode->path.parallel_workers =
3674 leftpath->parallel_workers + rightpath->parallel_workers;
3675 /* SetOp preserves the input sort order if in sort mode */
3676 pathnode->path.pathkeys =
3677 (strategy == SETOP_SORTED) ? leftpath->pathkeys : NIL;
3678
3679 pathnode->leftpath = leftpath;
3680 pathnode->rightpath = rightpath;
3681 pathnode->cmd = cmd;
3682 pathnode->strategy = strategy;
3683 pathnode->groupList = groupList;
3684 pathnode->numGroups = numGroups;
3685
3686 /*
3687 * Compute cost estimates. As things stand, we end up with the same total
3688 * cost in this node for sort and hash methods, but different startup
3689 * costs. This could be refined perhaps, but it'll do for now.
3690 */
3691 pathnode->path.disabled_nodes =
3692 leftpath->disabled_nodes + rightpath->disabled_nodes;
3693 if (strategy == SETOP_SORTED)
3694 {
3695 /*
3696 * In sorted mode, we can emit output incrementally. Charge one
3697 * cpu_operator_cost per comparison per input tuple. Like cost_group,
3698 * we assume all columns get compared at most of the tuples.
3699 */
3700 pathnode->path.startup_cost =
3701 leftpath->startup_cost + rightpath->startup_cost;
3702 pathnode->path.total_cost =
3703 leftpath->total_cost + rightpath->total_cost +
3704 cpu_operator_cost * (leftpath->rows + rightpath->rows) * list_length(groupList);
3705
3706 /*
3707 * Also charge a small amount per extracted tuple. Like cost_sort,
3708 * charge only operator cost not cpu_tuple_cost, since SetOp does no
3709 * qual-checking or projection.
3710 */
3711 pathnode->path.total_cost += cpu_operator_cost * outputRows;
3712 }
3713 else
3714 {
3715 Size hashentrysize;
3716
3717 /*
3718 * In hashed mode, we must read all the input before we can emit
3719 * anything. Also charge comparison costs to represent the cost of
3720 * hash table lookups.
3721 */
3722 pathnode->path.startup_cost =
3723 leftpath->total_cost + rightpath->total_cost +
3724 cpu_operator_cost * (leftpath->rows + rightpath->rows) * list_length(groupList);
3725 pathnode->path.total_cost = pathnode->path.startup_cost;
3726
3727 /*
3728 * Also charge a small amount per extracted tuple. Like cost_sort,
3729 * charge only operator cost not cpu_tuple_cost, since SetOp does no
3730 * qual-checking or projection.
3731 */
3732 pathnode->path.total_cost += cpu_operator_cost * outputRows;
3733
3734 /*
3735 * Mark the path as disabled if enable_hashagg is off. While this
3736 * isn't exactly a HashAgg node, it seems close enough to justify
3737 * letting that switch control it.
3738 */
3739 if (!enable_hashagg)
3740 pathnode->path.disabled_nodes++;
3741
3742 /*
3743 * Also disable if it doesn't look like the hashtable will fit into
3744 * hash_mem.
3745 */
3746 hashentrysize = MAXALIGN(leftpath->pathtarget->width) +
3747 MAXALIGN(SizeofMinimalTupleHeader);
3748 if (hashentrysize * numGroups > get_hash_memory_limit())
3749 pathnode->path.disabled_nodes++;
3750 }
3751 pathnode->path.rows = outputRows;
3752
3753 return pathnode;
3754}
#define MAXALIGN(LEN)
Definition: c.h:782
size_t Size
Definition: c.h:576
double cpu_operator_cost
Definition: costsize.c:134
bool enable_hashagg
Definition: costsize.c:152
#define SizeofMinimalTupleHeader
Definition: htup_details.h:699
size_t get_hash_memory_limit(void)
Definition: nodeHash.c:3616
@ SETOP_SORTED
Definition: nodes.h:412
Path * rightpath
Definition: pathnodes.h:2476
Cardinality numGroups
Definition: pathnodes.h:2480
Path * leftpath
Definition: pathnodes.h:2475
SetOpCmd cmd
Definition: pathnodes.h:2477
Path path
Definition: pathnodes.h:2474
SetOpStrategy strategy
Definition: pathnodes.h:2478
List * groupList
Definition: pathnodes.h:2479

References SetOpPath::cmd, RelOptInfo::consider_parallel, cpu_operator_cost, Path::disabled_nodes, enable_hashagg, get_hash_memory_limit(), SetOpPath::groupList, if(), SetOpPath::leftpath, list_length(), makeNode, MAXALIGN, NIL, SetOpPath::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SetOpPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, SetOpPath::rightpath, Path::rows, SETOP_SORTED, SizeofMinimalTupleHeader, Path::startup_cost, SetOpPath::strategy, and Path::total_cost.

Referenced by generate_nonunion_paths().

◆ create_sort_path()

SortPath * create_sort_path ( PlannerInfo root,
RelOptInfo rel,
Path subpath,
List pathkeys,
double  limit_tuples 
)

Definition at line 3085 of file pathnode.c.

3090{
3091 SortPath *pathnode = makeNode(SortPath);
3092
3093 pathnode->path.pathtype = T_Sort;
3094 pathnode->path.parent = rel;
3095 /* Sort doesn't project, so use source path's pathtarget */
3096 pathnode->path.pathtarget = subpath->pathtarget;
3097 /* For now, assume we are above any joins, so no parameterization */
3098 pathnode->path.param_info = NULL;
3099 pathnode->path.parallel_aware = false;
3100 pathnode->path.parallel_safe = rel->consider_parallel &&
3101 subpath->parallel_safe;
3102 pathnode->path.parallel_workers = subpath->parallel_workers;
3103 pathnode->path.pathkeys = pathkeys;
3104
3105 pathnode->subpath = subpath;
3106
3107 cost_sort(&pathnode->path, root, pathkeys,
3108 subpath->disabled_nodes,
3109 subpath->total_cost,
3110 subpath->rows,
3111 subpath->pathtarget->width,
3112 0.0, /* XXX comparison_cost shouldn't be 0? */
3113 work_mem, limit_tuples);
3114
3115 return pathnode;
3116}

References RelOptInfo::consider_parallel, cost_sort(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SortPath::path, Path::pathkeys, Path::pathtype, root, subpath(), SortPath::subpath, and work_mem.

Referenced by add_paths_with_pathkeys_for_rel(), build_setop_child_paths(), create_one_window_path(), create_ordered_paths(), gather_grouping_paths(), generate_nonunion_paths(), generate_union_paths(), generate_useful_gather_paths(), and make_ordered_path().
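
A hedged sketch of the gather_grouping_paths() pattern: sort the cheapest partial path to the grouping order, then merge the per-worker streams with a Gather Merge. It assumes rel has at least one partial path and that root->group_pathkeys is the ordering wanted:

Path   *partial_path = (Path *) linitial(rel->partial_pathlist);
Path   *sorted_path;
Path   *gm_path;

sorted_path = (Path *) create_sort_path(root, rel, partial_path,
                                        root->group_pathkeys,
                                        -1.0 /* no LIMIT */);
gm_path = (Path *) create_gather_merge_path(root, rel, sorted_path,
                                            rel->reltarget,
                                            root->group_pathkeys,
                                            NULL, NULL);
add_path(rel, gm_path);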

◆ create_subqueryscan_path()

SubqueryScanPath * create_subqueryscan_path ( PlannerInfo root,
RelOptInfo rel,
Path subpath,
bool  trivial_pathtarget,
List pathkeys,
Relids  required_outer 
)

Definition at line 2088 of file pathnode.c.

2091{
2092 SubqueryScanPath *pathnode = makeNode(SubqueryScanPath);
2093
2094 pathnode->path.pathtype = T_SubqueryScan;
2095 pathnode->path.parent = rel;
2096 pathnode->path.pathtarget = rel->reltarget;
2097 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2098 required_outer);
2099 pathnode->path.parallel_aware = false;
2100 pathnode->path.parallel_safe = rel->consider_parallel &&
2101 subpath->parallel_safe;
2102 pathnode->path.parallel_workers = subpath->parallel_workers;
2103 pathnode->path.pathkeys = pathkeys;
2104 pathnode->subpath = subpath;
2105
2106 cost_subqueryscan(pathnode, root, rel, pathnode->path.param_info,
2107 trivial_pathtarget);
2108
2109 return pathnode;
2110}
void cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info, bool trivial_pathtarget)
Definition: costsize.c:1457

References RelOptInfo::consider_parallel, cost_subqueryscan(), get_baserel_parampathinfo(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SubqueryScanPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, subpath(), and SubqueryScanPath::subpath.

Referenced by build_setop_child_paths(), reparameterize_path(), and set_subquery_pathlist().

◆ create_tablefuncscan_path()

Path * create_tablefuncscan_path ( PlannerInfo root,
RelOptInfo rel,
Relids  required_outer 
)

Definition at line 2144 of file pathnode.c.

2146{
2147 Path *pathnode = makeNode(Path);
2148
2149 pathnode->pathtype = T_TableFuncScan;
2150 pathnode->parent = rel;
2151 pathnode->pathtarget = rel->reltarget;
2152 pathnode->param_info = get_baserel_parampathinfo(root, rel,
2153 required_outer);
2154 pathnode->parallel_aware = false;
2155 pathnode->parallel_safe = rel->consider_parallel;
2156 pathnode->parallel_workers = 0;
2157 pathnode->pathkeys = NIL; /* result is always unordered */
2158
2159 cost_tablefuncscan(pathnode, root, rel, pathnode->param_info);
2160
2161 return pathnode;
2162}
void cost_tablefuncscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1600

References RelOptInfo::consider_parallel, cost_tablefuncscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_tablefunc_pathlist().

◆ create_tidrangescan_path()

TidRangePath * create_tidrangescan_path ( PlannerInfo root,
RelOptInfo rel,
List tidrangequals,
Relids  required_outer 
)

Definition at line 1264 of file pathnode.c.

1266{
1267 TidRangePath *pathnode = makeNode(TidRangePath);
1268
1269 pathnode->path.pathtype = T_TidRangeScan;
1270 pathnode->path.parent = rel;
1271 pathnode->path.pathtarget = rel->reltarget;
1272 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1273 required_outer);
1274 pathnode->path.parallel_aware = false;
1275 pathnode->path.parallel_safe = rel->consider_parallel;
1276 pathnode->path.parallel_workers = 0;
1277 pathnode->path.pathkeys = NIL; /* always unordered */
1278
1279 pathnode->tidrangequals = tidrangequals;
1280
1281 cost_tidrangescan(&pathnode->path, root, rel, tidrangequals,
1282 pathnode->path.param_info);
1283
1284 return pathnode;
1285}
void cost_tidrangescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, List *tidrangequals, ParamPathInfo *param_info)
Definition: costsize.c:1363
List * tidrangequals
Definition: pathnodes.h:1970

References RelOptInfo::consider_parallel, cost_tidrangescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, TidRangePath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, and TidRangePath::tidrangequals.

Referenced by create_tidscan_paths().

◆ create_tidscan_path()

TidPath * create_tidscan_path ( PlannerInfo root,
RelOptInfo rel,
List tidquals,
Relids  required_outer 
)

Definition at line 1235 of file pathnode.c.

1237{
1238 TidPath *pathnode = makeNode(TidPath);
1239
1240 pathnode->path.pathtype = T_TidScan;
1241 pathnode->path.parent = rel;
1242 pathnode->path.pathtarget = rel->reltarget;
1243 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1244 required_outer);
1245 pathnode->path.parallel_aware = false;
1246 pathnode->path.parallel_safe = rel->consider_parallel;
1247 pathnode->path.parallel_workers = 0;
1248 pathnode->path.pathkeys = NIL; /* always unordered */
1249
1250 pathnode->tidquals = tidquals;
1251
1252 cost_tidscan(&pathnode->path, root, rel, tidquals,
1253 pathnode->path.param_info);
1254
1255 return pathnode;
1256}
void cost_tidscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
Definition: costsize.c:1258
List * tidquals
Definition: pathnodes.h:1958
Path path
Definition: pathnodes.h:1957

References RelOptInfo::consider_parallel, cost_tidscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, TidPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, and TidPath::tidquals.

Referenced by BuildParameterizedTidPaths(), and create_tidscan_paths().

◆ create_unique_path()

UniquePath * create_unique_path ( PlannerInfo root,
RelOptInfo rel,
Path subpath,
SpecialJoinInfo sjinfo 
)

Definition at line 1727 of file pathnode.c.

1729{
1730 UniquePath *pathnode;
1731 Path sort_path; /* dummy for result of cost_sort */
1732 Path agg_path; /* dummy for result of cost_agg */
1733 MemoryContext oldcontext;
1734 int numCols;
1735
1736 /* Caller made a mistake if subpath isn't cheapest_total ... */
1737 Assert(subpath == rel->cheapest_total_path);
1738 Assert(subpath->parent == rel);
1739 /* ... or if SpecialJoinInfo is the wrong one */
1740 Assert(sjinfo->jointype == JOIN_SEMI);
1741 Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
1742
1743 /* If result already cached, return it */
1744 if (rel->cheapest_unique_path)
1745 return (UniquePath *) rel->cheapest_unique_path;
1746
1747 /* If it's not possible to unique-ify, return NULL */
1748 if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash))
1749 return NULL;
1750
1751 /*
1752 * When called during GEQO join planning, we are in a short-lived memory
1753 * context. We must make sure that the path and any subsidiary data
1754 * structures created for a baserel survive the GEQO cycle, else the
1755 * baserel is trashed for future GEQO cycles. On the other hand, when we
1756 * are creating those for a joinrel during GEQO, we don't want them to
1757 * clutter the main planning context. Upshot is that the best solution is
1758 * to explicitly allocate memory in the same context the given RelOptInfo
1759 * is in.
1760 */
1761 oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
1762
1763 pathnode = makeNode(UniquePath);
1764
1765 pathnode->path.pathtype = T_Unique;
1766 pathnode->path.parent = rel;
1767 pathnode->path.pathtarget = rel->reltarget;
1768 pathnode->path.param_info = subpath->param_info;
1769 pathnode->path.parallel_aware = false;
1770 pathnode->path.parallel_safe = rel->consider_parallel &&
1771 subpath->parallel_safe;
1772 pathnode->path.parallel_workers = subpath->parallel_workers;
1773
1774 /*
1775 * Assume the output is unsorted, since we don't necessarily have pathkeys
1776 * to represent it. (This might get overridden below.)
1777 */
1778 pathnode->path.pathkeys = NIL;
1779
1780 pathnode->subpath = subpath;
1781
1782 /*
1783 * Under GEQO and when planning child joins, the sjinfo might be
1784 * short-lived, so we'd better make copies of data structures we extract
1785 * from it.
1786 */
1787 pathnode->in_operators = copyObject(sjinfo->semi_operators);
1788 pathnode->uniq_exprs = copyObject(sjinfo->semi_rhs_exprs);
1789
1790 /*
1791 * If the input is a relation and it has a unique index that proves the
1792 * semi_rhs_exprs are unique, then we don't need to do anything. Note
1793 * that relation_has_unique_index_for automatically considers restriction
1794 * clauses for the rel, as well.
1795 */
1796 if (rel->rtekind == RTE_RELATION && sjinfo->semi_can_btree &&
1797 relation_has_unique_index_for(root, rel, NIL,
1798 sjinfo->semi_rhs_exprs,
1799 sjinfo->semi_operators))
1800 {
1801 pathnode->umethod = UNIQUE_PATH_NOOP;
1802 pathnode->path.rows = rel->rows;
1803 pathnode->path.disabled_nodes = subpath->disabled_nodes;
1804 pathnode->path.startup_cost = subpath->startup_cost;
1805 pathnode->path.total_cost = subpath->total_cost;
1806 pathnode->path.pathkeys = subpath->pathkeys;
1807
1808 rel->cheapest_unique_path = (Path *) pathnode;
1809
1810 MemoryContextSwitchTo(oldcontext);
1811
1812 return pathnode;
1813 }
1814
1815 /*
1816 * If the input is a subquery whose output must be unique already, then we
1817 * don't need to do anything. The test for uniqueness has to consider
1818 * exactly which columns we are extracting; for example "SELECT DISTINCT
1819 * x,y" doesn't guarantee that x alone is distinct. So we cannot check for
1820 * this optimization unless semi_rhs_exprs consists only of simple Vars
1821 * referencing subquery outputs. (Possibly we could do something with
1822 * expressions in the subquery outputs, too, but for now keep it simple.)
1823 */
1824 if (rel->rtekind == RTE_SUBQUERY)
1825 {
1826 RangeTblEntry *rte = planner_rt_fetch(rel->relid, root);
1827
1828 if (query_supports_distinctness(rte->subquery))
1829 {
1830 List *sub_tlist_colnos;
1831
1832 sub_tlist_colnos = translate_sub_tlist(sjinfo->semi_rhs_exprs,
1833 rel->relid);
1834
1835 if (sub_tlist_colnos &&
1836 query_is_distinct_for(rte->subquery,
1837 sub_tlist_colnos,
1838 sjinfo->semi_operators))
1839 {
1840 pathnode->umethod = UNIQUE_PATH_NOOP;
1841 pathnode->path.rows = rel->rows;
1842 pathnode->path.disabled_nodes = subpath->disabled_nodes;
1843 pathnode->path.startup_cost = subpath->startup_cost;
1844 pathnode->path.total_cost = subpath->total_cost;
1845 pathnode->path.pathkeys = subpath->pathkeys;
1846
1847 rel->cheapest_unique_path = (Path *) pathnode;
1848
1849 MemoryContextSwitchTo(oldcontext);
1850
1851 return pathnode;
1852 }
1853 }
1854 }
1855
1856 /* Estimate number of output rows */
1857 pathnode->path.rows = estimate_num_groups(root,
1858 sjinfo->semi_rhs_exprs,
1859 rel->rows,
1860 NULL,
1861 NULL);
1862 numCols = list_length(sjinfo->semi_rhs_exprs);
1863
1864 if (sjinfo->semi_can_btree)
1865 {
1866 /*
1867 * Estimate cost for sort+unique implementation
1868 */
1869 cost_sort(&sort_path, root, NIL,
1870 subpath->disabled_nodes,
1871 subpath->total_cost,
1872 rel->rows,
1873 subpath->pathtarget->width,
1874 0.0,
1875 work_mem,
1876 -1.0);
1877
1878 /*
1879 * Charge one cpu_operator_cost per comparison per input tuple. We
1880 * assume all columns get compared at most of the tuples. (XXX
1881 * probably this is an overestimate.) This should agree with
1882 * create_upper_unique_path.
1883 */
1884 sort_path.total_cost += cpu_operator_cost * rel->rows * numCols;
1885 }
1886
1887 if (sjinfo->semi_can_hash)
1888 {
1889 /*
1890 * Estimate the overhead per hashtable entry at 64 bytes (same as in
1891 * planner.c).
1892 */
1893 int hashentrysize = subpath->pathtarget->width + 64;
1894
1895 if (hashentrysize * pathnode->path.rows > get_hash_memory_limit())
1896 {
1897 /*
1898 * We should not try to hash. Hack the SpecialJoinInfo to
1899 * remember this, in case we come through here again.
1900 */
1901 sjinfo->semi_can_hash = false;
1902 }
1903 else
1904 cost_agg(&agg_path, root,
1905 AGG_HASHED, NULL,
1906 numCols, pathnode->path.rows,
1907 NIL,
1908 subpath->disabled_nodes,
1909 subpath->startup_cost,
1910 subpath->total_cost,
1911 rel->rows,
1912 subpath->pathtarget->width);
1913 }
1914
1915 if (sjinfo->semi_can_btree && sjinfo->semi_can_hash)
1916 {
1917 if (agg_path.disabled_nodes < sort_path.disabled_nodes ||
1918 (agg_path.disabled_nodes == sort_path.disabled_nodes &&
1919 agg_path.total_cost < sort_path.total_cost))
1920 pathnode->umethod = UNIQUE_PATH_HASH;
1921 else
1922 pathnode->umethod = UNIQUE_PATH_SORT;
1923 }
1924 else if (sjinfo->semi_can_btree)
1925 pathnode->umethod = UNIQUE_PATH_SORT;
1926 else if (sjinfo->semi_can_hash)
1927 pathnode->umethod = UNIQUE_PATH_HASH;
1928 else
1929 {
1930 /* we can get here only if we abandoned hashing above */
1931 MemoryContextSwitchTo(oldcontext);
1932 return NULL;
1933 }
1934
1935 if (pathnode->umethod == UNIQUE_PATH_HASH)
1936 {
1937 pathnode->path.disabled_nodes = agg_path.disabled_nodes;
1938 pathnode->path.startup_cost = agg_path.startup_cost;
1939 pathnode->path.total_cost = agg_path.total_cost;
1940 }
1941 else
1942 {
1943 pathnode->path.disabled_nodes = sort_path.disabled_nodes;
1944 pathnode->path.startup_cost = sort_path.startup_cost;
1945 pathnode->path.total_cost = sort_path.total_cost;
1946 }
1947
1948 rel->cheapest_unique_path = (Path *) pathnode;
1949
1950 MemoryContextSwitchTo(oldcontext);
1951
1952 return pathnode;
1953}
bool query_is_distinct_for(Query *query, List *colnos, List *opids)
bool query_supports_distinctness(Query *query)
bool relation_has_unique_index_for(PlannerInfo *root, RelOptInfo *rel, List *restrictlist, List *exprlist, List *oprlist)
Definition: indxpath.c:4162
MemoryContext GetMemoryChunkContext(void *pointer)
Definition: mcxt.c:738
#define copyObject(obj)
Definition: nodes.h:230
@ JOIN_SEMI
Definition: nodes.h:313
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:124
static List * translate_sub_tlist(List *tlist, int relid)
Definition: pathnode.c:2018
@ UNIQUE_PATH_SORT
Definition: pathnodes.h:2156
@ UNIQUE_PATH_NOOP
Definition: pathnodes.h:2154
@ UNIQUE_PATH_HASH
Definition: pathnodes.h:2155
#define planner_rt_fetch(rti, root)
Definition: pathnodes.h:594
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition: selfuncs.c:3446
Query * subquery
Definition: parsenodes.h:1118
List * semi_rhs_exprs
Definition: pathnodes.h:3050
Relids syn_righthand
Definition: pathnodes.h:3038
List * semi_operators
Definition: pathnodes.h:3049
Path * subpath
Definition: pathnodes.h:2162
List * uniq_exprs
Definition: pathnodes.h:2165
UniquePathMethod umethod
Definition: pathnodes.h:2163
List * in_operators
Definition: pathnodes.h:2164

References AGG_HASHED, Assert(), bms_equal(), RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RelOptInfo::consider_parallel, copyObject, cost_agg(), cost_sort(), cpu_operator_cost, Path::disabled_nodes, estimate_num_groups(), get_hash_memory_limit(), GetMemoryChunkContext(), UniquePath::in_operators, JOIN_SEMI, SpecialJoinInfo::jointype, list_length(), makeNode, MemoryContextSwitchTo(), NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, UniquePath::path, Path::pathkeys, Path::pathtype, planner_rt_fetch, query_is_distinct_for(), query_supports_distinctness(), relation_has_unique_index_for(), RelOptInfo::relid, RelOptInfo::relids, RelOptInfo::reltarget, root, RelOptInfo::rows, Path::rows, RTE_RELATION, RTE_SUBQUERY, RelOptInfo::rtekind, SpecialJoinInfo::semi_can_btree, SpecialJoinInfo::semi_can_hash, SpecialJoinInfo::semi_operators, SpecialJoinInfo::semi_rhs_exprs, Path::startup_cost, subpath(), UniquePath::subpath, RangeTblEntry::subquery, SpecialJoinInfo::syn_righthand, Path::total_cost, translate_sub_tlist(), UniquePath::umethod, UniquePath::uniq_exprs, UNIQUE_PATH_HASH, UNIQUE_PATH_NOOP, UNIQUE_PATH_SORT, and work_mem.

Referenced by consider_parallel_nestloop(), hash_inner_and_outer(), join_is_legal(), match_unsorted_outer(), populate_joinrel_with_paths(), and sort_inner_and_outer().
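
A hedged sketch of the call pattern used by the semijoin planners listed above: pass the inner rel's cheapest total path, and treat a NULL result as "unique-ification is not possible". The wrapper function is hypothetical.

/* Illustrative sketch only (assumed helper): returns the unique-ified
 * inner path for a semijoin, or NULL if unique-ification isn't possible. */
static Path *
semijoin_inner_as_unique_sketch(PlannerInfo *root, RelOptInfo *innerrel,
                                SpecialJoinInfo *sjinfo)
{
    UniquePath *upath;

    /* create_unique_path() caches its result in innerrel->cheapest_unique_path */
    upath = create_unique_path(root, innerrel,
                               innerrel->cheapest_total_path,
                               sjinfo);
    if (upath == NULL)
        return NULL;            /* neither sorting nor hashing is possible */

    return &upath->path;        /* join this as an ordinary inner relation */
}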

◆ create_upper_unique_path()

UpperUniquePath * create_upper_unique_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
int  numCols,
double  numGroups 
)

Definition at line 3190 of file pathnode.c.

3195{
3196 UpperUniquePath *pathnode = makeNode(UpperUniquePath);
3197
3198 pathnode->path.pathtype = T_Unique;
3199 pathnode->path.parent = rel;
3200 /* Unique doesn't project, so use source path's pathtarget */
3201 pathnode->path.pathtarget = subpath->pathtarget;
3202 /* For now, assume we are above any joins, so no parameterization */
3203 pathnode->path.param_info = NULL;
3204 pathnode->path.parallel_aware = false;
3205 pathnode->path.parallel_safe = rel->consider_parallel &&
3206 subpath->parallel_safe;
3207 pathnode->path.parallel_workers = subpath->parallel_workers;
3208 /* Unique doesn't change the input ordering */
3209 pathnode->path.pathkeys = subpath->pathkeys;
3210
3211 pathnode->subpath = subpath;
3212 pathnode->numkeys = numCols;
3213
3214 /*
3215 * Charge one cpu_operator_cost per comparison per input tuple. We assume
3216 * all columns get compared at most of the tuples. (XXX probably this is
3217 * an overestimate.)
3218 */
3219 pathnode->path.disabled_nodes = subpath->disabled_nodes;
3220 pathnode->path.startup_cost = subpath->startup_cost;
3221 pathnode->path.total_cost = subpath->total_cost +
3222 cpu_operator_cost * subpath->rows * numCols;
3223 pathnode->path.rows = numGroups;
3224
3225 return pathnode;
3226}

References RelOptInfo::consider_parallel, cpu_operator_cost, Path::disabled_nodes, makeNode, UpperUniquePath::numkeys, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, UpperUniquePath::path, Path::pathkeys, Path::pathtype, Path::rows, Path::startup_cost, subpath(), UpperUniquePath::subpath, and Path::total_cost.

Referenced by create_final_distinct_paths(), create_partial_distinct_paths(), and generate_union_paths().
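
As a worked example of the formula above, and assuming the default cpu_operator_cost of 0.0025: for 1,000,000 input rows and numCols = 3, the Unique node adds

0.0025 * 1000000 * 3 = 7500

to the subpath's total_cost, while startup_cost and disabled_nodes are carried over unchanged and rows is set to the caller-supplied numGroups.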

◆ create_valuesscan_path()

Path * create_valuesscan_path ( PlannerInfo *root,
RelOptInfo *rel,
Relids  required_outer 
)

Definition at line 2170 of file pathnode.c.

2172{
2173 Path *pathnode = makeNode(Path);
2174
2175 pathnode->pathtype = T_ValuesScan;
2176 pathnode->parent = rel;
2177 pathnode->pathtarget = rel->reltarget;
2178 pathnode->param_info = get_baserel_parampathinfo(root, rel,
2179 required_outer);
2180 pathnode->parallel_aware = false;
2181 pathnode->parallel_safe = rel->consider_parallel;
2182 pathnode->parallel_workers = 0;
2183 pathnode->pathkeys = NIL; /* result is always unordered */
2184
2185 cost_valuesscan(pathnode, root, rel, pathnode->param_info);
2186
2187 return pathnode;
2188}
void cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1657

References RelOptInfo::consider_parallel, cost_valuesscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_values_pathlist().

◆ create_windowagg_path()

WindowAggPath * create_windowagg_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
PathTarget *target,
List *windowFuncs,
List *runCondition,
WindowClause *winclause,
List *qual,
bool  topwindow 
)

Definition at line 3580 of file pathnode.c.

3589{
3590 WindowAggPath *pathnode = makeNode(WindowAggPath);
3591
3592 /* qual can only be set for the topwindow */
3593 Assert(qual == NIL || topwindow);
3594
3595 pathnode->path.pathtype = T_WindowAgg;
3596 pathnode->path.parent = rel;
3597 pathnode->path.pathtarget = target;
3598 /* For now, assume we are above any joins, so no parameterization */
3599 pathnode->path.param_info = NULL;
3600 pathnode->path.parallel_aware = false;
3601 pathnode->path.parallel_safe = rel->consider_parallel &&
3602 subpath->parallel_safe;
3603 pathnode->path.parallel_workers = subpath->parallel_workers;
3604 /* WindowAgg preserves the input sort order */
3605 pathnode->path.pathkeys = subpath->pathkeys;
3606
3607 pathnode->subpath = subpath;
3608 pathnode->winclause = winclause;
3609 pathnode->qual = qual;
3610 pathnode->runCondition = runCondition;
3611 pathnode->topwindow = topwindow;
3612
3613 /*
3614 * For costing purposes, assume that there are no redundant partitioning
3615 * or ordering columns; it's not worth the trouble to deal with that
3616 * corner case here. So we just pass the unmodified list lengths to
3617 * cost_windowagg.
3618 */
3619 cost_windowagg(&pathnode->path, root,
3620 windowFuncs,
3621 winclause,
3622 subpath->disabled_nodes,
3623 subpath->startup_cost,
3624 subpath->total_cost,
3625 subpath->rows);
3626
3627 /* add tlist eval cost for each output row */
3628 pathnode->path.startup_cost += target->cost.startup;
3629 pathnode->path.total_cost += target->cost.startup +
3630 target->cost.per_tuple * pathnode->path.rows;
3631
3632 return pathnode;
3633}
void cost_windowagg(Path *path, PlannerInfo *root, List *windowFuncs, WindowClause *winclause, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
Definition: costsize.c:3098
List * runCondition
Definition: pathnodes.h:2464
Path * subpath
Definition: pathnodes.h:2461
WindowClause * winclause
Definition: pathnodes.h:2462

References Assert(), RelOptInfo::consider_parallel, PathTarget::cost, cost_windowagg(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, WindowAggPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, WindowAggPath::qual, root, Path::rows, WindowAggPath::runCondition, QualCost::startup, Path::startup_cost, subpath(), WindowAggPath::subpath, WindowAggPath::topwindow, Path::total_cost, and WindowAggPath::winclause.

Referenced by create_one_window_path().

◆ create_worktablescan_path()

Path * create_worktablescan_path ( PlannerInfo *root,
RelOptInfo *rel,
Relids  required_outer 
)

Definition at line 2274 of file pathnode.c.

2276{
2277 Path *pathnode = makeNode(Path);
2278
2279 pathnode->pathtype = T_WorkTableScan;
2280 pathnode->parent = rel;
2281 pathnode->pathtarget = rel->reltarget;
2282 pathnode->param_info = get_baserel_parampathinfo(root, rel,
2283 required_outer);
2284 pathnode->parallel_aware = false;
2285 pathnode->parallel_safe = rel->consider_parallel;
2286 pathnode->parallel_workers = 0;
2287 pathnode->pathkeys = NIL; /* result is always unordered */
2288
2289 /* Cost is the same as for a regular CTE scan */
2290 cost_ctescan(pathnode, root, rel, pathnode->param_info);
2291
2292 return pathnode;
2293}

References RelOptInfo::consider_parallel, cost_ctescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_worktable_pathlist().

◆ expand_planner_arrays()

void expand_planner_arrays ( PlannerInfo *root,
int  add_size 
)

Definition at line 163 of file relnode.c.

164{
165 int new_size;
166
167 Assert(add_size > 0);
168
169 new_size = root->simple_rel_array_size + add_size;
170
171 root->simple_rel_array =
172 repalloc0_array(root->simple_rel_array, RelOptInfo *, root->simple_rel_array_size, new_size);
173
174 root->simple_rte_array =
175 repalloc0_array(root->simple_rte_array, RangeTblEntry *, root->simple_rel_array_size, new_size);
176
177 if (root->append_rel_array)
178 root->append_rel_array =
179 repalloc0_array(root->append_rel_array, AppendRelInfo *, root->simple_rel_array_size, new_size);
180 else
181 root->append_rel_array =
182 palloc0_array(AppendRelInfo *, new_size);
183
184 root->simple_rel_array_size = new_size;
185}
#define palloc0_array(type, count)
Definition: fe_memutils.h:77
#define repalloc0_array(pointer, type, oldcount, count)
Definition: palloc.h:109
Size add_size(Size s1, Size s2)
Definition: shmem.c:493

References add_size(), Assert(), palloc0_array, repalloc0_array, and root.

Referenced by expand_inherited_rtentry(), and expand_partitioned_rtentry().

◆ fetch_upper_rel()

RelOptInfo * fetch_upper_rel ( PlannerInfo *root,
UpperRelationKind  kind,
Relids  relids 
)

Definition at line 1458 of file relnode.c.

1459{
1460 RelOptInfo *upperrel;
1461 ListCell *lc;
1462
1463 /*
1464 * For the moment, our indexing data structure is just a List for each
1465 * relation kind. If we ever get so many of one kind that this stops
1466 * working well, we can improve it. No code outside this function should
1467 * assume anything about how to find a particular upperrel.
1468 */
1469
1470 /* If we already made this upperrel for the query, return it */
1471 foreach(lc, root->upper_rels[kind])
1472 {
1473 upperrel = (RelOptInfo *) lfirst(lc);
1474
1475 if (bms_equal(upperrel->relids, relids))
1476 return upperrel;
1477 }
1478
1479 upperrel = makeNode(RelOptInfo);
1480 upperrel->reloptkind = RELOPT_UPPER_REL;
1481 upperrel->relids = bms_copy(relids);
1482
1483 /* cheap startup cost is interesting iff not all tuples to be retrieved */
1484 upperrel->consider_startup = (root->tuple_fraction > 0);
1485 upperrel->consider_param_startup = false;
1486 upperrel->consider_parallel = false; /* might get changed later */
1487 upperrel->reltarget = create_empty_pathtarget();
1488 upperrel->pathlist = NIL;
1489 upperrel->cheapest_startup_path = NULL;
1490 upperrel->cheapest_total_path = NULL;
1491 upperrel->cheapest_unique_path = NULL;
1492 upperrel->cheapest_parameterized_paths = NIL;
1493
1494 root->upper_rels[kind] = lappend(root->upper_rels[kind], upperrel);
1495
1496 return upperrel;
1497}
@ RELOPT_UPPER_REL
Definition: pathnodes.h:855

References bms_copy(), bms_equal(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_startup, create_empty_pathtarget(), lappend(), lfirst, makeNode, NIL, RelOptInfo::pathlist, RelOptInfo::relids, RELOPT_UPPER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, and root.

Referenced by add_rtes_to_flat_rtable(), build_setop_child_paths(), create_distinct_paths(), create_ordered_paths(), create_partial_distinct_paths(), create_partial_grouping_paths(), create_window_paths(), generate_nonunion_paths(), generate_recursion_path(), generate_union_paths(), grouping_planner(), make_grouping_rel(), make_subplan(), preprocess_minmax_aggregates(), set_subquery_pathlist(), set_subquery_size_estimates(), SS_process_ctes(), standard_planner(), and subquery_planner().
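
A hedged sketch of typical use: the function memoizes upper rels per (kind, relids), so repeated calls return the same node rather than a copy. The wrapper below is hypothetical; UPPERREL_FINAL with NULL relids denotes the whole-query final rel.

/* Illustrative sketch only: fetch (creating on first call) the upper rel
 * that collects paths for the query's final output. */
static RelOptInfo *
get_final_rel_sketch(PlannerInfo *root)
{
    RelOptInfo *final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);

    /* A second call returns the very same node, not a copy. */
    Assert(final_rel == fetch_upper_rel(root, UPPERREL_FINAL, NULL));

    return final_rel;
}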

◆ find_base_rel()

◆ find_base_rel_ignore_join()

RelOptInfo * find_base_rel_ignore_join ( PlannerInfo *root,
int  relid 
)

Definition at line 454 of file relnode.c.

455{
456 /* use an unsigned comparison to prevent negative array element access */
457 if ((uint32) relid < (uint32) root->simple_rel_array_size)
458 {
459 RelOptInfo *rel;
460 RangeTblEntry *rte;
461
462 rel = root->simple_rel_array[relid];
463 if (rel)
464 return rel;
465
466 /*
467 * We could just return NULL here, but for debugging purposes it seems
468 * best to actually verify that the relid is an outer join and not
469 * something weird.
470 */
471 rte = root->simple_rte_array[relid];
472 if (rte && rte->rtekind == RTE_JOIN && rte->jointype != JOIN_INNER)
473 return NULL;
474 }
475
476 elog(ERROR, "no relation entry for relid %d", relid);
477
478 return NULL; /* keep compiler quiet */
479}
JoinType jointype
Definition: parsenodes.h:1165

References elog, ERROR, JOIN_INNER, RangeTblEntry::jointype, root, RTE_JOIN, and RangeTblEntry::rtekind.

Referenced by add_join_clause_to_rels(), create_lateral_join_info(), find_appinfos_by_relids(), and remove_join_clause_from_rels().

◆ find_base_rel_noerr()

RelOptInfo * find_base_rel_noerr ( PlannerInfo *root,
int  relid 
)

Definition at line 436 of file relnode.c.

437{
438 /* use an unsigned comparison to prevent negative array element access */
439 if ((uint32) relid < (uint32) root->simple_rel_array_size)
440 return root->simple_rel_array[relid];
441 return NULL;
442}

References root.

Referenced by examine_simple_variable().

◆ find_childrel_parents()

Relids find_childrel_parents ( PlannerInfo *root,
RelOptInfo *rel 
)

Definition at line 1509 of file relnode.c.

1510{
1511 Relids result = NULL;
1512
1513 Assert(rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
1514 Assert(rel->relid > 0 && rel->relid < root->simple_rel_array_size);
1515
1516 do
1517 {
1518 AppendRelInfo *appinfo = root->append_rel_array[rel->relid];
1519 Index prelid = appinfo->parent_relid;
1520
1521 result = bms_add_member(result, prelid);
1522
1523 /* traverse up to the parent rel, loop if it's also a child rel */
1524 rel = find_base_rel(root, prelid);
1525 } while (rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
1526
1527 Assert(rel->reloptkind == RELOPT_BASEREL);
1528
1529 return result;
1530}
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
unsigned int Index
Definition: c.h:585
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:414
Index parent_relid
Definition: pathnodes.h:3110

References Assert(), bms_add_member(), find_base_rel(), AppendRelInfo::parent_relid, RelOptInfo::relid, RELOPT_BASEREL, RELOPT_OTHER_MEMBER_REL, RelOptInfo::reloptkind, and root.

Referenced by check_index_predicates(), and generate_implied_equalities_for_column().

◆ find_join_rel()

RelOptInfo * find_join_rel ( PlannerInfo *root,
Relids  relids 
)

Definition at line 527 of file relnode.c.

528{
529 /*
530 * Switch to using hash lookup when list grows "too long". The threshold
531 * is arbitrary and is known only here.
532 */
533 if (!root->join_rel_hash && list_length(root->join_rel_list) > 32)
534 build_join_rel_hash(root);
535
536 /*
537 * Use either hashtable lookup or linear search, as appropriate.
538 *
539 * Note: the seemingly redundant hashkey variable is used to avoid taking
540 * the address of relids; unless the compiler is exceedingly smart, doing
541 * so would force relids out of a register and thus probably slow down the
542 * list-search case.
543 */
544 if (root->join_rel_hash)
545 {
546 Relids hashkey = relids;
547 JoinHashEntry *hentry;
548
549 hentry = (JoinHashEntry *) hash_search(root->join_rel_hash,
550 &hashkey,
551 HASH_FIND,
552 NULL);
553 if (hentry)
554 return hentry->join_rel;
555 }
556 else
557 {
558 ListCell *l;
559
560 foreach(l, root->join_rel_list)
561 {
562 RelOptInfo *rel = (RelOptInfo *) lfirst(l);
563
564 if (bms_equal(rel->relids, relids))
565 return rel;
566 }
567 }
568
569 return NULL;
570}
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:956
@ HASH_FIND
Definition: hsearch.h:113
static void build_join_rel_hash(PlannerInfo *root)
Definition: relnode.c:486
RelOptInfo * join_rel
Definition: relnode.c:41

References bms_equal(), build_join_rel_hash(), HASH_FIND, hash_search(), JoinHashEntry::join_rel, lfirst, list_length(), RelOptInfo::relids, and root.

Referenced by build_child_join_rel(), build_join_rel(), examine_variable(), find_join_input_rel(), get_matching_part_pairs(), and postgresPlanDirectModify().

◆ find_param_path_info()

ParamPathInfo * find_param_path_info ( RelOptInfo *rel,
Relids  required_outer 
)

Definition at line 1889 of file relnode.c.

1890{
1891 ListCell *lc;
1892
1893 foreach(lc, rel->ppilist)
1894 {
1895 ParamPathInfo *ppi = (ParamPathInfo *) lfirst(lc);
1896
1897 if (bms_equal(ppi->ppi_req_outer, required_outer))
1898 return ppi;
1899 }
1900
1901 return NULL;
1902}
Relids ppi_req_outer
Definition: pathnodes.h:1712

References bms_equal(), lfirst, ParamPathInfo::ppi_req_outer, and RelOptInfo::ppilist.

Referenced by get_appendrel_parampathinfo(), get_baserel_parampathinfo(), get_joinrel_parampathinfo(), and reparameterize_path_by_child().

◆ get_appendrel_parampathinfo()

ParamPathInfo * get_appendrel_parampathinfo ( RelOptInfo *appendrel,
Relids  required_outer 
)

Definition at line 1856 of file relnode.c.

1857{
1858 ParamPathInfo *ppi;
1859
1860 /* If rel has LATERAL refs, every path for it should account for them */
1861 Assert(bms_is_subset(appendrel->lateral_relids, required_outer));
1862
1863 /* Unparameterized paths have no ParamPathInfo */
1864 if (bms_is_empty(required_outer))
1865 return NULL;
1866
1867 Assert(!bms_overlap(appendrel->relids, required_outer));
1868
1869 /* If we already have a PPI for this parameterization, just return it */
1870 if ((ppi = find_param_path_info(appendrel, required_outer)))
1871 return ppi;
1872
1873 /* Else build the ParamPathInfo */
1874 ppi = makeNode(ParamPathInfo);
1875 ppi->ppi_req_outer = required_outer;
1876 ppi->ppi_rows = 0;
1877 ppi->ppi_clauses = NIL;
1878 ppi->ppi_serials = NULL;
1879 appendrel->ppilist = lappend(appendrel->ppilist, ppi);
1880
1881 return ppi;
1882}
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:412
ParamPathInfo * find_param_path_info(RelOptInfo *rel, Relids required_outer)
Definition: relnode.c:1889
Cardinality ppi_rows
Definition: pathnodes.h:1713
List * ppi_clauses
Definition: pathnodes.h:1714
Bitmapset * ppi_serials
Definition: pathnodes.h:1715

References Assert(), bms_is_empty, bms_is_subset(), bms_overlap(), find_param_path_info(), lappend(), RelOptInfo::lateral_relids, makeNode, NIL, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, and RelOptInfo::relids.

Referenced by create_append_path().

◆ get_baserel_parampathinfo()

ParamPathInfo * get_baserel_parampathinfo ( PlannerInfo *root,
RelOptInfo *baserel,
Relids  required_outer 
)

Definition at line 1545 of file relnode.c.

1547{
1548 ParamPathInfo *ppi;
1549 Relids joinrelids;
1550 List *pclauses;
1551 List *eqclauses;
1552 Bitmapset *pserials;
1553 double rows;
1554 ListCell *lc;
1555
1556 /* If rel has LATERAL refs, every path for it should account for them */
1557 Assert(bms_is_subset(baserel->lateral_relids, required_outer));
1558
1559 /* Unparameterized paths have no ParamPathInfo */
1560 if (bms_is_empty(required_outer))
1561 return NULL;
1562
1563 Assert(!bms_overlap(baserel->relids, required_outer));
1564
1565 /* If we already have a PPI for this parameterization, just return it */
1566 if ((ppi = find_param_path_info(baserel, required_outer)))
1567 return ppi;
1568
1569 /*
1570 * Identify all joinclauses that are movable to this base rel given this
1571 * parameterization.
1572 */
1573 joinrelids = bms_union(baserel->relids, required_outer);
1574 pclauses = NIL;
1575 foreach(lc, baserel->joininfo)
1576 {
1577 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1578
1579 if (join_clause_is_movable_into(rinfo,
1580 baserel->relids,
1581 joinrelids))
1582 pclauses = lappend(pclauses, rinfo);
1583 }
1584
1585 /*
1586 * Add in joinclauses generated by EquivalenceClasses, too. (These
1587 * necessarily satisfy join_clause_is_movable_into; but in assert-enabled
1588 * builds, let's verify that.)
1589 */
1590 eqclauses = generate_join_implied_equalities(root,
1591 joinrelids,
1592 required_outer,
1593 baserel,
1594 NULL);
1595#ifdef USE_ASSERT_CHECKING
1596 foreach(lc, eqclauses)
1597 {
1598 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1599
1600 Assert(join_clause_is_movable_into(rinfo,
1601 baserel->relids,
1602 joinrelids));
1603 }
1604#endif
1605 pclauses = list_concat(pclauses, eqclauses);
1606
1607 /* Compute set of serial numbers of the enforced clauses */
1608 pserials = NULL;
1609 foreach(lc, pclauses)
1610 {
1611 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1612
1613 pserials = bms_add_member(pserials, rinfo->rinfo_serial);
1614 }
1615
1616 /* Estimate the number of rows returned by the parameterized scan */
1617 rows = get_parameterized_baserel_size(root, baserel, pclauses);
1618
1619 /* And now we can build the ParamPathInfo */
1620 ppi = makeNode(ParamPathInfo);
1621 ppi->ppi_req_outer = required_outer;
1622 ppi->ppi_rows = rows;
1623 ppi->ppi_clauses = pclauses;
1624 ppi->ppi_serials = pserials;
1625 baserel->ppilist = lappend(baserel->ppilist, ppi);
1626
1627 return ppi;
1628}
double get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel, List *param_clauses)
Definition: costsize.c:5365
List * generate_join_implied_equalities(PlannerInfo *root, Relids join_relids, Relids outer_relids, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo)
Definition: equivclass.c:1550
bool join_clause_is_movable_into(RestrictInfo *rinfo, Relids currentrelids, Relids current_and_outer)
Definition: restrictinfo.c:661

References Assert(), bms_add_member(), bms_is_empty, bms_is_subset(), bms_overlap(), bms_union(), find_param_path_info(), generate_join_implied_equalities(), get_parameterized_baserel_size(), join_clause_is_movable_into(), RelOptInfo::joininfo, lappend(), RelOptInfo::lateral_relids, lfirst, list_concat(), makeNode, NIL, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, RelOptInfo::relids, RestrictInfo::rinfo_serial, and root.

Referenced by create_append_path(), create_bitmap_and_path(), create_bitmap_heap_path(), create_bitmap_or_path(), create_ctescan_path(), create_foreignscan_path(), create_functionscan_path(), create_gather_merge_path(), create_gather_path(), create_index_path(), create_namedtuplestorescan_path(), create_resultscan_path(), create_samplescan_path(), create_seqscan_path(), create_subqueryscan_path(), create_tablefuncscan_path(), create_tidrangescan_path(), create_tidscan_path(), create_valuesscan_path(), create_worktablescan_path(), postgresGetForeignPaths(), and reparameterize_path().
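
A hedged sketch of the constructor pattern shared by the scan-path builders listed above (it loosely mirrors create_seqscan_path()): resolve the ParamPathInfo first, store it in the path, and let the costing routine pick up ppi_rows from it. The helper name is hypothetical.

/* Illustrative sketch only: the usual constructor pattern for a
 * (possibly parameterized) scan path. */
static Path *
make_parameterized_scan_sketch(PlannerInfo *root, RelOptInfo *rel,
                               Relids required_outer)
{
    Path *pathnode = makeNode(Path);

    pathnode->pathtype = T_SeqScan;
    pathnode->parent = rel;
    pathnode->pathtarget = rel->reltarget;
    /* NULL when required_outer is empty, i.e. an unparameterized scan */
    pathnode->param_info = get_baserel_parampathinfo(root, rel, required_outer);
    pathnode->parallel_aware = false;
    pathnode->parallel_safe = rel->consider_parallel;
    pathnode->parallel_workers = 0;
    pathnode->pathkeys = NIL;

    /* cost_seqscan() uses param_info->ppi_rows when param_info is set */
    cost_seqscan(pathnode, root, rel, pathnode->param_info);

    return pathnode;
}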

◆ get_joinrel_parampathinfo()

ParamPathInfo * get_joinrel_parampathinfo ( PlannerInfo *root,
RelOptInfo *joinrel,
Path *outer_path,
Path *inner_path,
SpecialJoinInfo *sjinfo,
Relids  required_outer,
List **  restrict_clauses 
)

Definition at line 1659 of file relnode.c.

1665{
1666 ParamPathInfo *ppi;
1667 Relids join_and_req;
1668 Relids outer_and_req;
1669 Relids inner_and_req;
1670 List *pclauses;
1671 List *eclauses;
1672 List *dropped_ecs;
1673 double rows;
1674 ListCell *lc;
1675
1676 /* If rel has LATERAL refs, every path for it should account for them */
1677 Assert(bms_is_subset(joinrel->lateral_relids, required_outer));
1678
1679 /* Unparameterized paths have no ParamPathInfo or extra join clauses */
1680 if (bms_is_empty(required_outer))
1681 return NULL;
1682
1683 Assert(!bms_overlap(joinrel->relids, required_outer));
1684
1685 /*
1686 * Identify all joinclauses that are movable to this join rel given this
1687 * parameterization. These are the clauses that are movable into this
1688 * join, but not movable into either input path. Treat an unparameterized
1689 * input path as not accepting parameterized clauses (because it won't,
1690 * per the shortcut exit above), even though the joinclause movement rules
1691 * might allow the same clauses to be moved into a parameterized path for
1692 * that rel.
1693 */
1694 join_and_req = bms_union(joinrel->relids, required_outer);
1695 if (outer_path->param_info)
1696 outer_and_req = bms_union(outer_path->parent->relids,
1697 PATH_REQ_OUTER(outer_path));
1698 else
1699 outer_and_req = NULL; /* outer path does not accept parameters */
1700 if (inner_path->param_info)
1701 inner_and_req = bms_union(inner_path->parent->relids,
1702 PATH_REQ_OUTER(inner_path));
1703 else
1704 inner_and_req = NULL; /* inner path does not accept parameters */
1705
1706 pclauses = NIL;
1707 foreach(lc, joinrel->joininfo)
1708 {
1709 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1710
1711 if (join_clause_is_movable_into(rinfo,
1712 joinrel->relids,
1713 join_and_req) &&
1714 !join_clause_is_movable_into(rinfo,
1715 outer_path->parent->relids,
1716 outer_and_req) &&
1717 !join_clause_is_movable_into(rinfo,
1718 inner_path->parent->relids,
1719 inner_and_req))
1720 pclauses = lappend(pclauses, rinfo);
1721 }
1722
1723 /* Consider joinclauses generated by EquivalenceClasses, too */
1724 eclauses = generate_join_implied_equalities(root,
1725 join_and_req,
1726 required_outer,
1727 joinrel,
1728 NULL);
1729 /* We only want ones that aren't movable to lower levels */
1730 dropped_ecs = NIL;
1731 foreach(lc, eclauses)
1732 {
1733 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1734
1735 Assert(join_clause_is_movable_into(rinfo,
1736 joinrel->relids,
1737 join_and_req));
1738 if (join_clause_is_movable_into(rinfo,
1739 outer_path->parent->relids,
1740 outer_and_req))
1741 continue; /* drop if movable into LHS */
1742 if (join_clause_is_movable_into(rinfo,
1743 inner_path->parent->relids,
1744 inner_and_req))
1745 {
1746 /* drop if movable into RHS, but remember EC for use below */
1747 Assert(rinfo->left_ec == rinfo->right_ec);
1748 dropped_ecs = lappend(dropped_ecs, rinfo->left_ec);
1749 continue;
1750 }
1751 pclauses = lappend(pclauses, rinfo);
1752 }
1753
1754 /*
1755 * EquivalenceClasses are harder to deal with than we could wish, because
1756 * of the fact that a given EC can generate different clauses depending on
1757 * context. Suppose we have an EC {X.X, Y.Y, Z.Z} where X and Y are the
1758 * LHS and RHS of the current join and Z is in required_outer, and further
1759 * suppose that the inner_path is parameterized by both X and Z. The code
1760 * above will have produced either Z.Z = X.X or Z.Z = Y.Y from that EC,
1761 * and in the latter case will have discarded it as being movable into the
1762 * RHS. However, the EC machinery might have produced either Y.Y = X.X or
1763 * Y.Y = Z.Z as the EC enforcement clause within the inner_path; it will
1764 * not have produced both, and we can't readily tell from here which one
1765 * it did pick. If we add no clause to this join, we'll end up with
1766 * insufficient enforcement of the EC; either Z.Z or X.X will fail to be
1767 * constrained to be equal to the other members of the EC. (When we come
1768 * to join Z to this X/Y path, we will certainly drop whichever EC clause
1769 * is generated at that join, so this omission won't get fixed later.)
1770 *
1771 * To handle this, for each EC we discarded such a clause from, try to
1772 * generate a clause connecting the required_outer rels to the join's LHS
1773 * ("Z.Z = X.X" in the terms of the above example). If successful, and if
1774 * the clause can't be moved to the LHS, add it to the current join's
1775 * restriction clauses. (If an EC cannot generate such a clause then it
1776 * has nothing that needs to be enforced here, while if the clause can be
1777 * moved into the LHS then it should have been enforced within that path.)
1778 *
1779 * Note that we don't need similar processing for ECs whose clause was
1780 * considered to be movable into the LHS, because the LHS can't refer to
1781 * the RHS so there is no comparable ambiguity about what it might
1782 * actually be enforcing internally.
1783 */
1784 if (dropped_ecs)
1785 {
1786 Relids real_outer_and_req;
1787
1788 real_outer_and_req = bms_union(outer_path->parent->relids,
1789 required_outer);
1790 eclauses =
1791 generate_join_implied_equalities_for_ecs(root,
1792 dropped_ecs,
1793 real_outer_and_req,
1794 required_outer,
1795 outer_path->parent);
1796 foreach(lc, eclauses)
1797 {
1798 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1799
1800 Assert(join_clause_is_movable_into(rinfo,
1801 outer_path->parent->relids,
1802 real_outer_and_req));
1803 if (!join_clause_is_movable_into(rinfo,
1804 outer_path->parent->relids,
1805 outer_and_req))
1806 pclauses = lappend(pclauses, rinfo);
1807 }
1808 }
1809
1810 /*
1811 * Now, attach the identified moved-down clauses to the caller's
1812 * restrict_clauses list. By using list_concat in this order, we leave
1813 * the original list structure of restrict_clauses undamaged.
1814 */
1815 *restrict_clauses = list_concat(pclauses, *restrict_clauses);
1816
1817 /* If we already have a PPI for this parameterization, just return it */
1818 if ((ppi = find_param_path_info(joinrel, required_outer)))
1819 return ppi;
1820
1821 /* Estimate the number of rows returned by the parameterized join */
1822 rows = get_parameterized_joinrel_size(root, joinrel,
1823 outer_path,
1824 inner_path,
1825 sjinfo,
1826 *restrict_clauses);
1827
1828 /*
1829 * And now we can build the ParamPathInfo. No point in saving the
1830 * input-pair-dependent clause list, though.
1831 *
1832 * Note: in GEQO mode, we'll be called in a temporary memory context, but
1833 * the joinrel structure is there too, so no problem.
1834 */
1835 ppi = makeNode(ParamPathInfo);
1836 ppi->ppi_req_outer = required_outer;
1837 ppi->ppi_rows = rows;
1838 ppi->ppi_clauses = NIL;
1839 ppi->ppi_serials = NULL;
1840 joinrel->ppilist = lappend(joinrel->ppilist, ppi);
1841
1842 return ppi;
1843}
double get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, List *restrict_clauses)
Definition: costsize.c:5446
List * generate_join_implied_equalities_for_ecs(PlannerInfo *root, List *eclasses, Relids join_relids, Relids outer_relids, RelOptInfo *inner_rel)
Definition: equivclass.c:1650

References Assert(), bms_is_empty, bms_is_subset(), bms_overlap(), bms_union(), find_param_path_info(), generate_join_implied_equalities(), generate_join_implied_equalities_for_ecs(), get_parameterized_joinrel_size(), join_clause_is_movable_into(), RelOptInfo::joininfo, lappend(), RelOptInfo::lateral_relids, lfirst, list_concat(), makeNode, NIL, PATH_REQ_OUTER, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, RelOptInfo::relids, and root.

Referenced by create_hashjoin_path(), create_mergejoin_path(), and create_nestloop_path().

◆ get_param_path_clause_serials()

Bitmapset * get_param_path_clause_serials ( Path *path)

Definition at line 1910 of file relnode.c.

1911{
1912 if (path->param_info == NULL)
1913 return NULL; /* not parameterized */
1914
1915 /*
1916 * We don't currently support parameterized MergeAppend paths, as
1917 * explained in the comments for generate_orderedappend_paths.
1918 */
1919 Assert(!IsA(path, MergeAppendPath));
1920
1921 if (IsA(path, NestPath) ||
1922 IsA(path, MergePath) ||
1923 IsA(path, HashPath))
1924 {
1925 /*
1926 * For a join path, combine clauses enforced within either input path
1927 * with those enforced as joinrestrictinfo in this path. Note that
1928 * joinrestrictinfo may include some non-pushed-down clauses, but for
1929 * current purposes it's okay if we include those in the result. (To
1930 * be more careful, we could check for clause_relids overlapping the
1931 * path parameterization, but it's not worth the cycles for now.)
1932 */
1933 JoinPath *jpath = (JoinPath *) path;
1934 Bitmapset *pserials;
1935 ListCell *lc;
1936
1937 pserials = NULL;
1938 pserials = bms_add_members(pserials,
1939 get_param_path_clause_serials(jpath->outerjoinpath));
1940 pserials = bms_add_members(pserials,
1941 get_param_path_clause_serials(jpath->innerjoinpath));
1942 foreach(lc, jpath->joinrestrictinfo)
1943 {
1944 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1945
1946 pserials = bms_add_member(pserials, rinfo->rinfo_serial);
1947 }
1948 return pserials;
1949 }
1950 else if (IsA(path, AppendPath))
1951 {
1952 /*
1953 * For an appendrel, take the intersection of the sets of clauses
1954 * enforced in each input path.
1955 */
1956 AppendPath *apath = (AppendPath *) path;
1957 Bitmapset *pserials;
1958 ListCell *lc;
1959
1960 pserials = NULL;
1961 foreach(lc, apath->subpaths)
1962 {
1963 Path *subpath = (Path *) lfirst(lc);
1964 Bitmapset *subserials;
1965
1966 subserials = get_param_path_clause_serials(subpath);
1967 if (lc == list_head(apath->subpaths))
1968 pserials = bms_copy(subserials);
1969 else
1970 pserials = bms_int_members(pserials, subserials);
1971 }
1972 return pserials;
1973 }
1974 else
1975 {
1976 /*
1977 * Otherwise, it's a baserel path and we can use the
1978 * previously-computed set of serial numbers.
1979 */
1980 return path->param_info->ppi_serials;
1981 }
1982}
Bitmapset * bms_int_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1109
static ListCell * list_head(const List *l)
Definition: pg_list.h:128

References Assert(), bms_add_member(), bms_add_members(), bms_copy(), bms_int_members(), get_param_path_clause_serials(), JoinPath::innerjoinpath, IsA, JoinPath::joinrestrictinfo, lfirst, list_head(), JoinPath::outerjoinpath, RestrictInfo::rinfo_serial, subpath(), and AppendPath::subpaths.

Referenced by create_nestloop_path(), and get_param_path_clause_serials().

◆ min_join_parameterization()

Relids min_join_parameterization ( PlannerInfo *root,
Relids  joinrelids,
RelOptInfo *outer_rel,
RelOptInfo *inner_rel 
)

Definition at line 1022 of file relnode.c.

1026{
1027 Relids result;
1028
1029 /*
1030 * Basically we just need the union of the inputs' lateral_relids, less
1031 * whatever is already in the join.
1032 *
1033 * It's not immediately obvious that this is a valid way to compute the
1034 * result, because it might seem that we're ignoring possible lateral refs
1035 * of PlaceHolderVars that are due to be computed at the join but not in
1036 * either input. However, because create_lateral_join_info() already
1037 * charged all such PHV refs to each member baserel of the join, they'll
1038 * be accounted for already in the inputs' lateral_relids. Likewise, we
1039 * do not need to worry about doing transitive closure here, because that
1040 * was already accounted for in the original baserel lateral_relids.
1041 */
1042 result = bms_union(outer_rel->lateral_relids, inner_rel->lateral_relids);
1043 result = bms_del_members(result, joinrelids);
1044 return result;
1045}

References bms_del_members(), bms_union(), and RelOptInfo::lateral_relids.

Referenced by build_join_rel(), and join_is_legal().

◆ path_is_reparameterizable_by_child()

bool path_is_reparameterizable_by_child ( Path *path,
RelOptInfo *child_rel 
)

Definition at line 4568 of file pathnode.c.

4569{
4570#define REJECT_IF_PATH_NOT_REPARAMETERIZABLE(path) \
4571do { \
4572 if (!path_is_reparameterizable_by_child(path, child_rel)) \
4573 return false; \
4574} while(0)
4575
4576#define REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(pathlist) \
4577do { \
4578 if (!pathlist_is_reparameterizable_by_child(pathlist, child_rel)) \
4579 return false; \
4580} while(0)
4581
4582 /*
4583 * If the path is not parameterized by the parent of the given relation,
4584 * it doesn't need reparameterization.
4585 */
4586 if (!path->param_info ||
4587 !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
4588 return true;
4589
4590 /*
4591 * Check that the path type is one that reparameterize_path_by_child() can
4592 * handle, and recursively check subpaths.
4593 */
4594 switch (nodeTag(path))
4595 {
4596 case T_Path:
4597 case T_IndexPath:
4598 break;
4599
4600 case T_BitmapHeapPath:
4601 {
4602 BitmapHeapPath *bhpath = (BitmapHeapPath *) path;
4603
4604 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(bhpath->bitmapqual);
4605 }
4606 break;
4607
4608 case T_BitmapAndPath:
4609 {
4610 BitmapAndPath *bapath = (BitmapAndPath *) path;
4611
4612 REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(bapath->bitmapquals);
4613 }
4614 break;
4615
4616 case T_BitmapOrPath:
4617 {
4618 BitmapOrPath *bopath = (BitmapOrPath *) path;
4619
4620 REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(bopath->bitmapquals);
4621 }
4622 break;
4623
4624 case T_ForeignPath:
4625 {
4626 ForeignPath *fpath = (ForeignPath *) path;
4627
4628 if (fpath->fdw_outerpath)
4629 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(fpath->fdw_outerpath);
4630 }
4631 break;
4632
4633 case T_CustomPath:
4634 {
4635 CustomPath *cpath = (CustomPath *) path;
4636
4637 REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(cpath->custom_paths);
4638 }
4639 break;
4640
4641 case T_NestPath:
4642 case T_MergePath:
4643 case T_HashPath:
4644 {
4645 JoinPath *jpath = (JoinPath *) path;
4646
4647 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(jpath->outerjoinpath);
4648 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(jpath->innerjoinpath);
4649 }
4650 break;
4651
4652 case T_AppendPath:
4653 {
4654 AppendPath *apath = (AppendPath *) path;
4655
4656 REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(apath->subpaths);
4657 }
4658 break;
4659
4660 case T_MaterialPath:
4661 {
4662 MaterialPath *mpath = (MaterialPath *) path;
4663
4664 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(mpath->subpath);
4665 }
4666 break;
4667
4668 case T_MemoizePath:
4669 {
4670 MemoizePath *mpath = (MemoizePath *) path;
4671
4672 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(mpath->subpath);
4673 }
4674 break;
4675
4676 case T_GatherPath:
4677 {
4678 GatherPath *gpath = (GatherPath *) path;
4679
4680 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(gpath->subpath);
4681 }
4682 break;
4683
4684 default:
4685 /* We don't know how to reparameterize this path. */
4686 return false;
4687 }
4688
4689 return true;
4690}
#define nodeTag(nodeptr)
Definition: nodes.h:139
#define REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(pathlist)
#define REJECT_IF_PATH_NOT_REPARAMETERIZABLE(path)
List * custom_paths
Definition: pathnodes.h:2042

References BitmapHeapPath::bitmapqual, BitmapAndPath::bitmapquals, BitmapOrPath::bitmapquals, bms_overlap(), CustomPath::custom_paths, ForeignPath::fdw_outerpath, JoinPath::innerjoinpath, nodeTag, JoinPath::outerjoinpath, PATH_REQ_OUTER, REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE, REJECT_IF_PATH_NOT_REPARAMETERIZABLE, MaterialPath::subpath, MemoizePath::subpath, GatherPath::subpath, AppendPath::subpaths, and RelOptInfo::top_parent_relids.

Referenced by pathlist_is_reparameterizable_by_child(), try_nestloop_path(), and try_partial_nestloop_path().
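
A hedged sketch of how the nestloop callers listed above can use this precheck together with reparameterize_path_by_child() (documented next): a cheap structural check when the candidate join is proposed, with the full translation assumed to follow only for a path that survives. The helper is hypothetical.

/* Illustrative sketch only: check-then-translate for the inner path of a
 * candidate partitionwise nestloop. */
static Path *
translate_inner_for_child_join_sketch(PlannerInfo *root, Path *inner_path,
                                      RelOptInfo *outer_child_rel)
{
    /* Cheap structural check, done when the candidate join is proposed. */
    if (!path_is_reparameterizable_by_child(inner_path, outer_child_rel))
        return NULL;            /* give up on this candidate join */

    /* Full translation of the parameterization to the child rel. */
    inner_path = reparameterize_path_by_child(root, inner_path, outer_child_rel);
    Assert(inner_path != NULL); /* the precheck guarantees success */
    return inner_path;
}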

◆ reparameterize_path()

Path * reparameterize_path ( PlannerInfo *root,
Path *path,
Relids  required_outer,
double  loop_count 
)

Definition at line 4106 of file pathnode.c.

4109{
4110 RelOptInfo *rel = path->parent;
4111
4112 /* Can only increase, not decrease, path's parameterization */
4113 if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
4114 return NULL;
4115 switch (path->pathtype)
4116 {
4117 case T_SeqScan:
4118 return create_seqscan_path(root, rel, required_outer, 0);
4119 case T_SampleScan:
4120 return (Path *) create_samplescan_path(root, rel, required_outer);
4121 case T_IndexScan:
4122 case T_IndexOnlyScan:
4123 {
4124 IndexPath *ipath = (IndexPath *) path;
4125 IndexPath *newpath = makeNode(IndexPath);
4126
4127 /*
4128 * We can't use create_index_path directly, and would not want
4129 * to because it would re-compute the indexqual conditions
4130 * which is wasted effort. Instead we hack things a bit:
4131 * flat-copy the path node, revise its param_info, and redo
4132 * the cost estimate.
4133 */
4134 memcpy(newpath, ipath, sizeof(IndexPath));
4135 newpath->path.param_info =
4136 get_baserel_parampathinfo(root, rel, required_outer);
4137 cost_index(newpath, root, loop_count, false);
4138 return (Path *) newpath;
4139 }
4140 case T_BitmapHeapScan:
4141 {
4142 BitmapHeapPath *bpath = (BitmapHeapPath *) path;
4143
4144 return (Path *) create_bitmap_heap_path(root,
4145 rel,
4146 bpath->bitmapqual,
4147 required_outer,
4148 loop_count, 0);
4149 }
4150 case T_SubqueryScan:
4151 {
4152 SubqueryScanPath *spath = (SubqueryScanPath *) path;
4153 Path *subpath = spath->subpath;
4154 bool trivial_pathtarget;
4155
4156 /*
4157 * If existing node has zero extra cost, we must have decided
4158 * its target is trivial. (The converse is not true, because
4159 * it might have a trivial target but quals to enforce; but in
4160 * that case the new node will too, so it doesn't matter
4161 * whether we get the right answer here.)
4162 */
4163 trivial_pathtarget =
4164 (subpath->total_cost == spath->path.total_cost);
4165
4166 return (Path *) create_subqueryscan_path(root,
4167 rel,
4168 subpath,
4169 trivial_pathtarget,
4170 spath->path.pathkeys,
4171 required_outer);
4172 }
4173 case T_Result:
4174 /* Supported only for RTE_RESULT scan paths */
4175 if (IsA(path, Path))
4176 return create_resultscan_path(root, rel, required_outer);
4177 break;
4178 case T_Append:
4179 {
4180 AppendPath *apath = (AppendPath *) path;
4181 List *childpaths = NIL;
4182 List *partialpaths = NIL;
4183 int i;
4184 ListCell *lc;
4185
4186 /* Reparameterize the children */
4187 i = 0;
4188 foreach(lc, apath->subpaths)
4189 {
4190 Path *spath = (Path *) lfirst(lc);
4191
4192 spath = reparameterize_path(root, spath,
4193 required_outer,
4194 loop_count);
4195 if (spath == NULL)
4196 return NULL;
4197 /* We have to re-split the regular and partial paths */
4198 if (i < apath->first_partial_path)
4199 childpaths = lappend(childpaths, spath);
4200 else
4201 partialpaths = lappend(partialpaths, spath);
4202 i++;
4203 }
4204 return (Path *)
4205 create_append_path(root, rel, childpaths, partialpaths,
4206 apath->path.pathkeys, required_outer,
4207 apath->path.parallel_workers,
4208 apath->path.parallel_aware,
4209 -1);
4210 }
4211 case T_Material:
4212 {
4213 MaterialPath *mpath = (MaterialPath *) path;
4214 Path *spath = mpath->subpath;
4215
4216 spath = reparameterize_path(root, spath,
4217 required_outer,
4218 loop_count);
4219 if (spath == NULL)
4220 return NULL;
4221 return (Path *) create_material_path(rel, spath);
4222 }
4223 case T_Memoize:
4224 {
4225 MemoizePath *mpath = (MemoizePath *) path;
4226 Path *spath = mpath->subpath;
4227
4228 spath = reparameterize_path(root, spath,
4229 required_outer,
4230 loop_count);
4231 if (spath == NULL)
4232 return NULL;
4233 return (Path *) create_memoize_path(root, rel,
4234 spath,
4235 mpath->param_exprs,
4236 mpath->hash_operators,
4237 mpath->singlerow,
4238 mpath->binary_mode,
4239 mpath->calls);
4240 }
4241 default:
4242 break;
4243 }
4244 return NULL;
4245}
int i
Definition: isn.c:77
MemoizePath * create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *param_exprs, List *hash_operators, bool singlerow, bool binary_mode, double calls)
Definition: pathnode.c:1667
Path * create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
Definition: pathnode.c:983
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition: pathnode.c:1300
SubqueryScanPath * create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, bool trivial_pathtarget, List *pathkeys, Relids required_outer)
Definition: pathnode.c:2088
BitmapHeapPath * create_bitmap_heap_path(PlannerInfo *root, RelOptInfo *rel, Path *bitmapqual, Relids required_outer, double loop_count, int parallel_degree)
Definition: pathnode.c:1098
Path * create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition: pathnode.c:1008
MaterialPath * create_material_path(RelOptInfo *rel, Path *subpath)
Definition: pathnode.c:1634
Path * create_resultscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition: pathnode.c:2248
Path * reparameterize_path(PlannerInfo *root, Path *path, Relids required_outer, double loop_count)
Definition: pathnode.c:4106

References MemoizePath::binary_mode, BitmapHeapPath::bitmapqual, bms_is_subset(), MemoizePath::calls, cost_index(), create_append_path(), create_bitmap_heap_path(), create_material_path(), create_memoize_path(), create_resultscan_path(), create_samplescan_path(), create_seqscan_path(), create_subqueryscan_path(), get_baserel_parampathinfo(), MemoizePath::hash_operators, i, IsA, lappend(), lfirst, makeNode, NIL, Path::parallel_aware, Path::parallel_workers, MemoizePath::param_exprs, IndexPath::path, SubqueryScanPath::path, AppendPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, reparameterize_path(), root, MemoizePath::singlerow, subpath(), SubqueryScanPath::subpath, MaterialPath::subpath, MemoizePath::subpath, AppendPath::subpaths, and Path::total_cost.

Referenced by get_cheapest_parameterized_child_path(), and reparameterize_path().

◆ reparameterize_path_by_child()

Path * reparameterize_path_by_child ( PlannerInfo *root,
Path *path,
RelOptInfo *child_rel 
)

Definition at line 4272 of file pathnode.c.

4274{
4275 Path *new_path;
4276 ParamPathInfo *new_ppi;
4277 ParamPathInfo *old_ppi;
4278 Relids required_outer;
4279
4280#define ADJUST_CHILD_ATTRS(node) \
4281 ((node) = (void *) adjust_appendrel_attrs_multilevel(root, \
4282 (Node *) (node), \
4283 child_rel, \
4284 child_rel->top_parent))
4285
4286#define REPARAMETERIZE_CHILD_PATH(path) \
4287do { \
4288 (path) = reparameterize_path_by_child(root, (path), child_rel); \
4289 if ((path) == NULL) \
4290 return NULL; \
4291} while(0)
4292
4293#define REPARAMETERIZE_CHILD_PATH_LIST(pathlist) \
4294do { \
4295 if ((pathlist) != NIL) \
4296 { \
4297 (pathlist) = reparameterize_pathlist_by_child(root, (pathlist), \
4298 child_rel); \
4299 if ((pathlist) == NIL) \
4300 return NULL; \
4301 } \
4302} while(0)
4303
4304 /*
4305 * If the path is not parameterized by the parent of the given relation,
4306 * it doesn't need reparameterization.
4307 */
4308 if (!path->param_info ||
4309 !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
4310 return path;
4311
4312 /*
4313 * If possible, reparameterize the given path.
4314 *
4315 * This function is currently only applied to the inner side of a nestloop
4316 * join that is being partitioned by the partitionwise-join code. Hence,
4317 * we need only support path types that plausibly arise in that context.
4318 * (In particular, supporting sorted path types would be a waste of code
4319 * and cycles: even if we translated them here, they'd just lose in
4320 * subsequent cost comparisons.) If we do see an unsupported path type,
4321 * that just means we won't be able to generate a partitionwise-join plan
4322 * using that path type.
4323 */
4324 switch (nodeTag(path))
4325 {
4326 case T_Path:
4327 new_path = path;
4328 ADJUST_CHILD_ATTRS(new_path->parent->baserestrictinfo);
4329 if (path->pathtype == T_SampleScan)
4330 {
4331 Index scan_relid = path->parent->relid;
4332 RangeTblEntry *rte;
4333
4334 /* it should be a base rel with a tablesample clause... */
4335 Assert(scan_relid > 0);
4336 rte = planner_rt_fetch(scan_relid, root);
4337 Assert(rte->rtekind == RTE_RELATION);
4338 Assert(rte->tablesample != NULL);
4339
4340 ADJUST_CHILD_ATTRS(rte->tablesample);
4341 }
4342 break;
4343
4344 case T_IndexPath:
4345 {
4346 IndexPath *ipath = (IndexPath *) path;
4347
4348 ADJUST_CHILD_ATTRS(ipath->indexinfo->indrestrictinfo);
4349 ADJUST_CHILD_ATTRS(ipath->indexclauses);
4350 new_path = (Path *) ipath;
4351 }
4352 break;
4353
4354 case T_BitmapHeapPath:
4355 {
4356 BitmapHeapPath *bhpath = (BitmapHeapPath *) path;
4357
4358 ADJUST_CHILD_ATTRS(bhpath->path.parent->baserestrictinfo);
4359 REPARAMETERIZE_CHILD_PATH(bhpath->bitmapqual);
4360 new_path = (Path *) bhpath;
4361 }
4362 break;
4363
4364 case T_BitmapAndPath:
4365 {
4366 BitmapAndPath *bapath = (BitmapAndPath *) path;
4367
4368 REPARAMETERIZE_CHILD_PATH_LIST(bapath->bitmapquals);
4369 new_path = (Path *) bapath;
4370 }
4371 break;
4372
4373 case T_BitmapOrPath:
4374 {
4375 BitmapOrPath *bopath = (BitmapOrPath *) path;
4376
4377 REPARAMETERIZE_CHILD_PATH_LIST(bopath->bitmapquals);
4378 new_path = (Path *) bopath;
4379 }
4380 break;
4381
4382 case T_ForeignPath:
4383 {
4384 ForeignPath *fpath = (ForeignPath *) path;
4385 ReparameterizeForeignPathByChild_function rfpc_func;
4386
4387 ADJUST_CHILD_ATTRS(fpath->path.parent->baserestrictinfo);
4388 if (fpath->fdw_outerpath)
4389 REPARAMETERIZE_CHILD_PATH(fpath->fdw_outerpath);
4390 if (fpath->fdw_restrictinfo)
4391 ADJUST_CHILD_ATTRS(fpath->fdw_restrictinfo);
4392
4393 /* Hand over to FDW if needed. */
4394 rfpc_func =
4395 path->parent->fdwroutine->ReparameterizeForeignPathByChild;
4396 if (rfpc_func)
4397 fpath->fdw_private = rfpc_func(root, fpath->fdw_private,
4398 child_rel);
4399 new_path = (Path *) fpath;
4400 }
4401 break;
4402
4403 case T_CustomPath:
4404 {
4405 CustomPath *cpath = (CustomPath *) path;
4406
4407 ADJUST_CHILD_ATTRS(cpath->path.parent->baserestrictinfo);
4408 REPARAMETERIZE_CHILD_PATH_LIST(cpath->custom_paths);
4409 if (cpath->custom_restrictinfo)
4410 ADJUST_CHILD_ATTRS(cpath->custom_restrictinfo);
4411 if (cpath->methods &&
4412 cpath->methods->ReparameterizeCustomPathByChild)
4413 cpath->custom_private =
4414 cpath->methods->ReparameterizeCustomPathByChild(root,
4415 cpath->custom_private,
4416 child_rel);
4417 new_path = (Path *) cpath;
4418 }
4419 break;
4420
4421 case T_NestPath:
4422 {
4423 NestPath *npath = (NestPath *) path;
4424 JoinPath *jpath = (JoinPath *) npath;
4425
4426 REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4427 REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4428 ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4429 new_path = (Path *) npath;
4430 }
4431 break;
4432
4433 case T_MergePath:
4434 {
4435 MergePath *mpath = (MergePath *) path;
4436 JoinPath *jpath = (JoinPath *) mpath;
4437
4438 REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4439 REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4440 ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4441 ADJUST_CHILD_ATTRS(mpath->path_mergeclauses);
4442 new_path = (Path *) mpath;
4443 }
4444 break;
4445
4446 case T_HashPath:
4447 {
4448 HashPath *hpath = (HashPath *) path;
4449 JoinPath *jpath = (JoinPath *) hpath;
4450
4451 REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4452 REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4453 ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4454 ADJUST_CHILD_ATTRS(hpath->path_hashclauses);
4455 new_path = (Path *) hpath;
4456 }
4457 break;
4458
4459 case T_AppendPath:
4460 {
4461 AppendPath *apath = (AppendPath *) path;
4462
4463 REPARAMETERIZE_CHILD_PATH_LIST(apath->subpaths);
4464 new_path = (Path *) apath;
4465 }
4466 break;
4467
4468 case T_MaterialPath:
4469 {
4470 MaterialPath *mpath = (MaterialPath *) path;
4471
4472 REPARAMETERIZE_CHILD_PATH(mpath->subpath);
4473 new_path = (Path *) mpath;
4474 }
4475 break;
4476
4477 case T_MemoizePath:
4478 {
4479 MemoizePath *mpath = (MemoizePath *) path;
4480
4481 REPARAMETERIZE_CHILD_PATH(mpath->subpath);
4482 ADJUST_CHILD_ATTRS(mpath->param_exprs);
4483 new_path = (Path *) mpath;
4484 }
4485 break;
4486
4487 case T_GatherPath:
4488 {
4489 GatherPath *gpath = (GatherPath *) path;
4490
4491 REPARAMETERIZE_CHILD_PATH(gpath->subpath);
4492 new_path = (Path *) gpath;
4493 }
4494 break;
4495
4496 default:
4497 /* We don't know how to reparameterize this path. */
4498 return NULL;
4499 }
4500
4501 /*
4502 * Adjust the parameterization information, which refers to the topmost
4503 * parent. The topmost parent can be multiple levels away from the given
4504 * child, hence use multi-level expression adjustment routines.
4505 */
4506 old_ppi = new_path->param_info;
4507 required_outer =
4508 adjust_child_relids_multilevel(root, old_ppi->ppi_req_outer,
4509 child_rel,
4510 child_rel->top_parent);
4511
4512 /* If we already have a PPI for this parameterization, just return it */
4513 new_ppi = find_param_path_info(new_path->parent, required_outer);
4514
4515 /*
4516 * If not, build a new one and link it to the list of PPIs. For the same
4517 * reason as explained in mark_dummy_rel(), allocate new PPI in the same
4518 * context the given RelOptInfo is in.
4519 */
4520 if (new_ppi == NULL)
4521 {
4522 MemoryContext oldcontext;
4523 RelOptInfo *rel = path->parent;
4524
4525 oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
4526
4527 new_ppi = makeNode(ParamPathInfo);
4528 new_ppi->ppi_req_outer = bms_copy(required_outer);
4529 new_ppi->ppi_rows = old_ppi->ppi_rows;
4530 new_ppi->ppi_clauses = old_ppi->ppi_clauses;
4531 ADJUST_CHILD_ATTRS(new_ppi->ppi_clauses);
4532 new_ppi->ppi_serials = bms_copy(old_ppi->ppi_serials);
4533 rel->ppilist = lappend(rel->ppilist, new_ppi);
4534
4535 MemoryContextSwitchTo(oldcontext);
4536 }
4537 bms_free(required_outer);
4538
4539 new_path->param_info = new_ppi;
4540
4541 /*
4542 * Adjust the path target if the parent of the outer relation is
4543 * referenced in the targetlist. This can happen when only the parent of
4544 * outer relation is laterally referenced in this relation.
4545 */
4546 if (bms_overlap(path->parent->lateral_relids,
4547 child_rel->top_parent_relids))
4548 {
4549 new_path->pathtarget = copy_pathtarget(new_path->pathtarget);
4550 ADJUST_CHILD_ATTRS(new_path->pathtarget->exprs);
4551 }
4552
4553 return new_path;
4554}

References ADJUST_CHILD_ATTRS, adjust_child_relids_multilevel(), Assert(), BitmapHeapPath::bitmapqual, BitmapAndPath::bitmapquals, BitmapOrPath::bitmapquals, bms_copy(), bms_free(), bms_overlap(), copy_pathtarget(), CustomPath::custom_paths, CustomPath::custom_private, CustomPath::custom_restrictinfo, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, find_param_path_info(), GetMemoryChunkContext(), IndexPath::indexclauses, IndexPath::indexinfo, IndexOptInfo::indrestrictinfo, JoinPath::innerjoinpath, JoinPath::joinrestrictinfo, lappend(), makeNode, MemoryContextSwitchTo(), CustomPath::methods, nodeTag, JoinPath::outerjoinpath, MemoizePath::param_exprs, BitmapHeapPath::path, ForeignPath::path, CustomPath::path, HashPath::path_hashclauses, MergePath::path_mergeclauses, PATH_REQ_OUTER, Path::pathtype, planner_rt_fetch, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, REPARAMETERIZE_CHILD_PATH, REPARAMETERIZE_CHILD_PATH_LIST, CustomPathMethods::ReparameterizeCustomPathByChild, root, RTE_RELATION, RangeTblEntry::rtekind, MaterialPath::subpath, MemoizePath::subpath, GatherPath::subpath, AppendPath::subpaths, RangeTblEntry::tablesample, and RelOptInfo::top_parent_relids.

Referenced by create_nestloop_plan(), and reparameterize_pathlist_by_child().
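
As a usage illustration (not taken from the PostgreSQL sources): a caller that wants child-relative versions of several paths can apply the function to each one and handle a NULL result, which means that path type cannot be reparameterized. The helper name below is hypothetical; reparameterize_path_by_child(), lappend(), and the List/ListCell machinery are the real APIs, and this sketch simply drops untranslatable paths rather than prescribing what an in-core caller does.

/* Hypothetical helper: build child-relative copies of a list of paths. */
static List *
reparameterize_paths_for_child(PlannerInfo *root, List *pathlist,
                               RelOptInfo *child_rel)
{
    List       *result = NIL;
    ListCell   *lc;

    foreach(lc, pathlist)
    {
        Path       *path = (Path *) lfirst(lc);
        Path       *newpath = reparameterize_path_by_child(root, path,
                                                           child_rel);

        if (newpath == NULL)
            continue;           /* path type not reparameterizable; skip it */
        result = lappend(result, newpath);
    }
    return result;
}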

◆ set_cheapest()

void set_cheapest ( RelOptInfo * parent_rel )

Definition at line 269 of file pathnode.c.

270{
271 Path *cheapest_startup_path;
272 Path *cheapest_total_path;
273 Path *best_param_path;
274 List *parameterized_paths;
275 ListCell *p;
276
277 Assert(IsA(parent_rel, RelOptInfo));
278
279 if (parent_rel->pathlist == NIL)
280 elog(ERROR, "could not devise a query plan for the given query");
281
282 cheapest_startup_path = cheapest_total_path = best_param_path = NULL;
283 parameterized_paths = NIL;
284
285 foreach(p, parent_rel->pathlist)
286 {
287 Path *path = (Path *) lfirst(p);
288 int cmp;
289
290 if (path->param_info)
291 {
292 /* Parameterized path, so add it to parameterized_paths */
293 parameterized_paths = lappend(parameterized_paths, path);
294
295 /*
296 * If we have an unparameterized cheapest-total, we no longer care
297 * about finding the best parameterized path, so move on.
298 */
299 if (cheapest_total_path)
300 continue;
301
302 /*
303 * Otherwise, track the best parameterized path, which is the one
304 * with least total cost among those of the minimum
305 * parameterization.
306 */
307 if (best_param_path == NULL)
308 best_param_path = path;
309 else
310 {
311 switch (bms_subset_compare(PATH_REQ_OUTER(path),
312 PATH_REQ_OUTER(best_param_path)))
313 {
314 case BMS_EQUAL:
315 /* keep the cheaper one */
316 if (compare_path_costs(path, best_param_path,
317 TOTAL_COST) < 0)
318 best_param_path = path;
319 break;
320 case BMS_SUBSET1:
321 /* new path is less-parameterized */
322 best_param_path = path;
323 break;
324 case BMS_SUBSET2:
325 /* old path is less-parameterized, keep it */
326 break;
327 case BMS_DIFFERENT:
328
329 /*
330 * This means that neither path has the least possible
331 * parameterization for the rel. We'll sit on the old
332 * path until something better comes along.
333 */
334 break;
335 }
336 }
337 }
338 else
339 {
340 /* Unparameterized path, so consider it for cheapest slots */
341 if (cheapest_total_path == NULL)
342 {
343 cheapest_startup_path = cheapest_total_path = path;
344 continue;
345 }
346
347 /*
348 * If we find two paths of identical costs, try to keep the
349 * better-sorted one. The paths might have unrelated sort
350 * orderings, in which case we can only guess which might be
351 * better to keep, but if one is superior then we definitely
352 * should keep that one.
353 */
354 cmp = compare_path_costs(cheapest_startup_path, path, STARTUP_COST);
355 if (cmp > 0 ||
356 (cmp == 0 &&
357 compare_pathkeys(cheapest_startup_path->pathkeys,
358 path->pathkeys) == PATHKEYS_BETTER2))
359 cheapest_startup_path = path;
360
361 cmp = compare_path_costs(cheapest_total_path, path, TOTAL_COST);
362 if (cmp > 0 ||
363 (cmp == 0 &&
364 compare_pathkeys(cheapest_total_path->pathkeys,
365 path->pathkeys) == PATHKEYS_BETTER2))
366 cheapest_total_path = path;
367 }
368 }
369
370 /* Add cheapest unparameterized path, if any, to parameterized_paths */
371 if (cheapest_total_path)
372 parameterized_paths = lcons(cheapest_total_path, parameterized_paths);
373
374 /*
375 * If there is no unparameterized path, use the best parameterized path as
376 * cheapest_total_path (but not as cheapest_startup_path).
377 */
378 if (cheapest_total_path == NULL)
379 cheapest_total_path = best_param_path;
380 Assert(cheapest_total_path != NULL);
381
382 parent_rel->cheapest_startup_path = cheapest_startup_path;
383 parent_rel->cheapest_total_path = cheapest_total_path;
384 parent_rel->cheapest_unique_path = NULL; /* computed only if needed */
385 parent_rel->cheapest_parameterized_paths = parameterized_paths;
386}

References Assert(), BMS_DIFFERENT, BMS_EQUAL, BMS_SUBSET1, BMS_SUBSET2, bms_subset_compare(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, cmp(), compare_path_costs(), compare_pathkeys(), elog, ERROR, IsA, lappend(), lcons(), lfirst, NIL, PATH_REQ_OUTER, Path::pathkeys, PATHKEYS_BETTER2, RelOptInfo::pathlist, STARTUP_COST, and TOTAL_COST.

Referenced by apply_scanjoin_target_to_paths(), create_distinct_paths(), create_grouping_paths(), create_ordinary_grouping_paths(), create_partial_distinct_paths(), create_partitionwise_grouping_paths(), create_window_paths(), generate_partitionwise_join_paths(), mark_dummy_rel(), merge_clump(), postprocess_setop_rel(), query_planner(), set_dummy_rel_pathlist(), set_rel_pathlist(), standard_join_search(), and subquery_planner().
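
A minimal calling sketch (the helper name is invented; add_path(), create_seqscan_path(), and set_cheapest() are the real pathnode.h APIs): each RelOptInfo needs set_cheapest() run on it after its candidate paths have been added and before it is used in further planning.

/* Hypothetical sketch of the add-paths-then-finalize pattern. */
static void
finalize_rel_paths(PlannerInfo *root, RelOptInfo *rel)
{
    /* Add an unparameterized sequential-scan path for the rel. */
    add_path(rel, create_seqscan_path(root, rel, NULL, 0));

    /* ... add index, parameterized, and other candidate paths here ... */

    /*
     * Fill cheapest_startup_path, cheapest_total_path, and
     * cheapest_parameterized_paths; errors out if pathlist is empty.
     */
    set_cheapest(rel);

    Assert(rel->cheapest_total_path != NULL);
}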

◆ setup_simple_rel_arrays()

void setup_simple_rel_arrays ( PlannerInfo * root )

Definition at line 94 of file relnode.c.

95{
96 int size;
97 Index rti;
98 ListCell *lc;
99
100 /* Arrays are accessed using RT indexes (1..N) */
101 size = list_length(root->parse->rtable) + 1;
102 root->simple_rel_array_size = size;
103
104 /*
105 * simple_rel_array is initialized to all NULLs, since no RelOptInfos
106 * exist yet. It'll be filled by later calls to build_simple_rel().
107 */
108 root->simple_rel_array = (RelOptInfo **)
109 palloc0(size * sizeof(RelOptInfo *));
110
111 /* simple_rte_array is an array equivalent of the rtable list */
112 root->simple_rte_array = (RangeTblEntry **)
113 palloc0(size * sizeof(RangeTblEntry *));
114 rti = 1;
115 foreach(lc, root->parse->rtable)
116 {
117 RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
118
119 root->simple_rte_array[rti++] = rte;
120 }
121
122 /* append_rel_array is not needed if there are no AppendRelInfos */
123 if (root->append_rel_list == NIL)
124 {
125 root->append_rel_array = NULL;
126 return;
127 }
128
129 root->append_rel_array = (AppendRelInfo **)
130 palloc0(size * sizeof(AppendRelInfo *));
131
132 /*
133 * append_rel_array is filled with any already-existing AppendRelInfos,
134 * which currently could only come from UNION ALL flattening. We might
135 * add more later during inheritance expansion, but it's the
136 * responsibility of the expansion code to update the array properly.
137 */
138 foreach(lc, root->append_rel_list)
139 {
140 AppendRelInfo *appinfo = lfirst_node(AppendRelInfo, lc);
141 int child_relid = appinfo->child_relid;
142
143 /* Sanity check */
144 Assert(child_relid < size);
145
146 if (root->append_rel_array[child_relid])
147 elog(ERROR, "child relation already exists");
148
149 root->append_rel_array[child_relid] = appinfo;
150 }
151}

References Assert(), AppendRelInfo::child_relid, elog, ERROR, lfirst, lfirst_node, list_length(), NIL, palloc0(), and root.

Referenced by plan_cluster_use_sort(), plan_create_index_workers(), plan_set_operations(), and query_planner().
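
A hypothetical follow-on sketch (the helper name is invented; build_simple_rel() is the real function the listing's comment refers to): after setup_simple_rel_arrays() runs, simple_rte_array[rti] is valid for every RT index, while simple_rel_array[rti] stays NULL until build_simple_rel() is called for that index.

/* Hypothetical sketch: look up, and if necessary build, the RelOptInfo
 * for one RT index after setup_simple_rel_arrays() has run. */
static RelOptInfo *
fetch_or_build_rel(PlannerInfo *root, Index rti)
{
    RangeTblEntry *rte = root->simple_rte_array[rti];   /* filled for every RT index */
    RelOptInfo *rel = root->simple_rel_array[rti];      /* NULL until built */

    if (rel == NULL && rte->rtekind == RTE_RELATION)
        rel = build_simple_rel(root, rti, NULL);        /* also stores it in the array */
    return rel;
}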