Further -Wshadow=compatible-local warning fixes
author     David Rowley <[email protected]>    Wed, 24 Aug 2022 10:04:28 +0000 (22:04 +1200)
committer  David Rowley <[email protected]>    Wed, 24 Aug 2022 10:04:28 +0000 (22:04 +1200)
These should have been included in 421892a19, as these shadowed-variable
warnings can also be fixed by narrowing the scope of the shadowed variable,
i.e., moving its declaration into an inner scope.

This is part of the same effort as f01592f91.

By my count, this takes the warning count from 114 down to 106.
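
For reference, the warning and the fix look roughly like this (a minimal
sketch around gcc's -Wshadow=compatible-local; the code below is
illustrative only and is not part of the patch):

    /*
     * Before: gcc -Wshadow=compatible-local warns on the second loop's
     * "i" with "declaration of 'i' shadows a previous local", because
     * it shadows the compatible function-scope "i" above it.
     */
    static int
    sum_both(const int *a, int na, const int *b, int nb)
    {
        int         i;              /* only used by the first loop */
        int         total = 0;

        for (i = 0; i < na; i++)
            total += a[i];

        for (int i = 0; i < nb; i++)    /* warning: shadows outer "i" */
            total += b[i];

        return total;
    }

    /*
     * After: narrow the outer variable's scope by declaring it where it
     * is used, so the two declarations no longer overlap.
     */
    static int
    sum_both_fixed(const int *a, int na, const int *b, int nb)
    {
        int         total = 0;

        for (int i = 0; i < na; i++)
            total += a[i];

        for (int i = 0; i < nb; i++)    /* no shadowing now */
            total += b[i];

        return total;
    }

The same narrowing is why several hunks below simply switch a loop counter
to a C99-style "for (int i = ...)" declaration.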

Author: David Rowley and Justin Pryzby
Discussion: https://postgr.es/m/CAApHDvrwLGBP%2BYw9vriayyf%3DXR4uPWP5jr6cQhP9au_kaDUhbA%40mail.gmail.com

src/backend/access/spgist/spgdoinsert.c
src/backend/commands/trigger.c
src/backend/executor/nodeHash.c
src/backend/optimizer/plan/planner.c
src/backend/tsearch/ts_typanalyze.c
src/backend/utils/adt/levenshtein.c
src/backend/utils/adt/rangetypes_gist.c
src/backend/utils/adt/ruleutils.c

diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c
index e84b5edc035f8a9f750b97f31bf955c4ee485422..f6aced03d37add23c5fdd34f239bd3812c475ed1 100644
--- a/src/backend/access/spgist/spgdoinsert.c
+++ b/src/backend/access/spgist/spgdoinsert.c
@@ -395,7 +395,6 @@ moveLeafs(Relation index, SpGistState *state,
                size;
    Buffer      nbuf;
    Page        npage;
-   SpGistLeafTuple it;
    OffsetNumber r = InvalidOffsetNumber,
                startOffset = InvalidOffsetNumber;
    bool        replaceDead = false;
@@ -467,6 +466,8 @@ moveLeafs(Relation index, SpGistState *state,
    {
        for (i = 0; i < nDelete; i++)
        {
+           SpGistLeafTuple it;
+
            it = (SpGistLeafTuple) PageGetItem(current->page,
                                               PageGetItemId(current->page, toDelete[i]));
            Assert(it->tupstate == SPGIST_LIVE);
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 62a09fb131b3b4a59d43118e6ac049779a587fc9..7661e004a93442bd3018e7187fea50dd941a629b 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -1149,7 +1149,6 @@ CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
        PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
        List       *idxs = NIL;
        List       *childTbls = NIL;
-       ListCell   *l;
        int         i;
        MemoryContext oldcxt,
                    perChildCxt;
@@ -1181,6 +1180,7 @@ CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
        for (i = 0; i < partdesc->nparts; i++)
        {
            Oid         indexOnChild = InvalidOid;
+           ListCell   *l;
            ListCell   *l2;
            CreateTrigStmt *childStmt;
            Relation    childTbl;
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index c48d92259f9725cadba438089772b7dc80d26d74..841896c7781312ad0e9a958af57cc5ceb7a1a745 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -1080,7 +1080,6 @@ static void
 ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
 {
    ParallelHashJoinState *pstate = hashtable->parallel_state;
-   int         i;
 
    Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER);
 
@@ -1244,7 +1243,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
                ExecParallelHashTableSetCurrentBatch(hashtable, 0);
 
                /* Are any of the new generation of batches exhausted? */
-               for (i = 0; i < hashtable->nbatch; ++i)
+               for (int i = 0; i < hashtable->nbatch; ++i)
                {
                    ParallelHashJoinBatch *batch = hashtable->batches[i].shared;
 
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index cf9e0a74dbf38fbc9825319de2cddb2eed1e7f31..d929ce341716db2a6e21c66e68fd059566b5b603 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -1981,7 +1981,6 @@ preprocess_grouping_sets(PlannerInfo *root)
    Query      *parse = root->parse;
    List       *sets;
    int         maxref = 0;
-   ListCell   *lc;
    ListCell   *lc_set;
    grouping_sets_data *gd = palloc0(sizeof(grouping_sets_data));
 
@@ -2024,6 +2023,7 @@ preprocess_grouping_sets(PlannerInfo *root)
    if (!bms_is_empty(gd->unsortable_refs))
    {
        List       *sortable_sets = NIL;
+       ListCell   *lc;
 
        foreach(lc, parse->groupingSets)
        {
diff --git a/src/backend/tsearch/ts_typanalyze.c b/src/backend/tsearch/ts_typanalyze.c
index e771a7cd62d190fec3f5e40cc2ce1547a1899e17..e2d2ec18c90c4ab82487377a1e115facb775aae1 100644
--- a/src/backend/tsearch/ts_typanalyze.c
+++ b/src/backend/tsearch/ts_typanalyze.c
@@ -161,7 +161,6 @@ compute_tsvector_stats(VacAttrStats *stats,
    int         vector_no,
                lexeme_no;
    LexemeHashKey hash_key;
-   TrackItem  *item;
 
    /*
     * We want statistics_target * 10 lexemes in the MCELEM array.  This
@@ -240,6 +239,7 @@ compute_tsvector_stats(VacAttrStats *stats,
        curentryptr = ARRPTR(vector);
        for (j = 0; j < vector->size; j++)
        {
+           TrackItem  *item;
            bool        found;
 
            /*
@@ -296,6 +296,7 @@ compute_tsvector_stats(VacAttrStats *stats,
        int         nonnull_cnt = samplerows - null_cnt;
        int         i;
        TrackItem **sort_table;
+       TrackItem  *item;
        int         track_len;
        int         cutoff_freq;
        int         minfreq,
diff --git a/src/backend/utils/adt/levenshtein.c b/src/backend/utils/adt/levenshtein.c
index 3026cc2431117706592e627cb62670696dd18417..2fdb3b808bdc99e5f57a476d972ee915668a250e 100644
--- a/src/backend/utils/adt/levenshtein.c
+++ b/src/backend/utils/adt/levenshtein.c
@@ -81,8 +81,7 @@ varstr_levenshtein(const char *source, int slen,
    int        *prev;
    int        *curr;
    int        *s_char_len = NULL;
-   int         i,
-               j;
+   int         j;
    const char *y;
 
    /*
@@ -217,7 +216,7 @@ varstr_levenshtein(const char *source, int slen,
     * To transform the first i characters of s into the first 0 characters of
     * t, we must perform i deletions.
     */
-   for (i = START_COLUMN; i < STOP_COLUMN; i++)
+   for (int i = START_COLUMN; i < STOP_COLUMN; i++)
        prev[i] = i * del_c;
 
    /* Loop through rows of the notional array */
@@ -226,6 +225,7 @@ varstr_levenshtein(const char *source, int slen,
        int        *temp;
        const char *x = source;
        int         y_char_len = n != tlen + 1 ? pg_mblen(y) : 1;
+       int         i;
 
 #ifdef LEVENSHTEIN_LESS_EQUAL
 
diff --git a/src/backend/utils/adt/rangetypes_gist.c b/src/backend/utils/adt/rangetypes_gist.c
index fbf39dbf303a5a3d325d7ed18650dbab05e2c6a7..777fdf0e2e966fef634b66e2b09d4456d3816f13 100644
--- a/src/backend/utils/adt/rangetypes_gist.c
+++ b/src/backend/utils/adt/rangetypes_gist.c
@@ -1322,8 +1322,7 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
    ConsiderSplitContext context;
    OffsetNumber i,
                maxoff;
-   RangeType  *range,
-              *left_range = NULL,
+   RangeType  *left_range = NULL,
               *right_range = NULL;
    int         common_entries_count;
    NonEmptyRange *by_lower,
@@ -1518,6 +1517,7 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
     */
    for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
    {
+       RangeType  *range;
        RangeBound  lower,
                    upper;
        bool        empty;
@@ -1593,6 +1593,7 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
         */
        for (i = 0; i < common_entries_count; i++)
        {
+           RangeType  *range;
            int         idx = common_entries[i].index;
 
            range = DatumGetRangeTypeP(entryvec->vector[idx].key);
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 8964f73b929b0b68461398e40a940f71f51130e6..8280711f7ef02070abcffec8fd6e9961929f1edb 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -1615,7 +1615,6 @@ pg_get_statisticsobj_worker(Oid statextid, bool columns_only, bool missing_ok)
    ArrayType  *arr;
    char       *enabled;
    Datum       datum;
-   bool        isnull;
    bool        ndistinct_enabled;
    bool        dependencies_enabled;
    bool        mcv_enabled;
@@ -1668,6 +1667,8 @@ pg_get_statisticsobj_worker(Oid statextid, bool columns_only, bool missing_ok)
 
    if (!columns_only)
    {
+       bool        isnull;
+
        nsp = get_namespace_name_or_temp(statextrec->stxnamespace);
        appendStringInfo(&buf, "CREATE STATISTICS %s",
                         quote_qualified_identifier(nsp,