Move a few ResourceOwnerEnlarge() calls for safety and clarity.
author	Heikki Linnakangas <[email protected]>
Wed, 8 Nov 2023 11:30:46 +0000 (13:30 +0200)
committer	Heikki Linnakangas <[email protected]>
Wed, 8 Nov 2023 11:30:46 +0000 (13:30 +0200)
These are functions where a lot happens between the
ResourceOwnerEnlarge and ResourceOwnerRemember calls. It's important
that no unrelated ResourceOwnerRemember call occurs in between,
because an intervening call might use up the reserved entry, leaving
it unavailable at the intended ResourceOwnerRemember call. I don't
see any bugs here, but the longer the code path between the two
calls, the harder it is to verify.
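
As an illustration, here is a minimal sketch of the reserve/remember
pattern, using the buffer-specific variants of the current API;
do_other_work() is a hypothetical placeholder for the intervening
code path:

    /* Reserve room for one more entry while it is still safe to fail. */
    ResourceOwnerEnlargeBuffers(CurrentResourceOwner);

    /*
     * If anything in here calls ResourceOwnerRememberBuffer() itself,
     * it may use up the headroom reserved above.
     */
    do_other_work();    /* hypothetical */

    /* Must not fail: relies on the room reserved by the Enlarge call. */
    ResourceOwnerRememberBuffer(CurrentResourceOwner, buf);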

In bufmgr.c, ReservePrivateRefCountEntry() serves a purpose similar
to ResourceOwnerEnlarge(): it ensures that the private refcount array
has enough space. The ReservePrivateRefCountEntry() calls were made
at different places than the ResourceOwnerEnlargeBuffers() calls;
move the two calls next to each other for consistency.
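
With this change, the shared-buffer paths reserve both things
together, right before the code that will pin the buffer. Roughly (a
simplified sketch, not a verbatim excerpt of the patch below):

    /* Make sure we will have room to remember the buffer pin. */
    ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
    ReservePrivateRefCountEntry();

    /* ... buffer lookup/allocation that must not touch either reservation ... */

    PinBuffer(buf_hdr, strategy);    /* consumes both reservations */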

Reviewed-by: Aleksander Alekseev, Michael Paquier, Julien Rouhaud
Reviewed-by: Kyotaro Horiguchi, Hayato Kuroda, Álvaro Herrera, Zhihong Yu
Reviewed-by: Peter Eisentraut, Andres Freund
Discussion: https://www.postgresql.org/message-id/cbfabeb0-cd3c-e951-a572-19b365ed314d%40iki.fi

src/backend/storage/buffer/bufmgr.c
src/backend/storage/buffer/localbuf.c
src/backend/utils/cache/catcache.c

index dc504a1ae000c24fd2337ad935b4c508de27ab26..506f71e6533be38efd041794935de4e0bc86633c 100644
@@ -1023,9 +1023,6 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
                                                                 forkNum, strategy, flags);
        }
 
-       /* Make sure we will have room to remember the buffer pin */
-       ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
-
        TRACE_POSTGRESQL_BUFFER_READ_START(forkNum, blockNum,
                                                                           smgr->smgr_rlocator.locator.spcOid,
                                                                           smgr->smgr_rlocator.locator.dbOid,
@@ -1230,6 +1227,10 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
        BufferDesc *victim_buf_hdr;
        uint32          victim_buf_state;
 
+       /* Make sure we will have room to remember the buffer pin */
+       ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
+       ReservePrivateRefCountEntry();
+
        /* create a tag so we can lookup the buffer */
        InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
 
@@ -1591,7 +1592,7 @@ GetVictimBuffer(BufferAccessStrategy strategy, IOContext io_context)
 
        /*
         * Ensure, while the spinlock's not yet held, that there's a free refcount
-        * entry.
+        * entry, and a resource owner slot for the pin.
         */
        ReservePrivateRefCountEntry();
        ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
@@ -1859,9 +1860,6 @@ ExtendBufferedRelShared(BufferManagerRelation bmr,
                MemSet((char *) buf_block, 0, BLCKSZ);
        }
 
-       /* in case we need to pin an existing buffer below */
-       ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
-
        /*
         * Lock relation against concurrent extensions, unless requested not to.
         *
@@ -1947,6 +1945,10 @@ ExtendBufferedRelShared(BufferManagerRelation bmr,
                LWLock     *partition_lock;
                int                     existing_id;
 
+               /* in case we need to pin an existing buffer below */
+               ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
+               ReservePrivateRefCountEntry();
+
                InitBufferTag(&tag, &bmr.smgr->smgr_rlocator.locator, fork, first_block + i);
                hash = BufTableHashCode(&tag);
                partition_lock = BufMappingPartitionLock(hash);
@@ -2281,7 +2283,8 @@ ReleaseAndReadBuffer(Buffer buffer,
  * taking the buffer header lock; instead update the state variable in loop of
  * CAS operations. Hopefully it's just a single CAS.
  *
- * Note that ResourceOwnerEnlargeBuffers must have been done already.
+ * Note that ResourceOwnerEnlargeBuffers and ReservePrivateRefCountEntry()
+ * must have been done already.
  *
  * Returns true if buffer is BM_VALID, else false.  This provision allows
  * some callers to avoid an extra spinlock cycle.
@@ -2294,6 +2297,7 @@ PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
        PrivateRefCountEntry *ref;
 
        Assert(!BufferIsLocal(b));
+       Assert(ReservedRefCountEntry != NULL);
 
        ref = GetPrivateRefCountEntry(b, true);
 
@@ -2302,7 +2306,6 @@ PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
                uint32          buf_state;
                uint32          old_buf_state;
 
-               ReservePrivateRefCountEntry();
                ref = NewPrivateRefCountEntry(b);
 
                old_buf_state = pg_atomic_read_u32(&buf->state);
@@ -2375,7 +2378,8 @@ PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
  * The spinlock is released before return.
  *
  * As this function is called with the spinlock held, the caller has to
- * previously call ReservePrivateRefCountEntry().
+ * previously call ReservePrivateRefCountEntry() and
+ * ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
  *
  * Currently, no callers of this function want to modify the buffer's
  * usage_count at all, so there's no need for a strategy parameter.
@@ -2550,9 +2554,6 @@ BufferSync(int flags)
        int                     mask = BM_DIRTY;
        WritebackContext wb_context;
 
-       /* Make sure we can handle the pin inside SyncOneBuffer */
-       ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
-
        /*
         * Unless this is a shutdown checkpoint or we have been explicitly told,
         * we write only permanent, dirty buffers.  But at shutdown or end of
@@ -3029,9 +3030,6 @@ BgBufferSync(WritebackContext *wb_context)
         * requirements, or hit the bgwriter_lru_maxpages limit.
         */
 
-       /* Make sure we can handle the pin inside SyncOneBuffer */
-       ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
-
        num_to_scan = bufs_to_lap;
        num_written = 0;
        reusable_buffers = reusable_buffers_est;
@@ -3113,8 +3111,6 @@ BgBufferSync(WritebackContext *wb_context)
  *
  * (BUF_WRITTEN could be set in error if FlushBuffer finds the buffer clean
  * after locking it, but we don't care all that much.)
- *
- * Note: caller must have done ResourceOwnerEnlargeBuffers.
  */
 static int
 SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
@@ -3124,7 +3120,9 @@ SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
        uint32          buf_state;
        BufferTag       tag;
 
+       /* Make sure we can handle the pin */
        ReservePrivateRefCountEntry();
+       ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
 
        /*
         * Check whether buffer needs writing.
@@ -4169,9 +4167,6 @@ FlushRelationBuffers(Relation rel)
                return;
        }
 
-       /* Make sure we can handle the pin inside the loop */
-       ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
-
        for (i = 0; i < NBuffers; i++)
        {
                uint32          buf_state;
@@ -4185,7 +4180,9 @@ FlushRelationBuffers(Relation rel)
                if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator))
                        continue;
 
+               /* Make sure we can handle the pin */
                ReservePrivateRefCountEntry();
+               ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
 
                buf_state = LockBufHdr(bufHdr);
                if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
@@ -4242,9 +4239,6 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
        if (use_bsearch)
                pg_qsort(srels, nrels, sizeof(SMgrSortArray), rlocator_comparator);
 
-       /* Make sure we can handle the pin inside the loop */
-       ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
-
        for (i = 0; i < NBuffers; i++)
        {
                SMgrSortArray *srelent = NULL;
@@ -4283,7 +4277,9 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
                if (srelent == NULL)
                        continue;
 
+               /* Make sure we can handle the pin */
                ReservePrivateRefCountEntry();
+               ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
 
                buf_state = LockBufHdr(bufHdr);
                if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srelent->rlocator) &&
@@ -4478,9 +4474,6 @@ FlushDatabaseBuffers(Oid dbid)
        int                     i;
        BufferDesc *bufHdr;
 
-       /* Make sure we can handle the pin inside the loop */
-       ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
-
        for (i = 0; i < NBuffers; i++)
        {
                uint32          buf_state;
@@ -4494,7 +4487,9 @@ FlushDatabaseBuffers(Oid dbid)
                if (bufHdr->tag.dbOid != dbid)
                        continue;
 
+               /* Make sure we can handle the pin */
                ReservePrivateRefCountEntry();
+               ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
 
                buf_state = LockBufHdr(bufHdr);
                if (bufHdr->tag.dbOid == dbid &&
index 9f20dca121247c2f4731bd6f7d46dee80f6829d7..b8715b54651c9610d1111ffc48a2cf14a376212c 100644
@@ -130,6 +130,8 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
        if (LocalBufHash == NULL)
                InitLocalBuffers();
 
+       ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
+
        /* See if the desired buffer already exists */
        hresult = (LocalBufferLookupEnt *)
                hash_search(LocalBufHash, &newTag, HASH_FIND, NULL);
index 1aacb736c22327408b0d7d9b6035bac7e6a72e4a..18db7e78e21b2ee02a582e837116ec15322fa5c3 100644
@@ -1605,8 +1605,6 @@ SearchCatCacheList(CatCache *cache,
         * block to ensure we can undo those refcounts if we get an error before
         * we finish constructing the CatCList.
         */
-       ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
-
        ctlist = NIL;
 
        PG_TRY();
@@ -1694,6 +1692,9 @@ SearchCatCacheList(CatCache *cache,
 
                table_close(relation, AccessShareLock);
 
+               /* Make sure the resource owner has room to remember this entry. */
+               ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
+
                /* Now we can build the CatCList entry. */
                oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
                nmembers = list_length(ctlist);