Unpin buffer before inplace update waits for an XID to end.
Author:    Noah Misch <[email protected]>
Date:      Tue, 29 Oct 2024 16:39:55 +0000 (09:39 -0700)
Committer: Noah Misch <[email protected]>
Date:      Tue, 29 Oct 2024 16:39:55 +0000 (09:39 -0700)
Commit a07e03fd8fa7daf4d1356f7cb501ffe784ea6257 changed inplace updates
to wait for heap_update() commands like GRANT TABLE and GRANT DATABASE.
By keeping the pin during that wait, a sequence of autovacuum workers
and an uncommitted GRANT starved one foreground LockBufferForCleanup()
for six minutes, on buildfarm member sarus.  Prevent, at the cost of a
bit of complexity.  Back-patch to v12, like the earlier commit.  That
commit and heap_inplace_lock() have not yet appeared in any release.

Discussion: https://postgr.es/m/20241026184936[email protected]

src/backend/access/heap/heapam.c
src/backend/access/index/genam.c
src/include/access/heapam.h

index 75ff9e7388fd8fc3fc829b636eea1dc96020bb7e..1748eafa100bd4e98327e18bff1f080fc73d0f38 100644 (file)
@@ -6165,8 +6165,8 @@ heap_abort_speculative(Relation relation, ItemPointer tid)
  * transaction.  If compatible, return true with the buffer exclusive-locked,
  * and the caller must release that by calling
  * heap_inplace_update_and_unlock(), calling heap_inplace_unlock(), or raising
- * an error.  Otherwise, return false after blocking transactions, if any,
- * have ended.
+ * an error.  Otherwise, call release_callback(arg), wait for blocking
+ * transactions to end, and return false.
  *
  * Since this is intended for system catalogs and SERIALIZABLE doesn't cover
  * DDL, this doesn't guarantee any particular predicate locking.
@@ -6200,7 +6200,8 @@ heap_abort_speculative(Relation relation, ItemPointer tid)
  */
 bool
 heap_inplace_lock(Relation relation,
-                 HeapTuple oldtup_ptr, Buffer buffer)
+                 HeapTuple oldtup_ptr, Buffer buffer,
+                 void (*release_callback) (void *), void *arg)
 {
    HeapTupleData oldtup = *oldtup_ptr; /* minimize diff vs. heap_update() */
    TM_Result   result;
@@ -6265,6 +6266,7 @@ heap_inplace_lock(Relation relation,
                                        lockmode, NULL))
            {
                LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+               release_callback(arg);
                ret = false;
                MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
                                relation, &oldtup.t_self, XLTW_Update,
@@ -6280,6 +6282,7 @@ heap_inplace_lock(Relation relation,
        else
        {
            LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+           release_callback(arg);
            ret = false;
            XactLockTableWait(xwait, relation, &oldtup.t_self,
                              XLTW_Update);
@@ -6291,6 +6294,7 @@ heap_inplace_lock(Relation relation,
        if (!ret)
        {
            LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+           release_callback(arg);
        }
    }
 
index 69c36084321a0d7f4d05313e176c1f058f994e24..60c61039d66e9d87301043b432305133018537b4 100644 (file)
@@ -814,6 +814,7 @@ systable_inplace_update_begin(Relation relation,
    int         retries = 0;
    SysScanDesc scan;
    HeapTuple   oldtup;
+   BufferHeapTupleTableSlot *bslot;
 
    /*
     * For now, we don't allow parallel updates.  Unlike a regular update,
@@ -835,10 +836,9 @@ systable_inplace_update_begin(Relation relation,
    Assert(IsInplaceUpdateRelation(relation) || !IsSystemRelation(relation));
 
    /* Loop for an exclusive-locked buffer of a non-updated tuple. */
-   for (;;)
+   do
    {
        TupleTableSlot *slot;
-       BufferHeapTupleTableSlot *bslot;
 
        CHECK_FOR_INTERRUPTS();
 
@@ -864,11 +864,9 @@ systable_inplace_update_begin(Relation relation,
        slot = scan->slot;
        Assert(TTS_IS_BUFFERTUPLE(slot));
        bslot = (BufferHeapTupleTableSlot *) slot;
-       if (heap_inplace_lock(scan->heap_rel,
-                             bslot->base.tuple, bslot->buffer))
-           break;
-       systable_endscan(scan);
-   };
+   } while (!heap_inplace_lock(scan->heap_rel,
+                               bslot->base.tuple, bslot->buffer,
+                               (void (*) (void *)) systable_endscan, scan));
 
    *oldtupcopy = heap_copytuple(oldtup);
    *state = scan;
index b951466ced246f99539871fd66ae02c3c294eaa1..96cf82f97b754bffb9128942e3e8fd313238f96c 100644 (file)
@@ -338,7 +338,8 @@ extern TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
                                 Buffer *buffer, struct TM_FailureData *tmfd);
 
 extern bool heap_inplace_lock(Relation relation,
-                             HeapTuple oldtup_ptr, Buffer buffer);
+                             HeapTuple oldtup_ptr, Buffer buffer,
+                             void (*release_callback) (void *), void *arg);
 extern void heap_inplace_update_and_unlock(Relation relation,
                                           HeapTuple oldtup, HeapTuple tuple,
                                           Buffer buffer);