}
/*
- * heap_delete - delete a tuple, optionally fetching it into a slot
+ * heap_delete - delete a tuple
*
* See table_tuple_delete() for an explanation of the parameters, except that
- * this routine directly takes a tuple rather than a slot. Also, we don't
- * place a lock on the tuple in this function, just fetch the existing version.
+ * this routine directly takes a tuple rather than a slot.
*
* In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
* t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
*/
TM_Result
heap_delete(Relation relation, ItemPointer tid,
- CommandId cid, Snapshot crosscheck, int options,
- TM_FailureData *tmfd, bool changingPart,
- TupleTableSlot *oldSlot)
+ CommandId cid, Snapshot crosscheck, bool wait,
+ TM_FailureData *tmfd, bool changingPart)
{
TM_Result result;
TransactionId xid = GetCurrentTransactionId();
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("attempted to delete invisible tuple")));
}
- else if (result == TM_BeingModified && (options & TABLE_MODIFY_WAIT))
+ else if (result == TM_BeingModified && wait)
{
TransactionId xwait;
uint16 infomask;
tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
else
tmfd->cmax = InvalidCommandId;
-
- /*
- * If we're asked to lock the updated tuple, we just fetch the
- * existing tuple. That let's the caller save some resources on
- * placing the lock.
- */
- if (result == TM_Updated &&
- (options & TABLE_MODIFY_LOCK_UPDATED))
- {
- BufferHeapTupleTableSlot *bslot;
-
- Assert(TTS_IS_BUFFERTUPLE(oldSlot));
- bslot = (BufferHeapTupleTableSlot *) oldSlot;
-
- LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
- bslot->base.tupdata = tp;
- ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata,
- oldSlot,
- buffer);
- }
- else
- {
- UnlockReleaseBuffer(buffer);
- }
+ UnlockReleaseBuffer(buffer);
if (have_tuple_lock)
UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
if (vmbuffer != InvalidBuffer)
*/
CacheInvalidateHeapTuple(relation, &tp, NULL);
- /* Fetch the old tuple version if we're asked for that. */
- if (options & TABLE_MODIFY_FETCH_OLD_TUPLE)
- {
- BufferHeapTupleTableSlot *bslot;
-
- Assert(TTS_IS_BUFFERTUPLE(oldSlot));
- bslot = (BufferHeapTupleTableSlot *) oldSlot;
-
- bslot->base.tupdata = tp;
- ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata,
- oldSlot,
- buffer);
- }
- else
- {
- /* Now we can release the buffer */
- ReleaseBuffer(buffer);
- }
+ /* Now we can release the buffer */
+ ReleaseBuffer(buffer);
/*
* Release the lmgr tuple lock, if we had it.
result = heap_delete(relation, tid,
GetCurrentCommandId(true), InvalidSnapshot,
- TABLE_MODIFY_WAIT /* wait for commit */ ,
- &tmfd, false /* changingPart */ , NULL);
+ true /* wait for commit */ ,
+ &tmfd, false /* changingPart */ );
switch (result)
{
case TM_SelfModified:
}
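
For illustration, a minimal sketch of the result handling a direct caller of
heap_delete() needs for the failure codes documented above; "result" is assumed
to hold the value returned by a call like the one in simple_heap_delete():

    switch (result)
    {
        case TM_Ok:
            /* tuple deleted successfully */
            break;
        case TM_SelfModified:
            elog(ERROR, "tuple already updated by self");
            break;
        case TM_Updated:
            elog(ERROR, "tuple concurrently updated");
            break;
        case TM_Deleted:
            elog(ERROR, "tuple concurrently deleted");
            break;
        default:
            elog(ERROR, "unrecognized heap_delete status: %u", result);
            break;
    }
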
/*
- * heap_update - replace a tuple, optionally fetching it into a slot
+ * heap_update - replace a tuple
*
* See table_tuple_update() for an explanation of the parameters, except that
- * this routine directly takes a tuple rather than a slot. Also, we don't
- * place a lock on the tuple in this function, just fetch the existing version.
+ * this routine directly takes a tuple rather than a slot.
*
* In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
* t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
*/
TM_Result
heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
- CommandId cid, Snapshot crosscheck, int options,
+ CommandId cid, Snapshot crosscheck, bool wait,
TM_FailureData *tmfd, LockTupleMode *lockmode,
- TU_UpdateIndexes *update_indexes, TupleTableSlot *oldSlot)
+ TU_UpdateIndexes *update_indexes)
{
TM_Result result;
TransactionId xid = GetCurrentTransactionId();
result = HeapTupleSatisfiesUpdate(&oldtup, cid, buffer);
/* see below about the "no wait" case */
- Assert(result != TM_BeingModified || (options & TABLE_MODIFY_WAIT));
+ Assert(result != TM_BeingModified || wait);
if (result == TM_Invisible)
{
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("attempted to update invisible tuple")));
}
- else if (result == TM_BeingModified && (options & TABLE_MODIFY_WAIT))
+ else if (result == TM_BeingModified && wait)
{
TransactionId xwait;
uint16 infomask;
tmfd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
else
tmfd->cmax = InvalidCommandId;
-
- /*
- * If we're asked to lock the updated tuple, we just fetch the
- * existing tuple. That lets the caller save some resources on
- * placing the lock.
- */
- if (result == TM_Updated &&
- (options & TABLE_MODIFY_LOCK_UPDATED))
- {
- BufferHeapTupleTableSlot *bslot;
-
- Assert(TTS_IS_BUFFERTUPLE(oldSlot));
- bslot = (BufferHeapTupleTableSlot *) oldSlot;
-
- LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
- bslot->base.tupdata = oldtup;
- ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata,
- oldSlot,
- buffer);
- }
- else
- {
- UnlockReleaseBuffer(buffer);
- }
+ UnlockReleaseBuffer(buffer);
if (have_tuple_lock)
UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
if (vmbuffer != InvalidBuffer)
/* Now we can release the buffer(s) */
if (newbuf != buffer)
ReleaseBuffer(newbuf);
-
- /* Fetch the old tuple version if we're asked for that. */
- if (options & TABLE_MODIFY_FETCH_OLD_TUPLE)
- {
- BufferHeapTupleTableSlot *bslot;
-
- Assert(TTS_IS_BUFFERTUPLE(oldSlot));
- bslot = (BufferHeapTupleTableSlot *) oldSlot;
-
- bslot->base.tupdata = oldtup;
- ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata,
- oldSlot,
- buffer);
- }
- else
- {
- /* Now we can release the buffer */
- ReleaseBuffer(buffer);
- }
-
+ ReleaseBuffer(buffer);
if (BufferIsValid(vmbuffer_new))
ReleaseBuffer(vmbuffer_new);
if (BufferIsValid(vmbuffer))
result = heap_update(relation, otid, tup,
GetCurrentCommandId(true), InvalidSnapshot,
- TABLE_MODIFY_WAIT /* wait for commit */ ,
- &tmfd, &lockmode, update_indexes, NULL);
+ true /* wait for commit */ ,
+ &tmfd, &lockmode, update_indexes);
switch (result)
{
case TM_SelfModified:
* tuples.
*
* Output parameters:
- * *slot: BufferHeapTupleTableSlot filled with tuple
+ * *tuple: all fields filled in
+ * *buffer: set to buffer holding tuple (pinned but not locked at exit)
* *tmfd: filled in failure cases (see below)
*
* Function results are the same as the ones for table_tuple_lock().
*
- * If *slot already contains the target tuple, it takes advantage on that by
- * skipping the ReadBuffer() call.
- *
* In the failure cases other than TM_Invisible, the routine fills
* *tmfd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
* if necessary), and t_cmax (the last only for TM_SelfModified,
* See README.tuplock for a thorough explanation of this mechanism.
*/
TM_Result
-heap_lock_tuple(Relation relation, ItemPointer tid, TupleTableSlot *slot,
+heap_lock_tuple(Relation relation, HeapTuple tuple,
CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
- bool follow_updates, TM_FailureData *tmfd)
+ bool follow_updates,
+ Buffer *buffer, TM_FailureData *tmfd)
{
TM_Result result;
+ ItemPointer tid = &(tuple->t_self);
ItemId lp;
Page page;
- Buffer buffer;
Buffer vmbuffer = InvalidBuffer;
BlockNumber block;
TransactionId xid,
bool skip_tuple_lock = false;
bool have_tuple_lock = false;
bool cleared_all_frozen = false;
- BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
- HeapTuple tuple = &bslot->base.tupdata;
-
- Assert(TTS_IS_BUFFERTUPLE(slot));
- /* Take advantage if slot already contains the relevant tuple */
- if (!TTS_EMPTY(slot) &&
- slot->tts_tableOid == relation->rd_id &&
- ItemPointerCompare(&slot->tts_tid, tid) == 0 &&
- BufferIsValid(bslot->buffer))
- {
- buffer = bslot->buffer;
- IncrBufferRefCount(buffer);
- }
- else
- {
- buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
- }
+ *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
block = ItemPointerGetBlockNumber(tid);
/*
* in the middle of changing this, so we'll need to recheck after we have
* the lock.
*/
- if (PageIsAllVisible(BufferGetPage(buffer)))
+ if (PageIsAllVisible(BufferGetPage(*buffer)))
visibilitymap_pin(relation, block, &vmbuffer);
- LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
- page = BufferGetPage(buffer);
+ page = BufferGetPage(*buffer);
lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
Assert(ItemIdIsNormal(lp));
- tuple->t_self = *tid;
tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
tuple->t_len = ItemIdGetLength(lp);
tuple->t_tableOid = RelationGetRelid(relation);
l3:
- result = HeapTupleSatisfiesUpdate(tuple, cid, buffer);
+ result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
if (result == TM_Invisible)
{
infomask2 = tuple->t_data->t_infomask2;
ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
- LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
/*
* If any subtransaction of the current top transaction already holds
{
result = res;
/* recovery code expects to have buffer lock held */
- LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
goto failed;
}
}
- LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
/*
* Make sure it's still an appropriate lock, else start over.
if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
!HEAP_XMAX_IS_EXCL_LOCKED(infomask))
{
- LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
/*
* Make sure it's still an appropriate lock, else start over.
* No conflict, but if the xmax changed under us in the
* meantime, start over.
*/
- LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
xwait))
}
else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
{
- LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
/* if the xmax changed in the meantime, start over */
if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
TransactionIdIsCurrentTransactionId(xwait))
{
/* ... but if the xmax changed in the meantime, start over */
- LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
xwait))
*/
if (require_sleep && (result == TM_Updated || result == TM_Deleted))
{
- LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
goto failed;
}
else if (require_sleep)
*/
result = TM_WouldBlock;
/* recovery code expects to have buffer lock held */
- LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
goto failed;
}
{
result = TM_WouldBlock;
/* recovery code expects to have buffer lock held */
- LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
goto failed;
}
break;
{
result = TM_WouldBlock;
/* recovery code expects to have buffer lock held */
- LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
goto failed;
}
break;
{
result = res;
/* recovery code expects to have buffer lock held */
- LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
goto failed;
}
}
- LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
/*
* xwait is done, but if xwait had just locked the tuple then some
* don't check for this in the multixact case, because some
* locker transactions might still be running.
*/
- UpdateXmaxHintBits(tuple->t_data, buffer, xwait);
+ UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
}
}
*/
if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
{
- LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
visibilitymap_pin(relation, block, &vmbuffer);
- LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
goto l3;
}
cleared_all_frozen = true;
- MarkBufferDirty(buffer);
+ MarkBufferDirty(*buffer);
/*
* XLOG stuff. You might think that we don't need an XLOG record because
XLogRecPtr recptr;
XLogBeginInsert();
- XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
+ XLogRegisterBuffer(0, *buffer, REGBUF_STANDARD);
xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
xlrec.xmax = xid;
result = TM_Ok;
out_locked:
- LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
out_unlocked:
if (BufferIsValid(vmbuffer))
if (have_tuple_lock)
UnlockTupleTuplock(relation, tid, mode);
- /* Put the target tuple to the slot */
- ExecStorePinnedBufferHeapTuple(tuple, slot, buffer);
-
return result;
}
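
A minimal caller-side sketch of the contract restored above, assuming "rel",
"tid", "cid", and a buffer-capable "slot" are in scope: the caller seeds
tuple->t_self before the call, and the buffer comes back pinned but not
locked, so the pin must either be transferred to a slot or released
explicitly:

    BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
    HeapTuple   tuple = &bslot->base.tupdata;
    Buffer      buffer;
    TM_FailureData tmfd;
    TM_Result   result;

    Assert(TTS_IS_BUFFERTUPLE(slot));

    tuple->t_self = *tid;       /* caller supplies the TID to lock */
    result = heap_lock_tuple(rel, tuple, cid, LockTupleExclusive,
                             LockWaitBlock, false /* follow_updates */ ,
                             &buffer, &tmfd);

    if (result == TM_Ok)
        ExecStorePinnedBufferHeapTuple(tuple, slot, buffer);  /* pin moves to slot */
    else
        ReleaseBuffer(buffer);  /* still pinned on failure; drop the pin */
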
#include "utils/builtins.h"
#include "utils/rel.h"
-static TM_Result heapam_tuple_lock(Relation relation, ItemPointer tid,
- Snapshot snapshot, TupleTableSlot *slot,
- CommandId cid, LockTupleMode mode,
- LockWaitPolicy wait_policy, uint8 flags,
- TM_FailureData *tmfd);
static void reform_and_rewrite_tuple(HeapTuple tuple,
Relation OldHeap, Relation NewHeap,
Datum *values, bool *isnull, RewriteState rwstate);
static TM_Result
heapam_tuple_delete(Relation relation, ItemPointer tid, CommandId cid,
- Snapshot snapshot, Snapshot crosscheck, int options,
- TM_FailureData *tmfd, bool changingPart,
- TupleTableSlot *oldSlot)
+ Snapshot snapshot, Snapshot crosscheck, bool wait,
+ TM_FailureData *tmfd, bool changingPart)
{
- TM_Result result;
-
/*
* Currently Deleting of index tuples are handled at vacuum, in case if
* the storage itself is cleaning the dead tuples by itself, it is the
* time to call the index tuple deletion also.
*/
- result = heap_delete(relation, tid, cid, crosscheck, options,
- tmfd, changingPart, oldSlot);
-
- /*
- * If the tuple has been concurrently updated, then get the lock on it.
- * (Do only if caller asked for this by setting the
- * TABLE_MODIFY_LOCK_UPDATED option) With the lock held retry of the
- * delete should succeed even if there are more concurrent update
- * attempts.
- */
- if (result == TM_Updated && (options & TABLE_MODIFY_LOCK_UPDATED))
- {
- /*
- * heapam_tuple_lock() will take advantage of tuple loaded into
- * oldSlot by heap_delete().
- */
- result = heapam_tuple_lock(relation, tid, snapshot,
- oldSlot, cid, LockTupleExclusive,
- (options & TABLE_MODIFY_WAIT) ?
- LockWaitBlock :
- LockWaitSkip,
- TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
- tmfd);
-
- if (result == TM_Ok)
- return TM_Updated;
- }
-
- return result;
+ return heap_delete(relation, tid, cid, crosscheck, wait, tmfd, changingPart);
}
static TM_Result
heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
CommandId cid, Snapshot snapshot, Snapshot crosscheck,
- int options, TM_FailureData *tmfd,
- LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes,
- TupleTableSlot *oldSlot)
+ bool wait, TM_FailureData *tmfd,
+ LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes)
{
bool shouldFree = true;
HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
slot->tts_tableOid = RelationGetRelid(relation);
tuple->t_tableOid = slot->tts_tableOid;
- result = heap_update(relation, otid, tuple, cid, crosscheck, options,
- tmfd, lockmode, update_indexes, oldSlot);
+ result = heap_update(relation, otid, tuple, cid, crosscheck, wait,
+ tmfd, lockmode, update_indexes);
ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
/*
if (shouldFree)
pfree(tuple);
- /*
- * If the tuple has been concurrently updated, then get the lock on it.
- * (Do only if caller asked for this by setting the
- * TABLE_MODIFY_LOCK_UPDATED option) With the lock held retry of the
- * update should succeed even if there are more concurrent update
- * attempts.
- */
- if (result == TM_Updated && (options & TABLE_MODIFY_LOCK_UPDATED))
- {
- /*
- * heapam_tuple_lock() will take advantage of tuple loaded into
- * oldSlot by heap_update().
- */
- result = heapam_tuple_lock(relation, otid, snapshot,
- oldSlot, cid, *lockmode,
- (options & TABLE_MODIFY_WAIT) ?
- LockWaitBlock :
- LockWaitSkip,
- TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
- tmfd);
-
- if (result == TM_Ok)
- return TM_Updated;
- }
-
return result;
}
{
BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
TM_Result result;
+ Buffer buffer;
HeapTuple tuple = &bslot->base.tupdata;
bool follow_updates;
Assert(TTS_IS_BUFFERTUPLE(slot));
tuple_lock_retry:
- result = heap_lock_tuple(relation, tid, slot, cid, mode, wait_policy,
- follow_updates, tmfd);
+ tuple->t_self = *tid;
+ result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy,
+ follow_updates, &buffer, tmfd);
if (result == TM_Updated &&
(flags & TUPLE_LOCK_FLAG_FIND_LAST_VERSION))
/* Should not encounter speculative tuple on recheck */
Assert(!HeapTupleHeaderIsSpeculative(tuple->t_data));
+ ReleaseBuffer(buffer);
+
if (!ItemPointerEquals(&tmfd->ctid, &tuple->t_self))
{
SnapshotData SnapshotDirty;
InitDirtySnapshot(SnapshotDirty);
for (;;)
{
- Buffer buffer = InvalidBuffer;
-
if (ItemPointerIndicatesMovedPartitions(tid))
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
/*
* This is a live tuple, so try to lock it again.
*/
- ExecStorePinnedBufferHeapTuple(tuple, slot, buffer);
+ ReleaseBuffer(buffer);
goto tuple_lock_retry;
}
*/
if (tuple->t_data == NULL)
{
- ReleaseBuffer(buffer);
+ Assert(!BufferIsValid(buffer));
return TM_Deleted;
}
slot->tts_tableOid = RelationGetRelid(relation);
tuple->t_tableOid = slot->tts_tableOid;
+ /* store in slot, transferring existing pin */
+ ExecStorePinnedBufferHeapTuple(tuple, slot, buffer);
+
return result;
}
* via ereport().
*/
void
-simple_table_tuple_delete(Relation rel, ItemPointer tid, Snapshot snapshot,
- TupleTableSlot *oldSlot)
+simple_table_tuple_delete(Relation rel, ItemPointer tid, Snapshot snapshot)
{
TM_Result result;
TM_FailureData tmfd;
- int options = TABLE_MODIFY_WAIT; /* wait for commit */
-
- /* Fetch old tuple if the relevant slot is provided */
- if (oldSlot)
- options |= TABLE_MODIFY_FETCH_OLD_TUPLE;
result = table_tuple_delete(rel, tid,
GetCurrentCommandId(true),
snapshot, InvalidSnapshot,
- options,
- &tmfd, false /* changingPart */ ,
- oldSlot);
+ true /* wait for commit */ ,
+ &tmfd, false /* changingPart */ );
switch (result)
{
simple_table_tuple_update(Relation rel, ItemPointer otid,
TupleTableSlot *slot,
Snapshot snapshot,
- TU_UpdateIndexes *update_indexes,
- TupleTableSlot *oldSlot)
+ TU_UpdateIndexes *update_indexes)
{
TM_Result result;
TM_FailureData tmfd;
LockTupleMode lockmode;
- int options = TABLE_MODIFY_WAIT; /* wait for commit */
-
- /* Fetch old tuple if the relevant slot is provided */
- if (oldSlot)
- options |= TABLE_MODIFY_FETCH_OLD_TUPLE;
result = table_tuple_update(rel, otid, slot,
GetCurrentCommandId(true),
snapshot, InvalidSnapshot,
- options,
- &tmfd, &lockmode, update_indexes,
- oldSlot);
+ true /* wait for commit */ ,
+ &tmfd, &lockmode, update_indexes);
switch (result)
{
void
ExecARDeleteTriggers(EState *estate,
ResultRelInfo *relinfo,
+ ItemPointer tupleid,
HeapTuple fdw_trigtuple,
- TupleTableSlot *slot,
TransitionCaptureState *transition_capture,
bool is_crosspart_update)
{
if ((trigdesc && trigdesc->trig_delete_after_row) ||
(transition_capture && transition_capture->tcs_delete_old_table))
{
- /*
- * Put the FDW old tuple to the slot. Otherwise, the caller is
- * expected to have an old tuple already fetched to the slot.
- */
- if (fdw_trigtuple != NULL)
+ TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
+
+ Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
+ if (fdw_trigtuple == NULL)
+ GetTupleForTrigger(estate,
+ NULL,
+ relinfo,
+ tupleid,
+ LockTupleExclusive,
+ slot,
+ NULL,
+ NULL,
+ NULL);
+ else
ExecForceStoreHeapTuple(fdw_trigtuple, slot, false);
AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
* Note: 'src_partinfo' and 'dst_partinfo', when non-NULL, refer to the source
* and destination partitions, respectively, of a cross-partition update of
* the root partitioned table mentioned in the query, given by 'relinfo'.
- * 'oldslot' contains the "old" tuple in the source partition, and 'newslot'
- * contains the "new" tuple in the destination partition. This interface
- * allows to support the requirements of ExecCrossPartitionUpdateForeignKey();
- * is_crosspart_update must be true in that case.
+ * 'tupleid' in that case refers to the ctid of the "old" tuple in the source
+ * partition, and 'newslot' contains the "new" tuple in the destination
+ * partition. This interface makes it possible to support the requirements of
+ * ExecCrossPartitionUpdateForeignKey(); is_crosspart_update must be true in
+ * that case.
*/
void
ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
ResultRelInfo *src_partinfo,
ResultRelInfo *dst_partinfo,
+ ItemPointer tupleid,
HeapTuple fdw_trigtuple,
- TupleTableSlot *oldslot,
TupleTableSlot *newslot,
List *recheckIndexes,
TransitionCaptureState *transition_capture,
* separately for DELETE and INSERT to capture transition table rows.
* In such case, either old tuple or new tuple can be NULL.
*/
+ TupleTableSlot *oldslot;
+ ResultRelInfo *tupsrc;
+
Assert((src_partinfo != NULL && dst_partinfo != NULL) ||
!is_crosspart_update);
- if (fdw_trigtuple != NULL)
- {
- Assert(oldslot);
+ tupsrc = src_partinfo ? src_partinfo : relinfo;
+ oldslot = ExecGetTriggerOldSlot(estate, tupsrc);
+
+ if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
+ GetTupleForTrigger(estate,
+ NULL,
+ tupsrc,
+ tupleid,
+ LockTupleExclusive,
+ oldslot,
+ NULL,
+ NULL,
+ NULL);
+ else if (fdw_trigtuple != NULL)
ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
- }
+ else
+ ExecClearTuple(oldslot);
AfterTriggerSaveEvent(estate, relinfo,
src_partinfo, dst_partinfo,
{
List *recheckIndexes = NIL;
TU_UpdateIndexes update_indexes;
- TupleTableSlot *oldSlot = NULL;
/* Compute stored generated columns */
if (rel->rd_att->constr &&
if (rel->rd_rel->relispartition)
ExecPartitionCheck(resultRelInfo, slot, estate, true);
- if (resultRelInfo->ri_TrigDesc &&
- resultRelInfo->ri_TrigDesc->trig_update_after_row)
- oldSlot = ExecGetTriggerOldSlot(estate, resultRelInfo);
-
simple_table_tuple_update(rel, tid, slot, estate->es_snapshot,
- &update_indexes, oldSlot);
+ &update_indexes);
if (resultRelInfo->ri_NumIndices > 0 && (update_indexes != TU_None))
recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
/* AFTER ROW UPDATE Triggers */
ExecARUpdateTriggers(estate, resultRelInfo,
NULL, NULL,
- NULL, oldSlot, slot,
+ tid, NULL, slot,
recheckIndexes, NULL, false);
list_free(recheckIndexes);
if (!skip_tuple)
{
- TupleTableSlot *oldSlot = NULL;
-
- if (resultRelInfo->ri_TrigDesc &&
- resultRelInfo->ri_TrigDesc->trig_delete_after_row)
- oldSlot = ExecGetTriggerOldSlot(estate, resultRelInfo);
-
/* OK, delete the tuple */
- simple_table_tuple_delete(rel, tid, estate->es_snapshot, oldSlot);
+ simple_table_tuple_delete(rel, tid, estate->es_snapshot);
/* AFTER ROW DELETE Triggers */
ExecARDeleteTriggers(estate, resultRelInfo,
- NULL, oldSlot, NULL, false);
+ tid, NULL, NULL, false);
}
}
table_slot_create(resultRelInfo->ri_RelationDesc,
&estate->es_tupleTable);
- /*
- * In the ON CONFLICT UPDATE case, we will also need a slot for the old
- * tuple to calculate the updated tuple on its base.
- */
- if (node->onConflictAction == ONCONFLICT_UPDATE)
- resultRelInfo->ri_oldTupleSlot =
- table_slot_create(resultRelInfo->ri_RelationDesc,
- &estate->es_tupleTable);
-
/* Build ProjectionInfo if needed (it probably isn't). */
if (need_projection)
{
ExecARUpdateTriggers(estate, resultRelInfo,
NULL, NULL,
NULL,
- resultRelInfo->ri_oldTupleSlot,
+ NULL,
slot,
NULL,
mtstate->mt_transition_capture,
*/
static TM_Result
ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
- ItemPointer tupleid, bool changingPart, int options,
- TupleTableSlot *oldSlot)
+ ItemPointer tupleid, bool changingPart)
{
EState *estate = context->estate;
estate->es_output_cid,
estate->es_snapshot,
estate->es_crosscheck_snapshot,
- options,
+ true /* wait for commit */ ,
&context->tmfd,
- changingPart,
- oldSlot);
+ changingPart);
}
/*
* Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
* including the UPDATE triggers if the deletion is being done as part of a
* cross-partition tuple move.
- *
- * The old tuple is already fetched into 'slot' for regular tables. For FDW,
- * the old tuple is given as 'oldtuple' and is to be stored in 'slot' when
- * needed.
*/
static void
ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
- ItemPointer tupleid, HeapTuple oldtuple,
- TupleTableSlot *slot, bool changingPart)
+ ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
{
ModifyTableState *mtstate = context->mtstate;
EState *estate = context->estate;
{
ExecARUpdateTriggers(estate, resultRelInfo,
NULL, NULL,
- oldtuple,
- slot, NULL, NULL, mtstate->mt_transition_capture,
+ tupleid, oldtuple,
+ NULL, NULL, mtstate->mt_transition_capture,
false);
/*
}
/* AFTER ROW DELETE Triggers */
- ExecARDeleteTriggers(estate, resultRelInfo, oldtuple, slot,
+ ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
ar_delete_trig_tcs, changingPart);
}
-/*
- * Initializes the tuple slot in a ResultRelInfo for DELETE action.
- *
- * We mark 'projectNewInfoValid' even though the projections themselves
- * are not initialized here.
- */
-static void
-ExecInitDeleteTupleSlot(ModifyTableState *mtstate,
- ResultRelInfo *resultRelInfo)
-{
- EState *estate = mtstate->ps.state;
-
- Assert(!resultRelInfo->ri_projectNewInfoValid);
-
- resultRelInfo->ri_oldTupleSlot =
- table_slot_create(resultRelInfo->ri_RelationDesc,
- &estate->es_tupleTable);
- resultRelInfo->ri_projectNewInfoValid = true;
-}
-
/* ----------------------------------------------------------------
* ExecDelete
*
* part of an UPDATE of partition-key, then the slot returned by
* EvalPlanQual() is passed back using output parameter epqreturnslot.
*
- * Returns RETURNING result if any, otherwise NULL. The deleted tuple
- * to be stored into oldslot independently that.
+ * Returns RETURNING result if any, otherwise NULL.
* ----------------------------------------------------------------
*/
static TupleTableSlot *
ResultRelInfo *resultRelInfo,
ItemPointer tupleid,
HeapTuple oldtuple,
- TupleTableSlot *oldslot,
bool processReturning,
bool changingPart,
bool canSetTag,
}
else
{
- int options = TABLE_MODIFY_WAIT | TABLE_MODIFY_FETCH_OLD_TUPLE;
-
- /*
- * Specify that we need to lock and fetch the last tuple version for
- * EPQ on appropriate transaction isolation levels.
- */
- if (!IsolationUsesXactSnapshot())
- options |= TABLE_MODIFY_LOCK_UPDATED;
-
/*
* delete the tuple
*
* transaction-snapshot mode transactions.
*/
ldelete:
- result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart,
- options, oldslot);
+ result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
if (tmresult)
*tmresult = result;
case TM_Updated:
{
+ TupleTableSlot *inputslot;
TupleTableSlot *epqslot;
if (IsolationUsesXactSnapshot())
errmsg("could not serialize access due to concurrent update")));
/*
- * We need to do EPQ. The latest tuple is already found
- * and locked as a result of TABLE_MODIFY_LOCK_UPDATED.
+ * Already know that we're going to need to do EPQ, so
+ * fetch tuple directly into the right slot.
*/
- Assert(context->tmfd.traversed);
- epqslot = EvalPlanQual(context->epqstate,
- resultRelationDesc,
- resultRelInfo->ri_RangeTableIndex,
- oldslot);
- if (TupIsNull(epqslot))
- /* Tuple not passing quals anymore, exiting... */
- return NULL;
+ EvalPlanQualBegin(context->epqstate);
+ inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
+ resultRelInfo->ri_RangeTableIndex);
- /*
- * If requested, skip delete and pass back the updated
- * row.
- */
- if (epqreturnslot)
+ result = table_tuple_lock(resultRelationDesc, tupleid,
+ estate->es_snapshot,
+ inputslot, estate->es_output_cid,
+ LockTupleExclusive, LockWaitBlock,
+ TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
+ &context->tmfd);
+
+ switch (result)
{
- *epqreturnslot = epqslot;
- return NULL;
+ case TM_Ok:
+ Assert(context->tmfd.traversed);
+ epqslot = EvalPlanQual(context->epqstate,
+ resultRelationDesc,
+ resultRelInfo->ri_RangeTableIndex,
+ inputslot);
+ if (TupIsNull(epqslot))
+ /* Tuple not passing quals anymore, exiting... */
+ return NULL;
+
+ /*
+ * If requested, skip delete and pass back the
+ * updated row.
+ */
+ if (epqreturnslot)
+ {
+ *epqreturnslot = epqslot;
+ return NULL;
+ }
+ else
+ goto ldelete;
+
+ case TM_SelfModified:
+
+ /*
+ * This can be reached when following an update
+ * chain from a tuple updated by another session,
+ * reaching a tuple that was already updated in
+ * this transaction. If previously updated by this
+ * command, ignore the delete, otherwise error
+ * out.
+ *
+ * See also TM_SelfModified response to
+ * table_tuple_delete() above.
+ */
+ if (context->tmfd.cmax != estate->es_output_cid)
+ ereport(ERROR,
+ (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
+ errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
+ errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
+ return NULL;
+
+ case TM_Deleted:
+ /* tuple already deleted; nothing to do */
+ return NULL;
+
+ default:
+
+ /*
+ * TM_Invisible should be impossible because we're
+ * waiting for updated row versions, and would
+ * already have errored out if the first version
+ * is invisible.
+ *
+ * TM_Updated should be impossible, because we're
+ * locking the latest version via
+ * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
+ */
+ elog(ERROR, "unexpected table_tuple_lock status: %u",
+ result);
+ return NULL;
}
- else
- goto ldelete;
+
+ Assert(false);
+ break;
}
case TM_Deleted:
if (tupleDeleted)
*tupleDeleted = true;
- ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple,
- oldslot, changingPart);
+ ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
/* Process RETURNING if present and if requested */
if (processReturning && resultRelInfo->ri_projectReturning)
}
else
{
- /* Copy old tuple to the returning slot */
slot = ExecGetReturningSlot(estate, resultRelInfo);
if (oldtuple != NULL)
+ {
ExecForceStoreHeapTuple(oldtuple, slot, false);
+ }
else
- ExecCopySlot(slot, oldslot);
- Assert(!TupIsNull(slot));
+ {
+ if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
+ SnapshotAny, slot))
+ elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
+ }
}
rslot = ExecProcessReturning(resultRelInfo, slot, context->planSlot);
MemoryContextSwitchTo(oldcxt);
}
- /*
- * Make sure ri_oldTupleSlot is initialized. The old tuple is to be saved
- * there by ExecDelete() to save effort on further re-fetching.
- */
- if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
- ExecInitUpdateProjection(mtstate, resultRelInfo);
-
/*
* Row movement, part 1. Delete the tuple, but skip RETURNING processing.
* We want to return rows from INSERT.
*/
ExecDelete(context, resultRelInfo,
- tupleid, oldtuple, resultRelInfo->ri_oldTupleSlot,
+ tupleid, oldtuple,
false, /* processReturning */
true, /* changingPart */
false, /* canSetTag */
return true;
else
{
- /*
- * ExecDelete already fetches the most recent version of old tuple
- * to resultRelInfo->ri_oldTupleSlot. So, just project the new
- * tuple to retry the UPDATE with.
- */
+ /* Fetch the most recent version of old tuple. */
+ TupleTableSlot *oldSlot;
+
+ /* ... but first, make sure ri_oldTupleSlot is initialized. */
+ if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
+ ExecInitUpdateProjection(mtstate, resultRelInfo);
+ oldSlot = resultRelInfo->ri_oldTupleSlot;
+ if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
+ tupleid,
+ SnapshotAny,
+ oldSlot))
+ elog(ERROR, "failed to fetch tuple being updated");
+ /* and project the new tuple to retry the UPDATE with */
*retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
- resultRelInfo->ri_oldTupleSlot);
+ oldSlot);
return false;
}
}
static TM_Result
ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
- bool canSetTag, int options, TupleTableSlot *oldSlot,
- UpdateContext *updateCxt)
+ bool canSetTag, UpdateContext *updateCxt)
{
EState *estate = context->estate;
Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
ExecCrossPartitionUpdateForeignKey(context,
resultRelInfo,
insert_destrel,
- tupleid,
- resultRelInfo->ri_oldTupleSlot,
+ tupleid, slot,
inserted_tuple);
return TM_Ok;
estate->es_output_cid,
estate->es_snapshot,
estate->es_crosscheck_snapshot,
- options /* wait for commit */ ,
+ true /* wait for commit */ ,
&context->tmfd, &updateCxt->lockmode,
- &updateCxt->updateIndexes,
- oldSlot);
+ &updateCxt->updateIndexes);
return result;
}
static void
ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
ResultRelInfo *resultRelInfo, ItemPointer tupleid,
- HeapTuple oldtuple, TupleTableSlot *slot,
- TupleTableSlot *oldslot)
+ HeapTuple oldtuple, TupleTableSlot *slot)
{
ModifyTableState *mtstate = context->mtstate;
List *recheckIndexes = NIL;
/* AFTER ROW UPDATE Triggers */
ExecARUpdateTriggers(context->estate, resultRelInfo,
NULL, NULL,
- oldtuple, oldslot, slot,
+ tupleid, oldtuple, slot,
recheckIndexes,
mtstate->operation == CMD_INSERT ?
mtstate->mt_oc_transition_capture :
/* Perform the root table's triggers. */
ExecARUpdateTriggers(context->estate,
rootRelInfo, sourcePartInfo, destPartInfo,
- NULL, oldslot, newslot, NIL, NULL, true);
+ tupleid, NULL, newslot, NIL, NULL, true);
}
/* ----------------------------------------------------------------
* no relevant triggers.
*
* slot contains the new tuple value to be stored.
- * oldslot is the slot to store the old tuple.
* planSlot is the output of the ModifyTable's subplan; we use it
* to access values from other input tables (for RETURNING),
* row-ID junk columns, etc.
static TupleTableSlot *
ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
- TupleTableSlot *oldslot, bool canSetTag, bool locked)
+ bool canSetTag)
{
EState *estate = context->estate;
Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
}
else
{
- int options = TABLE_MODIFY_WAIT | TABLE_MODIFY_FETCH_OLD_TUPLE;
-
- /*
- * Specify that we need to lock and fetch the last tuple version for
- * EPQ on appropriate transaction isolation levels if the tuple isn't
- * locked already.
- */
- if (!locked && !IsolationUsesXactSnapshot())
- options |= TABLE_MODIFY_LOCK_UPDATED;
-
/*
* If we generate a new candidate tuple after EvalPlanQual testing, we
* must loop back here to try again. (We don't need to redo triggers,
*/
redo_act:
result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
- canSetTag, options, oldslot, &updateCxt);
+ canSetTag, &updateCxt);
/*
* If ExecUpdateAct reports that a cross-partition update was done,
case TM_Updated:
{
+ TupleTableSlot *inputslot;
TupleTableSlot *epqslot;
+ TupleTableSlot *oldSlot;
if (IsolationUsesXactSnapshot())
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to concurrent update")));
- /* Shouldn't get there if the tuple was previously locked */
- Assert(!locked);
-
/*
- * We need to do EPQ. The latest tuple is already found
- * and locked as a result of TABLE_MODIFY_LOCK_UPDATED.
+ * Already know that we're going to need to do EPQ, so
+ * fetch tuple directly into the right slot.
*/
- Assert(context->tmfd.traversed);
- epqslot = EvalPlanQual(context->epqstate,
- resultRelationDesc,
- resultRelInfo->ri_RangeTableIndex,
- oldslot);
- if (TupIsNull(epqslot))
- /* Tuple not passing quals anymore, exiting... */
- return NULL;
- slot = ExecGetUpdateNewTuple(resultRelInfo,
- epqslot,
- oldslot);
- goto redo_act;
+ inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
+ resultRelInfo->ri_RangeTableIndex);
+
+ result = table_tuple_lock(resultRelationDesc, tupleid,
+ estate->es_snapshot,
+ inputslot, estate->es_output_cid,
+ updateCxt.lockmode, LockWaitBlock,
+ TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
+ &context->tmfd);
+
+ switch (result)
+ {
+ case TM_Ok:
+ Assert(context->tmfd.traversed);
+
+ epqslot = EvalPlanQual(context->epqstate,
+ resultRelationDesc,
+ resultRelInfo->ri_RangeTableIndex,
+ inputslot);
+ if (TupIsNull(epqslot))
+ /* Tuple not passing quals anymore, exiting... */
+ return NULL;
+
+ /* Make sure ri_oldTupleSlot is initialized. */
+ if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
+ ExecInitUpdateProjection(context->mtstate,
+ resultRelInfo);
+
+ /* Fetch the most recent version of old tuple. */
+ oldSlot = resultRelInfo->ri_oldTupleSlot;
+ if (!table_tuple_fetch_row_version(resultRelationDesc,
+ tupleid,
+ SnapshotAny,
+ oldSlot))
+ elog(ERROR, "failed to fetch tuple being updated");
+ slot = ExecGetUpdateNewTuple(resultRelInfo,
+ epqslot, oldSlot);
+ goto redo_act;
+
+ case TM_Deleted:
+ /* tuple already deleted; nothing to do */
+ return NULL;
+
+ case TM_SelfModified:
+
+ /*
+ * This can be reached when following an update
+ * chain from a tuple updated by another session,
+ * reaching a tuple that was already updated in
+ * this transaction. If previously modified by
+ * this command, ignore the redundant update,
+ * otherwise error out.
+ *
+ * See also TM_SelfModified response to
+ * table_tuple_update() above.
+ */
+ if (context->tmfd.cmax != estate->es_output_cid)
+ ereport(ERROR,
+ (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
+ errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
+ errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
+ return NULL;
+
+ default:
+ /* see table_tuple_lock call in ExecDelete() */
+ elog(ERROR, "unexpected table_tuple_lock status: %u",
+ result);
+ return NULL;
+ }
}
break;
(estate->es_processed)++;
ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
- slot, oldslot);
+ slot);
/* Process RETURNING if present */
if (resultRelInfo->ri_projectReturning)
*returning = ExecUpdate(context, resultRelInfo,
conflictTid, NULL,
resultRelInfo->ri_onConflict->oc_ProjSlot,
- existing,
- canSetTag, true);
+ canSetTag);
/*
* Clear out existing tuple, as there might not be another conflict among
{
result = ExecUpdateAct(context, resultRelInfo, tupleid,
NULL, newslot, canSetTag,
- TABLE_MODIFY_WAIT, NULL,
&updateCxt);
/*
if (result == TM_Ok)
{
ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
- tupleid, NULL, newslot,
- resultRelInfo->ri_oldTupleSlot);
+ tupleid, NULL, newslot);
mtstate->mt_merge_updated += 1;
}
break;
}
else
result = ExecDeleteAct(context, resultRelInfo, tupleid,
- false, TABLE_MODIFY_WAIT, NULL);
+ false);
if (result == TM_Ok)
{
ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
- resultRelInfo->ri_oldTupleSlot, false);
+ false);
mtstate->mt_merge_deleted += 1;
}
break;
/* Now apply the update. */
slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
- slot, resultRelInfo->ri_oldTupleSlot,
- node->canSetTag, false);
+ slot, node->canSetTag);
break;
case CMD_DELETE:
- /* Initialize slot for DELETE to fetch the old tuple */
- if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
- ExecInitDeleteTupleSlot(node, resultRelInfo);
-
slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
- resultRelInfo->ri_oldTupleSlot, true, false,
- node->canSetTag, NULL, NULL, NULL);
+ true, false, node->canSetTag, NULL, NULL, NULL);
break;
case CMD_MERGE:
int ntuples, CommandId cid, int options,
BulkInsertState bistate);
extern TM_Result heap_delete(Relation relation, ItemPointer tid,
- CommandId cid, Snapshot crosscheck, int options,
- struct TM_FailureData *tmfd, bool changingPart,
- TupleTableSlot *oldSlot);
+ CommandId cid, Snapshot crosscheck, bool wait,
+ struct TM_FailureData *tmfd, bool changingPart);
extern void heap_finish_speculative(Relation relation, ItemPointer tid);
extern void heap_abort_speculative(Relation relation, ItemPointer tid);
extern TM_Result heap_update(Relation relation, ItemPointer otid,
HeapTuple newtup,
- CommandId cid, Snapshot crosscheck, int options,
+ CommandId cid, Snapshot crosscheck, bool wait,
struct TM_FailureData *tmfd, LockTupleMode *lockmode,
- TU_UpdateIndexes *update_indexes,
- TupleTableSlot *oldSlot);
-extern TM_Result heap_lock_tuple(Relation relation, ItemPointer tid,
- TupleTableSlot *slot,
- CommandId cid, LockTupleMode mode,
- LockWaitPolicy wait_policy, bool follow_updates,
- struct TM_FailureData *tmfd);
+ TU_UpdateIndexes *update_indexes);
+extern TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
+ CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
+ bool follow_updates,
+ Buffer *buffer, struct TM_FailureData *tmfd);
extern void heap_inplace_update(Relation relation, HeapTuple tuple);
extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
/* Follow update chain and lock latest version of tuple */
#define TUPLE_LOCK_FLAG_FIND_LAST_VERSION (1 << 1)
-/*
- * "options" flag bits for table_tuple_update and table_tuple_delete,
- * Wait for any conflicting update to commit/abort */
-#define TABLE_MODIFY_WAIT 0x0001
-/* Fetch the existing tuple into a dedicated slot */
-#define TABLE_MODIFY_FETCH_OLD_TUPLE 0x0002
-/* On concurrent update, follow the update chain and lock latest version of tuple */
-#define TABLE_MODIFY_LOCK_UPDATED 0x0004
-
/* Typedef for callback function for table_index_build_scan */
typedef void (*IndexBuildCallback) (Relation index,
CommandId cid,
Snapshot snapshot,
Snapshot crosscheck,
- int options,
+ bool wait,
TM_FailureData *tmfd,
- bool changingPart,
- TupleTableSlot *oldSlot);
+ bool changingPart);
/* see table_tuple_update() for reference about parameters */
TM_Result (*tuple_update) (Relation rel,
CommandId cid,
Snapshot snapshot,
Snapshot crosscheck,
- int options,
+ bool wait,
TM_FailureData *tmfd,
LockTupleMode *lockmode,
- TU_UpdateIndexes *update_indexes,
- TupleTableSlot *oldSlot);
+ TU_UpdateIndexes *update_indexes);
/* see table_tuple_lock() for reference about parameters */
TM_Result (*tuple_lock) (Relation rel,
}
/*
- * Delete a tuple (and optionally lock the last tuple version).
+ * Delete a tuple.
*
* NB: do not call this directly unless prepared to deal with
* concurrent-update conditions. Use simple_table_tuple_delete instead.
* cid - delete command ID (used for visibility test, and stored into
* cmax if successful)
* crosscheck - if not InvalidSnapshot, also check tuple against this
- * options:
- * If TABLE_MODIFY_WAIT, wait for any conflicting update to commit/abort.
- * If TABLE_MODIFY_FETCH_OLD_TUPLE option is given, the existing tuple is
- * fetched into oldSlot when the update is successful.
- * If TABLE_MODIFY_LOCK_UPDATED option is given and the tuple is
- * concurrently updated, then the last tuple version is locked and fetched
- * into oldSlot.
- *
+ * wait - true if should wait for any conflicting update to commit/abort
* Output parameters:
* tmfd - filled in failure cases (see below)
* changingPart - true iff the tuple is being moved to another partition
* table due to an update of the partition key. Otherwise, false.
- * oldSlot - slot to save the deleted or locked tuple. Can be NULL if none of
- * TABLE_MODIFY_FETCH_OLD_TUPLE or TABLE_MODIFY_LOCK_UPDATED options
- * is specified.
*
* Normal, successful return value is TM_Ok, which means we did actually
* delete it. Failure return codes are TM_SelfModified, TM_Updated, and
*/
static inline TM_Result
table_tuple_delete(Relation rel, ItemPointer tid, CommandId cid,
- Snapshot snapshot, Snapshot crosscheck, int options,
- TM_FailureData *tmfd, bool changingPart,
- TupleTableSlot *oldSlot)
+ Snapshot snapshot, Snapshot crosscheck, bool wait,
+ TM_FailureData *tmfd, bool changingPart)
{
return rel->rd_tableam->tuple_delete(rel, tid, cid,
snapshot, crosscheck,
- options, tmfd, changingPart,
- oldSlot);
+ wait, tmfd, changingPart);
}
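
To illustrate the "prepared to deal with concurrent-update conditions" caveat
above, a sketch of the executor-style handling this patch restores; "rel",
"tid", "slot", and "estate" are assumed to exist in the caller, and a
successful re-lock would normally be followed by an EvalPlanQual recheck:

    TM_FailureData tmfd;
    TM_Result   result;

    result = table_tuple_delete(rel, tid, GetCurrentCommandId(true),
                                estate->es_snapshot, InvalidSnapshot,
                                true /* wait for commit */ ,
                                &tmfd, false /* changingPart */ );

    if (result == TM_Updated && !IsolationUsesXactSnapshot())
    {
        /* Lock the latest tuple version, then redo the qual checks. */
        result = table_tuple_lock(rel, tid, estate->es_snapshot, slot,
                                  estate->es_output_cid,
                                  LockTupleExclusive, LockWaitBlock,
                                  TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
                                  &tmfd);
    }
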
/*
- * Update a tuple (and optionally lock the last tuple version).
+ * Update a tuple.
*
* NB: do not call this directly unless you are prepared to deal with
* concurrent-update conditions. Use simple_table_tuple_update instead.
* cid - update command ID (used for visibility test, and stored into
* cmax/cmin if successful)
* crosscheck - if not InvalidSnapshot, also check old tuple against this
- * options:
- * If TABLE_MODIFY_WAIT, wait for any conflicting update to commit/abort.
- * If TABLE_MODIFY_FETCH_OLD_TUPLE option is given, the existing tuple is
- * fetched into oldSlot when the update is successful.
- * If TABLE_MODIFY_LOCK_UPDATED option is given and the tuple is
- * concurrently updated, then the last tuple version is locked and fetched
- * into oldSlot.
- *
+ * wait - true if should wait for any conflicting update to commit/abort
* Output parameters:
* tmfd - filled in failure cases (see below)
* lockmode - filled with lock mode acquired on tuple
* update_indexes - in success cases this is set to true if new index entries
* are required for this tuple
- * oldSlot - slot to save the deleted or locked tuple. Can be NULL if none of
- * TABLE_MODIFY_FETCH_OLD_TUPLE or TABLE_MODIFY_LOCK_UPDATED options
- * is specified.
-
+ *
* Normal, successful return value is TM_Ok, which means we did actually
* update it. Failure return codes are TM_SelfModified, TM_Updated, and
* TM_BeingModified (the last only possible if wait == false).
static inline TM_Result
table_tuple_update(Relation rel, ItemPointer otid, TupleTableSlot *slot,
CommandId cid, Snapshot snapshot, Snapshot crosscheck,
- int options, TM_FailureData *tmfd, LockTupleMode *lockmode,
- TU_UpdateIndexes *update_indexes,
- TupleTableSlot *oldSlot)
+ bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode,
+ TU_UpdateIndexes *update_indexes)
{
return rel->rd_tableam->tuple_update(rel, otid, slot,
cid, snapshot, crosscheck,
- options, tmfd,
- lockmode, update_indexes,
- oldSlot);
+ wait, tmfd,
+ lockmode, update_indexes);
}
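
Similarly, a sketch of a table_tuple_update() call with the reverted
signature; "rel", "otid", "newslot", "estate", and "resultRelInfo" are assumed
from the caller, and the update_indexes output drives index maintenance
roughly the way the executor does it:

    TM_FailureData tmfd;
    LockTupleMode lockmode;
    TU_UpdateIndexes update_indexes;
    List       *recheckIndexes = NIL;
    TM_Result   result;

    result = table_tuple_update(rel, otid, newslot,
                                GetCurrentCommandId(true),
                                estate->es_snapshot, InvalidSnapshot,
                                true /* wait for commit */ ,
                                &tmfd, &lockmode, &update_indexes);

    if (result == TM_Ok && update_indexes != TU_None)
        recheckIndexes = ExecInsertIndexTuples(resultRelInfo, newslot, estate,
                                               true /* update */ ,
                                               false /* noDupErr */ ,
                                               NULL, NIL,
                                               update_indexes == TU_Summarizing);
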
/*
extern void simple_table_tuple_insert(Relation rel, TupleTableSlot *slot);
extern void simple_table_tuple_delete(Relation rel, ItemPointer tid,
- Snapshot snapshot,
- TupleTableSlot *oldSlot);
+ Snapshot snapshot);
extern void simple_table_tuple_update(Relation rel, ItemPointer otid,
TupleTableSlot *slot, Snapshot snapshot,
- TU_UpdateIndexes *update_indexes,
- TupleTableSlot *oldSlot);
+ TU_UpdateIndexes *update_indexes);
/* ----------------------------------------------------------------------------
TM_FailureData *tmfd);
extern void ExecARDeleteTriggers(EState *estate,
ResultRelInfo *relinfo,
+ ItemPointer tupleid,
HeapTuple fdw_trigtuple,
- TupleTableSlot *slot,
TransitionCaptureState *transition_capture,
bool is_crosspart_update);
extern bool ExecIRDeleteTriggers(EState *estate,
ResultRelInfo *relinfo,
ResultRelInfo *src_partinfo,
ResultRelInfo *dst_partinfo,
+ ItemPointer tupleid,
HeapTuple fdw_trigtuple,
- TupleTableSlot *oldslot,
TupleTableSlot *newslot,
List *recheckIndexes,
TransitionCaptureState *transition_capture,