BufferDescPadded *BufferDescriptors;
char *BufferBlocks;
-LWLockMinimallyPadded *BufferIOLWLockArray = NULL;
+ConditionVariableMinimallyPadded *BufferIOCVArray = NULL;
WritebackContext BackendWritebackContext;
CkptSortItem *CkptBufferIds;
{
bool foundBufs,
foundDescs,
- foundIOLocks,
+ foundIOCV,
foundBufCkpt;
/* Align descriptors to a cacheline boundary. */
NBuffers * (Size) BLCKSZ, &foundBufs);
- /* Align lwlocks to cacheline boundary */
+ /* Align condition variables to cacheline boundary. */
- BufferIOLWLockArray = (LWLockMinimallyPadded *)
- ShmemInitStruct("Buffer IO Locks",
- NBuffers * (Size) sizeof(LWLockMinimallyPadded),
- &foundIOLocks);
+ BufferIOCVArray = (ConditionVariableMinimallyPadded *)
+ ShmemInitStruct("Buffer IO Condition Variables",
+ NBuffers * (Size) sizeof(ConditionVariableMinimallyPadded),
+ &foundIOCV);
/*
* The array used to sort to-be-checkpointed buffer ids is located in
ShmemInitStruct("Checkpoint BufferIds",
NBuffers * sizeof(CkptSortItem), &foundBufCkpt);
- if (foundDescs || foundBufs || foundIOLocks || foundBufCkpt)
+ if (foundDescs || foundBufs || foundIOCV || foundBufCkpt)
{
/* should find all of these, or none of them */
- Assert(foundDescs && foundBufs && foundIOLocks && foundBufCkpt);
+ Assert(foundDescs && foundBufs && foundIOCV && foundBufCkpt);
/* note: this path is only taken in EXEC_BACKEND case */
}
else
LWLockInitialize(BufferDescriptorGetContentLock(buf),
LWTRANCHE_BUFFER_CONTENT);
- LWLockInitialize(BufferDescriptorGetIOLock(buf),
- LWTRANCHE_BUFFER_IO);
+ ConditionVariableInit(BufferDescriptorGetIOCV(buf));
}
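Presumably the shared-memory sizing in BufferShmemSize() needs a matching change so the condition-variable array is accounted for in place of the old I/O LWLock tranche. A rough, hypothetical excerpt (not part of the patch as shown here; add_size/mul_size/PG_CACHE_LINE_SIZE as already used in buf_init.c):

	/*
	 * Sketch only: reserve space for the per-buffer I/O condition
	 * variables, mirroring the other NBuffers-sized arrays.  The rest of
	 * the function body is omitted.
	 */
	Size		size = 0;

	/* size of I/O condition variables, replacing the old I/O LWLocks */
	size = add_size(size, mul_size(NBuffers,
								   sizeof(ConditionVariableMinimallyPadded)));
	/* allow room for cacheline alignment of the array */
	size = add_size(size, PG_CACHE_LINE_SIZE);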
/* Correct last entry of linked list */
LWLockRelease(newPartitionLock);
/*
- * Buffer contents are currently invalid. Try to get the io_in_progress
- * lock. If StartBufferIO returns false, then someone else managed to
+ * Buffer contents are currently invalid. Try to obtain the right to start
+ * I/O. If StartBufferIO returns false, then someone else managed to
* read it before we did, so there's nothing left for BufferAlloc() to do.
*/
if (StartBufferIO(buf, true))
*/
VALGRIND_MAKE_MEM_NOACCESS(BufHdrGetBlock(buf), BLCKSZ);
- /* I'd better not still hold any locks on the buffer */
+ /* I'd better not still hold the buffer content lock */
Assert(!LWLockHeldByMe(BufferDescriptorGetContentLock(buf)));
- Assert(!LWLockHeldByMe(BufferDescriptorGetIOLock(buf)));
/*
* Decrement the shared reference count.
uint32 buf_state;
/*
- * Acquire the buffer's io_in_progress lock. If StartBufferIO returns
- * false, then someone else flushed the buffer before we could, so we need
- * not do anything.
+ * Try to start an I/O operation. If StartBufferIO returns false, then
+ * someone else flushed the buffer before we could, so we need not do
+ * anything.
*/
if (!StartBufferIO(buf, false))
return;
/*
* Now it's safe to write buffer to disk. Note that no one else should
* have been able to write it while we were busy with log flushing because
- * we have the io_in_progress lock.
+ * only one process at a time can set the BM_IO_IN_PROGRESS bit.
*/
bufBlock = BufHdrGetBlock(buf);
/*
* Mark the buffer as clean (unless BM_JUST_DIRTIED has become set) and
- * end the io_in_progress state.
+ * end the BM_IO_IN_PROGRESS state.
*/
TerminateBufferIO(buf, true, 0);
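Taken together, the write side of the protocol now reads roughly as follows. This is a condensed, hypothetical sketch of the FlushBuffer sequence (declarations, checksum handling, statistics, error recovery and the buffer-header lock around the LSN read are omitted), not the patched function itself:

	if (!StartBufferIO(buf, false))
		return;					/* someone else already flushed it */

	/* Force WAL out first, per the WAL-before-data rule. */
	XLogFlush(recptr);

	/*
	 * Now it is safe to write the page: only one process at a time can
	 * hold the BM_IO_IN_PROGRESS bit for this buffer.
	 */
	bufBlock = BufHdrGetBlock(buf);
	smgrwrite(reln, buf->tag.forkNum, buf->tag.blockNum,
			  (char *) bufBlock, false);

	/* Mark clean (unless redirtied), clear BM_IO_IN_PROGRESS, wake waiters. */
	TerminateBufferIO(buf, true, 0);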
* Functions for buffer I/O handling
*
* Note: We assume that nested buffer I/O never occurs.
- * i.e at most one io_in_progress lock is held per proc.
+ * i.e. at most one BM_IO_IN_PROGRESS bit is set per proc.
*
* Also note that these are used only for shared buffers, not local ones.
*/
static void
WaitIO(BufferDesc *buf)
{
- /*
- * Changed to wait until there's no IO - Inoue 01/13/2000
- *
- * Note this is *necessary* because an error abort in the process doing
- * I/O could release the io_in_progress_lock prematurely. See
- * AbortBufferIO.
- */
+ ConditionVariable *cv = BufferDescriptorGetIOCV(buf);
+
+ ConditionVariablePrepareToSleep(cv);
+
for (;;)
{
uint32 buf_state;
if (!(buf_state & BM_IO_IN_PROGRESS))
break;
- LWLockAcquire(BufferDescriptorGetIOLock(buf), LW_SHARED);
- LWLockRelease(BufferDescriptorGetIOLock(buf));
+ ConditionVariableSleep(cv, WAIT_EVENT_BUFFER_IO);
}
+ ConditionVariableCancelSleep();
}
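The new WaitIO() follows the standard condition-variable idiom from condition_variable.h: prepare to sleep, re-test the condition in a loop, sleep, then cancel. A generic sketch of that idiom for reference (shared->flag, shared->cv and MY_WAIT_EVENT are illustrative names, not identifiers from this patch):

	/* Waiter: block until some other process clears the flag. */
	ConditionVariablePrepareToSleep(&shared->cv);
	while (shared->flag)		/* always re-check the condition after waking */
		ConditionVariableSleep(&shared->cv, MY_WAIT_EVENT);
	ConditionVariableCancelSleep();

	/* Waker: change the shared state first, then broadcast. */
	shared->flag = false;
	ConditionVariableBroadcast(&shared->cv);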
/*
* In some scenarios there are race conditions in which multiple backends
* could attempt the same I/O operation concurrently. If someone else
* has already started I/O on this buffer then we will block on the
- * io_in_progress lock until he's done.
+ * I/O condition variable until that I/O is done.
*
* Input operations are only attempted on buffers that are not BM_VALID,
* and output operations only on buffers that are BM_VALID and BM_DIRTY,
for (;;)
{
- /*
- * Grab the io_in_progress lock so that other processes can wait for
- * me to finish the I/O.
- */
- LWLockAcquire(BufferDescriptorGetIOLock(buf), LW_EXCLUSIVE);
-
buf_state = LockBufHdr(buf);
if (!(buf_state & BM_IO_IN_PROGRESS))
break;
-
- /*
- * The only way BM_IO_IN_PROGRESS could be set when the io_in_progress
- * lock isn't held is if the process doing the I/O is recovering from
- * an error (see AbortBufferIO). If that's the case, we must wait for
- * him to get unwedged.
- */
UnlockBufHdr(buf, buf_state);
- LWLockRelease(BufferDescriptorGetIOLock(buf));
WaitIO(buf);
}
{
/* someone else already did the I/O */
UnlockBufHdr(buf, buf_state);
- LWLockRelease(BufferDescriptorGetIOLock(buf));
return false;
}
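For review convenience, the retry loop in StartBufferIO() now reduces to roughly the following control flow (a sketch, not the committed text): the buffer header spinlock alone guards BM_IO_IN_PROGRESS, and WaitIO() supplies the blocking.

	for (;;)
	{
		buf_state = LockBufHdr(buf);
		if (!(buf_state & BM_IO_IN_PROGRESS))
			break;				/* flag is clear; we may claim the I/O */
		UnlockBufHdr(buf, buf_state);
		WaitIO(buf);			/* sleeps on the buffer's condition variable */
	}

	if (forInput ? (buf_state & BM_VALID) : !(buf_state & BM_DIRTY))
	{
		/* someone else already did the I/O */
		UnlockBufHdr(buf, buf_state);
		return false;
	}

	buf_state |= BM_IO_IN_PROGRESS;
	UnlockBufHdr(buf, buf_state);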
* (Assumptions)
* My process is executing IO for the buffer
* BM_IO_IN_PROGRESS bit is set for the buffer
- * We hold the buffer's io_in_progress lock
* The buffer is Pinned
*
* If clear_dirty is true and BM_JUST_DIRTIED is not set, we clear the
InProgressBuf = NULL;
- LWLockRelease(BufferDescriptorGetIOLock(buf));
+ ConditionVariableBroadcast(BufferDescriptorGetIOCV(buf));
}
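One note on the waker side, with a minimal sketch below: BM_IO_IN_PROGRESS is cleared under the buffer header spinlock before broadcasting, so a waiter that re-checks the flag after waking (or just before sleeping) cannot miss the state change, and ConditionVariableBroadcast() rather than ConditionVariableSignal() is used because any number of backends may be sleeping in WaitIO() on the same buffer.

	/*
	 * Illustrative ordering only; the real function also handles
	 * BM_JUST_DIRTIED, BM_IO_ERROR and the caller's set_flag_bits.
	 */
	buf_state = LockBufHdr(buf);
	buf_state &= ~BM_IO_IN_PROGRESS;
	UnlockBufHdr(buf, buf_state);

	/* Wake every waiter; each one re-tests BM_IO_IN_PROGRESS itself. */
	ConditionVariableBroadcast(BufferDescriptorGetIOCV(buf));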
/*
{
uint32 buf_state;
- /*
- * Since LWLockReleaseAll has already been called, we're not holding
- * the buffer's io_in_progress_lock. We have to re-acquire it so that
- * we can use TerminateBufferIO. Anyone who's executing WaitIO on the
- * buffer will be in a busy spin until we succeed in doing this.
- */
- LWLockAcquire(BufferDescriptorGetIOLock(buf), LW_EXCLUSIVE);
-
buf_state = LockBufHdr(buf);
Assert(buf_state & BM_IO_IN_PROGRESS);
if (IsForInput)