Assert(BlockNumberIsValid(blockNum));
/* create a tag so we can lookup the buffer */
- INIT_BUFFERTAG(newTag, smgr_reln->smgr_rlocator.locator,
+ InitBufferTag(&newTag, &smgr_reln->smgr_rlocator.locator,
forkNum, blockNum);
/* determine its hash code and partition lock ID */
ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
ReservePrivateRefCountEntry();
- INIT_BUFFERTAG(tag, rlocator, forkNum, blockNum);
+ InitBufferTag(&tag, &rlocator, forkNum, blockNum);
if (BufferIsLocal(recent_buffer))
{
buf_state = pg_atomic_read_u32(&bufHdr->state);
/* Is it still valid and holding the right tag? */
- if ((buf_state & BM_VALID) && BUFFERTAGS_EQUAL(tag, bufHdr->tag))
+ if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
{
/*
* Bump buffer's ref and usage counts. This is equivalent of
else
buf_state = LockBufHdr(bufHdr);
- if ((buf_state & BM_VALID) && BUFFERTAGS_EQUAL(tag, bufHdr->tag))
+ if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
{
/*
* It's now safe to pin the buffer. We can't pin first and ask
uint32 buf_state;
/* create a tag so we can lookup the buffer */
- INIT_BUFFERTAG(newTag, smgr->smgr_rlocator.locator, forkNum, blockNum);
+ InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
/* determine its hash code and partition lock ID */
newHash = BufTableHashCode(&newTag);
buf_state = LockBufHdr(buf);
/* If it's changed while we were waiting for lock, do nothing */
- if (!BUFFERTAGS_EQUAL(buf->tag, oldTag))
+ if (!BufferTagsEqual(&buf->tag, &oldTag))
{
UnlockBufHdr(buf, buf_state);
LWLockRelease(oldPartitionLock);
* linear scans of the buffer array don't think the buffer is valid.
*/
oldFlags = buf_state & BUF_FLAG_MASK;
- CLEAR_BUFFERTAG(buf->tag);
+ ClearBufferTag(&buf->tag);
buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
UnlockBufHdr(buf, buf_state);
uint32 buf_state;
/* create a tag so we can lookup the buffer */
- INIT_BUFFERTAG(bufTag, rlocator, forkNum, curBlock);
+ InitBufferTag(&bufTag, &rlocator, forkNum, curBlock);
/* determine its hash code and partition lock ID */
bufHash = BufTableHashCode(&bufTag);
BufferTag newTag; /* identity of requested block */
LocalBufferLookupEnt *hresult;
- INIT_BUFFERTAG(newTag, smgr->smgr_rlocator.locator, forkNum, blockNum);
+ InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
/* Initialize local buffers if first request in this session */
if (LocalBufHash == NULL)
bool found;
uint32 buf_state;
- INIT_BUFFERTAG(newTag, smgr->smgr_rlocator.locator, forkNum, blockNum);
+ InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
/* Initialize local buffers if first request in this session */
if (LocalBufHash == NULL)
{
b = hresult->id;
bufHdr = GetLocalBufferDescriptor(b);
- Assert(BUFFERTAGS_EQUAL(bufHdr->tag, newTag));
+ Assert(BufferTagsEqual(&bufHdr->tag, &newTag));
#ifdef LBDEBUG
fprintf(stderr, "LB ALLOC (%u,%d,%d) %d\n",
smgr->smgr_rlocator.locator.relNumber, forkNum, blockNum, -b - 1);
if (!hresult) /* shouldn't happen */
elog(ERROR, "local buffer hash table corrupted");
/* mark buffer invalid just in case hash insert fails */
- CLEAR_BUFFERTAG(bufHdr->tag);
+ ClearBufferTag(&bufHdr->tag);
buf_state &= ~(BM_VALID | BM_TAG_VALID);
pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
}
if (!hresult) /* shouldn't happen */
elog(ERROR, "local buffer hash table corrupted");
/* Mark buffer invalid */
- CLEAR_BUFFERTAG(bufHdr->tag);
+ ClearBufferTag(&bufHdr->tag);
buf_state &= ~BUF_FLAG_MASK;
buf_state &= ~BUF_USAGECOUNT_MASK;
pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
if (!hresult) /* shouldn't happen */
elog(ERROR, "local buffer hash table corrupted");
/* Mark buffer invalid */
- CLEAR_BUFFERTAG(bufHdr->tag);
+ ClearBufferTag(&bufHdr->tag);
buf_state &= ~BUF_FLAG_MASK;
buf_state &= ~BUF_USAGECOUNT_MASK;
pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
* relation is visible yet (its xact may have started before the xact that
* created the rel). The storage manager must be able to cope anyway.
*
- * Note: if there's any pad bytes in the struct, INIT_BUFFERTAG will have
+ * Note: if there are any pad bytes in the struct, InitBufferTag will have
* to be fixed to zero them, since this struct is used as a hash key.
*/
typedef struct buftag
BlockNumber blockNum; /* blknum relative to begin of reln */
} BufferTag;
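/*
 * Illustrative sketch, not part of this patch: the buffer mapping table is
 * built with HASH_BLOBS, so the tag is hashed as a raw byte string; that is
 * why any pad bytes in BufferTag would have to be zeroed.  The helper name
 * below is hypothetical.
 */
static inline uint32
BufferTagHashBytesSketch(const BufferTag *tag)
{
	/* hashes the struct's raw bytes; padding, if any, must be zeroed first */
	return hash_bytes((const unsigned char *) tag, sizeof(BufferTag));
}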
-#define CLEAR_BUFFERTAG(a) \
-( \
- (a).rlocator.spcOid = InvalidOid, \
- (a).rlocator.dbOid = InvalidOid, \
- (a).rlocator.relNumber = InvalidRelFileNumber, \
- (a).forkNum = InvalidForkNumber, \
- (a).blockNum = InvalidBlockNumber \
-)
-
-#define INIT_BUFFERTAG(a,xx_rlocator,xx_forkNum,xx_blockNum) \
-( \
- (a).rlocator = (xx_rlocator), \
- (a).forkNum = (xx_forkNum), \
- (a).blockNum = (xx_blockNum) \
-)
-
-#define BUFFERTAGS_EQUAL(a,b) \
-( \
- RelFileLocatorEquals((a).rlocator, (b).rlocator) && \
- (a).blockNum == (b).blockNum && \
- (a).forkNum == (b).forkNum \
-)
+static inline void
+ClearBufferTag(BufferTag *tag)
+{
+ tag->rlocator.spcOid = InvalidOid;
+ tag->rlocator.dbOid = InvalidOid;
+ tag->rlocator.relNumber = InvalidRelFileNumber;
+ tag->forkNum = InvalidForkNumber;
+ tag->blockNum = InvalidBlockNumber;
+}
+
+static inline void
+InitBufferTag(BufferTag *tag, const RelFileLocator *rlocator,
+ ForkNumber forkNum, BlockNumber blockNum)
+{
+ tag->rlocator = *rlocator;
+ tag->forkNum = forkNum;
+ tag->blockNum = blockNum;
+}
+
+static inline bool
+BufferTagsEqual(const BufferTag *tag1, const BufferTag *tag2)
+{
+ return RelFileLocatorEquals(tag1->rlocator, tag2->rlocator) &&
+ (tag1->blockNum == tag2->blockNum) &&
+ (tag1->forkNum == tag2->forkNum);
+}
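/*
 * Illustrative sketch, not part of this patch: a hypothetical helper showing
 * how callers are expected to use the new inline functions.  It tests whether
 * an existing buffer tag still identifies the requested block of the given
 * relation fork.
 */
static inline bool
BufferTagMatchesBlockSketch(const BufferTag *bufTag,
							const RelFileLocator *rlocator,
							ForkNumber forkNum, BlockNumber blockNum)
{
	BufferTag	tag;

	InitBufferTag(&tag, rlocator, forkNum, blockNum);
	return BufferTagsEqual(&tag, bufTag);
}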
/*
* The shared buffer mapping table is partitioned to reduce contention.
* hash code with BufTableHashCode(), then apply BufMappingPartitionLock().
* NB: NUM_BUFFER_PARTITIONS must be a power of 2!
*/
-#define BufTableHashPartition(hashcode) \
- ((hashcode) % NUM_BUFFER_PARTITIONS)
-#define BufMappingPartitionLock(hashcode) \
- (&MainLWLockArray[BUFFER_MAPPING_LWLOCK_OFFSET + \
- BufTableHashPartition(hashcode)].lock)
-#define BufMappingPartitionLockByIndex(i) \
- (&MainLWLockArray[BUFFER_MAPPING_LWLOCK_OFFSET + (i)].lock)
+static inline uint32
+BufTableHashPartition(uint32 hashcode)
+{
+ return hashcode % NUM_BUFFER_PARTITIONS;
+}
+
+static inline LWLock *
+BufMappingPartitionLock(uint32 hashcode)
+{
+ return &MainLWLockArray[BUFFER_MAPPING_LWLOCK_OFFSET +
+ BufTableHashPartition(hashcode)].lock;
+}
+
+static inline LWLock *
+BufMappingPartitionLockByIndex(uint32 index)
+{
+ return &MainLWLockArray[BUFFER_MAPPING_LWLOCK_OFFSET + index].lock;
+}
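/*
 * Illustrative sketch, not part of this patch: the lookup pattern these
 * helpers support.  The tag is hashed once with BufTableHashCode(), the
 * matching mapping partition lock is taken, and the buffer table is probed
 * under it (both functions are declared later in this header).  The helper
 * name is hypothetical.
 */
static inline int
BufTableLookupSketch(BufferTag *tag)
{
	uint32		hash = BufTableHashCode(tag);
	LWLock	   *partitionLock = BufMappingPartitionLock(hash);
	int			buf_id;

	LWLockAcquire(partitionLock, LW_SHARED);
	buf_id = BufTableLookup(tag, hash);
	LWLockRelease(partitionLock);

	return buf_id;				/* -1 if the block is not in shared buffers */
}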
/*
* BufferDesc -- shared descriptor/state data for a single shared buffer.
char pad[BUFFERDESC_PAD_TO_SIZE];
} BufferDescPadded;
-#define GetBufferDescriptor(id) (&BufferDescriptors[(id)].bufferdesc)
-#define GetLocalBufferDescriptor(id) (&LocalBufferDescriptors[(id)])
-
-#define BufferDescriptorGetBuffer(bdesc) ((bdesc)->buf_id + 1)
-
-#define BufferDescriptorGetIOCV(bdesc) \
- (&(BufferIOCVArray[(bdesc)->buf_id]).cv)
-#define BufferDescriptorGetContentLock(bdesc) \
- ((LWLock*) (&(bdesc)->content_lock))
-
-extern PGDLLIMPORT ConditionVariableMinimallyPadded *BufferIOCVArray;
-
-/*
- * The freeNext field is either the index of the next freelist entry,
- * or one of these special values:
- */
-#define FREENEXT_END_OF_LIST (-1)
-#define FREENEXT_NOT_IN_LIST (-2)
-
-/*
- * Functions for acquiring/releasing a shared buffer header's spinlock. Do
- * not apply these to local buffers!
- */
-extern uint32 LockBufHdr(BufferDesc *desc);
-#define UnlockBufHdr(desc, s) \
- do { \
- pg_write_barrier(); \
- pg_atomic_write_u32(&(desc)->state, (s) & (~BM_LOCKED)); \
- } while (0)
-
-
/*
 * The PendingWriteback & WritebackContext structures are used to keep
* information about pending flush requests to be issued to the OS.
/* in buf_init.c */
extern PGDLLIMPORT BufferDescPadded *BufferDescriptors;
+extern PGDLLIMPORT ConditionVariableMinimallyPadded *BufferIOCVArray;
extern PGDLLIMPORT WritebackContext BackendWritebackContext;
/* in localbuf.c */
extern PGDLLIMPORT BufferDesc *LocalBufferDescriptors;
+
+static inline BufferDesc *
+GetBufferDescriptor(uint32 id)
+{
+ return &(BufferDescriptors[id]).bufferdesc;
+}
+
+static inline BufferDesc *
+GetLocalBufferDescriptor(uint32 id)
+{
+ return &LocalBufferDescriptors[id];
+}
+
+static inline Buffer
+BufferDescriptorGetBuffer(const BufferDesc *bdesc)
+{
+ return (Buffer) (bdesc->buf_id + 1);
+}
+
+static inline ConditionVariable *
+BufferDescriptorGetIOCV(const BufferDesc *bdesc)
+{
+ return &(BufferIOCVArray[bdesc->buf_id]).cv;
+}
+
+static inline LWLock *
+BufferDescriptorGetContentLock(const BufferDesc *bdesc)
+{
+ return (LWLock *) (&bdesc->content_lock);
+}
+
+/*
+ * The freeNext field is either the index of the next freelist entry,
+ * or one of these special values:
+ */
+#define FREENEXT_END_OF_LIST (-1)
+#define FREENEXT_NOT_IN_LIST (-2)
+
+/*
+ * Functions for acquiring/releasing a shared buffer header's spinlock. Do
+ * not apply these to local buffers!
+ */
+extern uint32 LockBufHdr(BufferDesc *desc);
+
+static inline void
+UnlockBufHdr(BufferDesc *desc, uint32 buf_state)
+{
+ pg_write_barrier();
+ pg_atomic_write_u32(&desc->state, buf_state & (~BM_LOCKED));
+}
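/*
 * Illustrative sketch, not part of this patch: the typical lock-examine-
 * unlock pattern around a shared buffer header.  The state word returned by
 * LockBufHdr() is inspected and then written back, minus BM_LOCKED, by
 * UnlockBufHdr().  The helper name is hypothetical.
 */
static inline bool
BufferHdrIsDirtySketch(BufferDesc *bufHdr)
{
	uint32		buf_state;
	bool		dirty;

	buf_state = LockBufHdr(bufHdr);
	dirty = (buf_state & BM_DIRTY) != 0;
	UnlockBufHdr(bufHdr, buf_state);

	return dirty;
}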
+
/* in bufmgr.c */
/*