Page metapage;
/* Construct metapage. */
- metapage = (Page) palloc(BLCKSZ);
+ metapage = (Page) palloc_io_aligned(BLCKSZ, 0);
BloomFillMetapage(index, metapage);
/*
* Write an empty page as a placeholder for the root page. It will be
* replaced with the real root page at the end.
*/
- page = palloc0(BLCKSZ);
+ page = palloc_io_aligned(BLCKSZ, MCXT_ALLOC_ZERO);
RelationOpenSmgr(state->indexrel);
smgrextend(state->indexrel->rd_smgr, MAIN_FORKNUM, GIST_ROOT_BLKNO,
page, true);
if (parent == NULL)
{
parent = palloc(sizeof(GistSortedBuildPageState));
- parent->page = (Page) palloc(BLCKSZ);
+ parent->page = (Page) palloc_io_aligned(BLCKSZ, 0);
parent->parent = NULL;
gistinitpage(parent->page, 0);
gist_indexsortbuild_pagestate_add(state, parent, union_tuple);
/* Re-initialize the page buffer for next page on this level. */
- pagestate->page = palloc(BLCKSZ);
+ pagestate->page = palloc_io_aligned(BLCKSZ, 0);
gistinitpage(pagestate->page, isleaf ? F_LEAF : 0);
/*
{
GISTNodeBufferPage *pageBuffer;
- pageBuffer = (GISTNodeBufferPage *) MemoryContextAllocZero(gfbb->context,
- BLCKSZ);
+ pageBuffer = (GISTNodeBufferPage *)
+ MemoryContextAllocIOAligned(gfbb->context,
+ BLCKSZ, MCXT_ALLOC_ZERO);
pageBuffer->prev = InvalidBlockNumber;
/* Set page free space */
state->rs_old_rel = old_heap;
state->rs_new_rel = new_heap;
- state->rs_buffer = (Page) palloc(BLCKSZ);
+ state->rs_buffer = (Page) palloc_io_aligned(BLCKSZ, 0);
/* new_heap needn't be empty, just locked */
state->rs_blockno = RelationGetNumberOfBlocks(new_heap);
state->rs_buffer_valid = false;
Page metapage;
/* Construct metapage. */
- metapage = (Page) palloc(BLCKSZ);
+ metapage = (Page) palloc_io_aligned(BLCKSZ, 0);
_bt_initmetapage(metapage, P_NONE, 0, _bt_allequalimage(index, false));
/*
Page page;
BTPageOpaque opaque;
- page = (Page) palloc(BLCKSZ);
+ page = (Page) palloc_io_aligned(BLCKSZ, 0);
/* Zero the page and set up standard page header info */
_bt_pageinit(page, BLCKSZ);
while (blkno > wstate->btws_pages_written)
{
if (!wstate->btws_zeropage)
- wstate->btws_zeropage = (Page) palloc0(BLCKSZ);
+ wstate->btws_zeropage =
+ (Page) palloc_io_aligned(BLCKSZ, MCXT_ALLOC_ZERO);
+
/* don't set checksum for all-zero page */
smgrextend(wstate->index->rd_smgr, MAIN_FORKNUM,
wstate->btws_pages_written++,
* set to point to "P_NONE"). This changes the index to the "valid" state
* by filling in a valid magic number in the metapage.
*/
- metapage = (Page) palloc(BLCKSZ);
+ metapage = (Page) palloc_io_aligned(BLCKSZ, 0);
_bt_initmetapage(metapage, rootblkno, rootlevel,
wstate->inskey->allequalimage);
_bt_blwritepage(wstate, metapage, BTREE_METAPAGE);
Page page;
/* Construct metapage. */
- page = (Page) palloc(BLCKSZ);
+ page = (Page) palloc_io_aligned(BLCKSZ, 0);
SpGistInitMetapage(page);
/*
&foundDescs);
BufferBlocks = (char *)
- ShmemInitStruct("Buffer Blocks",
- NBuffers * (Size) BLCKSZ, &foundBufs);
+ TYPEALIGN(BLCKSZ,
+ ShmemInitStruct("Buffer Blocks",
+ (NBuffers + 1) * (Size) BLCKSZ, &foundBufs));
/* Align lwlocks to cacheline boundary */
BufferIOCVArray = (ConditionVariableMinimallyPadded *)
size = add_size(size, PG_CACHE_LINE_SIZE);
/* size of data pages */
+ /* to allow aligning buffer blocks */
+ size = add_size(size, BLCKSZ);
size = add_size(size, mul_size(NBuffers, BLCKSZ));
/* size of stuff controlled by freelist.c */
/* And don't overflow MaxAllocSize, either */
num_bufs = Min(num_bufs, MaxAllocSize / BLCKSZ);
- cur_block = (char *) MemoryContextAlloc(LocalBufferContext,
- num_bufs * BLCKSZ);
+ cur_block = (char *) MemoryContextAllocIOAligned(LocalBufferContext,
+ num_bufs * BLCKSZ, 0);
next_buf_in_block = 0;
num_bufs_in_block = num_bufs;
}
* and second to avoid wasting space in processes that never call this.
*/
if (pageCopy == NULL)
- pageCopy = MemoryContextAlloc(TopMemoryContext, BLCKSZ);
+ pageCopy = MemoryContextAllocIOAligned(TopMemoryContext, BLCKSZ, 0);
memcpy(pageCopy, (char *) page, BLCKSZ);
((PageHeader) pageCopy)->pd_checksum = pg_checksum_page(pageCopy, blkno);
int nbytes;
MdfdVec *v;
+ AssertPointerAlignment(buffer, 4096);
+
/* This assert is too expensive to have on normally ... */
#ifdef CHECK_WRITE_VS_EXTEND
Assert(blocknum >= mdnblocks(reln, forknum));
int nbytes;
MdfdVec *v;
+ AssertPointerAlignment(buffer, 4096);
+
TRACE_POSTGRESQL_SMGR_MD_READ_START(forknum, blocknum,
reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
int nbytes;
MdfdVec *v;
+ AssertPointerAlignment(buffer, 4096);
+
/* This assert is too expensive to have on normally ... */
#ifdef CHECK_WRITE_VS_EXTEND
Assert(blocknum < mdnblocks(reln, forknum));
*/
if (nblocks < ((BlockNumber) RELSEG_SIZE))
{
- char *zerobuf = palloc0(BLCKSZ);
+ char *zerobuf = palloc_io_aligned(BLCKSZ, MCXT_ALLOC_ZERO);
mdextend(reln, forknum,
nextsegno * ((BlockNumber) RELSEG_SIZE) - 1,
static void MemoryContextStatsPrint(MemoryContext context, void *passthru,
const char *stats_string);
+static void AlignedAllocFree(MemoryContext context, void *pointer);
+
+
+/*
+ * Fake memory context used to mark chunks returned by
+ * MemoryContextAllocAligned().  A pointer to this context is stored in the
+ * word immediately preceding the aligned pointer, so that pfree() /
+ * GetMemoryChunkContext() on such a pointer dispatches to AlignedAllocFree()
+ * instead of a real context's free method.  Only .free_p is ever called
+ * through this context; all other methods are intentionally left unset.
+ */
+static const MemoryContextMethods AlignedRedirectMethods = {
+ .free_p = AlignedAllocFree,
+};
+
+static const MemoryContextData AlignedRedirectContext = {
+ .type = T_AlignedAllocRedirectContext,
+ .methods = &AlignedRedirectMethods
+};
+
/*
* You should not do memory allocations within a critical section, because
* an out-of-memory error will be escalated to a PANIC. To enforce that
MemoryContext context = GetMemoryChunkContext(pointer);
context->methods->free_p(context, pointer);
- VALGRIND_MEMPOOL_FREE(context, pointer);
+
+ if (context != &AlignedRedirectContext)
+ VALGRIND_MEMPOOL_FREE(context, pointer);
}
/*
n--;
return pnstrdup(in, n);
}
+
+/*
+ * free_p method of the aligned-allocation redirect context.
+ *
+ * Chunks handed out by MemoryContextAllocAligned() store the address of the
+ * real (unaligned) allocation in the second hidden word directly before the
+ * aligned pointer; recover it and free that underlying allocation.  The
+ * offsets here must match the layout written by MemoryContextAllocAligned().
+ */
+static void
+AlignedAllocFree(MemoryContext context, void *pointer)
+{
+ void *unaligned;
+
+ /* fetch the original allocation's address, stored two words back */
+ unaligned = (void *)*(uintptr_t*)((char *) pointer - sizeof(uintptr_t) - sizeof(uintptr_t));
+
+ pfree(unaligned);
+}
+
+/*
+ * MemoryContextAllocAligned
+ *		Allocate 'size' bytes within 'context', returning a pointer aligned
+ *		to a multiple of 'alignto'.
+ *
+ * Two hidden words are stored immediately before the returned pointer: a
+ * pointer to the fake AlignedRedirectContext (so that pfree() dispatches to
+ * AlignedAllocFree) and the address of the real allocation (so it can be
+ * released).  'flags' is interpreted as for MemoryContextAllocExtended();
+ * in particular, with MCXT_ALLOC_NO_OOM, NULL is returned on failure.
+ */
+void *
+MemoryContextAllocAligned(MemoryContext context,
+ Size size, Size alignto, int flags)
+{
+ Size constant_overhead =
+ sizeof(uintptr_t) /* pointer to fake memory context */
+ + sizeof(uintptr_t) /* pointer to actual allocation */
+ ;
+
+ Size alloc_size;
+ void *unaligned;
+ void *aligned;
+
+ /* wouldn't make much sense to waste that much space */
+ AssertArg(alignto < (128 * 1024 * 1024));
+
+ /* ordinary palloc alignment already satisfies small alignments */
+ if (alignto < MAXIMUM_ALIGNOF)
+ return palloc_extended(size, flags);
+
+ /* allocate enough space for alignment padding */
+ alloc_size = size + alignto + constant_overhead;
+
+ unaligned = MemoryContextAllocExtended(context, alloc_size, flags);
+
+ /*
+ * With MCXT_ALLOC_NO_OOM the underlying allocation can fail; previously
+ * we'd have written the hidden header words through a pointer derived
+ * from NULL.  Propagate the failure to the caller instead.
+ */
+ if (unaligned == NULL)
+ return NULL;
+
+ aligned = (char *) unaligned + constant_overhead;
+ aligned = (void *) TYPEALIGN(alignto, aligned);
+
+ Assert((uintptr_t)aligned - (uintptr_t)unaligned >= constant_overhead);
+
+ /* stash redirect context and real allocation just before 'aligned' */
+ *(uintptr_t*)((char *)aligned - sizeof(uintptr_t)) = (uintptr_t) &AlignedRedirectContext;
+ *(uintptr_t*)((char *)aligned - sizeof(uintptr_t) - sizeof(uintptr_t)) = (uintptr_t) unaligned;
+
+ /* XXX: should we adjust valgrind state here? */
+ return aligned;
+}
+
+/*
+ * MemoryContextAllocIOAligned
+ *		Allocate 'size' bytes within 'context', aligned suitably for direct
+ *		(O_DIRECT-style) disk I/O.
+ */
+void *
+MemoryContextAllocIOAligned(MemoryContext context, Size size, int flags)
+{
+ /* FIXME: don't hardcode page size */
+ return MemoryContextAllocAligned(context, size, 4096, flags);
+}
+
+/*
+ * palloc_aligned
+ *		As MemoryContextAllocAligned(), but allocates from
+ *		CurrentMemoryContext.
+ */
+void *
+palloc_aligned(Size size, Size alignto, int flags)
+{
+ void *mem;
+
+ mem = MemoryContextAllocAligned(CurrentMemoryContext, size, alignto, flags);
+
+ return mem;
+}
+
+/*
+ * palloc_io_aligned
+ *		As MemoryContextAllocIOAligned(), but allocates from
+ *		CurrentMemoryContext.
+ */
+void *
+palloc_io_aligned(Size size, int flags)
+{
+ void *mem;
+
+ mem = MemoryContextAllocIOAligned(CurrentMemoryContext, size, flags);
+
+ return mem;
+}
*
* Add new context types to the set accepted by this macro.
*/
-#define MemoryContextIsValid(context) \
+#define MemoryContextIsValid(context) \
((context) != NULL && \
(IsA((context), AllocSetContext) || \
IsA((context), SlabContext) || \
- IsA((context), GenerationContext)))
+ IsA((context), GenerationContext) || \
+ IsA((context), AlignedAllocRedirectContext)))
#endif /* MEMNODES_H */
T_AllocSetContext,
T_SlabContext,
T_GenerationContext,
+ T_AlignedAllocRedirectContext,
/*
* TAGS FOR VALUE NODES (value.h)
extern void *MemoryContextAllocZeroAligned(MemoryContext context, Size size);
extern void *MemoryContextAllocExtended(MemoryContext context,
Size size, int flags);
+extern void *MemoryContextAllocAligned(MemoryContext context,
+ Size size, Size alignto, int flags);
+extern void *MemoryContextAllocIOAligned(MemoryContext context, Size size, int flags);
extern void *palloc(Size size);
extern void *palloc0(Size size);
extern void *palloc_extended(Size size, int flags);
+extern void *palloc_aligned(Size size, Size alignto, int flags);
+extern void *palloc_io_aligned(Size size, int flags);
extern pg_nodiscard void *repalloc(void *pointer, Size size);
extern void pfree(void *pointer);