Submitting IO in larger batches can be more efficient than doing so
one-by-one, particularly for many small reads. It does, however, require
the ReadStreamBlockNumberCB callback to abide by the restrictions of AIO
batching (cf. pgaio_enter_batchmode()). Basically, the callback may not:
a) block without first calling pgaio_submit_staged(), unless a
to-be-waited-on lock cannot be part of a deadlock, e.g. because it is
never held while waiting for IO.
b) directly or indirectly start another batch via pgaio_enter_batchmode()
As this requires care and is nontrivial in some cases, batching is only
used with explicit opt-in.
This patch adds an explicit flag (READ_STREAM_USE_BATCHING) to read_stream and
uses it where appropriate.
There are two cases where batching would likely be beneficial, but where we
aren't using it yet:
1) bitmap heap scans, because the callback reads the VM
This should soon be solved, because we are planning to remove the use of
the VM there, as that check is not sound.
2) The first phase of heap vacuum
This could be made to support batchmode, but would require some care.
Reviewed-by: Noah Misch <[email protected]>
Reviewed-by: Thomas Munro <[email protected]>
Discussion: https://postgr.es/m/uvrtrknj4kdytuboidbhwclo4gxhswwcpgadptsjvjqcluzmah%40brqs62irg4dt
if (skip_option == SKIP_PAGES_NONE)
{
+ /*
+ * It is safe to use batchmode as block_range_read_stream_cb takes no
+ * locks.
+ */
stream_cb = block_range_read_stream_cb;
- stream_flags = READ_STREAM_SEQUENTIAL | READ_STREAM_FULL;
+ stream_flags = READ_STREAM_SEQUENTIAL |
+ READ_STREAM_FULL |
+ READ_STREAM_USE_BATCHING;
stream_data = &stream_skip_data.range;
}
else
{
+ /*
+ * It would not be safe to naively use batchmode, as
+ * heapcheck_read_stream_next_unskippable takes locks. It shouldn't be
+ * too hard to convert though.
+ */
stream_cb = heapcheck_read_stream_next_unskippable;
stream_flags = READ_STREAM_DEFAULT;
stream_data = &stream_skip_data;
p.current_blocknum = first_block;
p.last_exclusive = last_block + 1;
- stream = read_stream_begin_relation(READ_STREAM_FULL,
+ /*
+ * It is safe to use batchmode as block_range_read_stream_cb takes no
+ * locks.
+ */
+ stream = read_stream_begin_relation(READ_STREAM_FULL |
+ READ_STREAM_USE_BATCHING,
NULL,
rel,
forkNumber,
{
p.current_blocknum = 0;
p.last_exclusive = nblocks;
- stream = read_stream_begin_relation(READ_STREAM_FULL,
+
+ /*
+ * It is safe to use batchmode as block_range_read_stream_cb takes no
+ * locks.
+ */
+ stream = read_stream_begin_relation(READ_STREAM_FULL |
+ READ_STREAM_USE_BATCHING,
bstrategy,
rel,
MAIN_FORKNUM,
needLock = !RELATION_IS_LOCAL(rel);
p.current_blocknum = GIST_ROOT_BLKNO;
- stream = read_stream_begin_relation(READ_STREAM_FULL,
+
+ /*
+ * It is safe to use batchmode as block_range_read_stream_cb takes no
+ * locks.
+ */
+ stream = read_stream_begin_relation(READ_STREAM_FULL |
+ READ_STREAM_USE_BATCHING,
info->strategy,
rel,
MAIN_FORKNUM,
else
cb = heap_scan_stream_read_next_serial;
- scan->rs_read_stream = read_stream_begin_relation(READ_STREAM_SEQUENTIAL,
+ /* ---
+ * It is safe to use batchmode as the only locks taken by `cb`
+ * are never taken while waiting for IO:
+ * - SyncScanLock is used in the non-parallel case
+ * - in the parallel case, only spinlocks and atomics are used
+ * ---
+ */
+ scan->rs_read_stream = read_stream_begin_relation(READ_STREAM_SEQUENTIAL |
+ READ_STREAM_USE_BATCHING,
scan->rs_strategy,
scan->rs_base.rs_rd,
MAIN_FORKNUM,
}
else if (scan->rs_base.rs_flags & SO_TYPE_BITMAPSCAN)
{
+ /*
+ * Currently we can't trivially use batching, due to the
+ * VM_ALL_VISIBLE check in bitmapheap_stream_read_next. While that
+ * could be made safe, we are about to remove the all-visible logic
+ * from bitmap scans due to its unsoundness.
+ */
scan->rs_read_stream = read_stream_begin_relation(READ_STREAM_DEFAULT,
scan->rs_strategy,
scan->rs_base.rs_rd,
vacrel->next_unskippable_eager_scanned = false;
vacrel->next_unskippable_vmbuffer = InvalidBuffer;
- /* Set up the read stream for vacuum's first pass through the heap */
+ /*
+ * Set up the read stream for vacuum's first pass through the heap.
+ *
+ * This could be made safe for READ_STREAM_USE_BATCHING, but only with
+ * explicit work in heap_vac_scan_next_block.
+ */
stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE,
vacrel->bstrategy,
vacrel->rel,
* Read stream callback for vacuum's third phase (second pass over the heap).
* Gets the next block from the TID store and returns it or InvalidBlockNumber
* if there are no further blocks to vacuum.
+ *
+ * NB: Assumed to be safe to use with READ_STREAM_USE_BATCHING.
*/
static BlockNumber
vacuum_reap_lp_read_stream_next(ReadStream *stream,
iter = TidStoreBeginIterate(vacrel->dead_items);
- /* Set up the read stream for vacuum's second pass through the heap */
- stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE,
+ /*
+ * Set up the read stream for vacuum's second pass through the heap.
+ *
+ * It is safe to use batchmode, as vacuum_reap_lp_read_stream_next() does
+ * not need to wait for IO and does not perform locking. Once we support
+ * parallelism, it should still be fine, as presumably a lock holder would
+ * never be blocked by IO while holding the lock.
+ */
+ stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE |
+ READ_STREAM_USE_BATCHING,
vacrel->bstrategy,
vacrel->rel,
MAIN_FORKNUM,
needLock = !RELATION_IS_LOCAL(rel);
p.current_blocknum = BTREE_METAPAGE + 1;
- stream = read_stream_begin_relation(READ_STREAM_FULL,
+
+ /*
+ * It is safe to use batchmode as block_range_read_stream_cb takes no
+ * locks.
+ */
+ stream = read_stream_begin_relation(READ_STREAM_FULL |
+ READ_STREAM_USE_BATCHING,
info->strategy,
rel,
MAIN_FORKNUM,
/* We can skip locking for new or temp relations */
needLock = !RELATION_IS_LOCAL(index);
p.current_blocknum = SPGIST_METAPAGE_BLKNO + 1;
- stream = read_stream_begin_relation(READ_STREAM_FULL,
+
+ /*
+ * It is safe to use batchmode as block_range_read_stream_cb takes no
+ * locks.
+ */
+ stream = read_stream_begin_relation(READ_STREAM_FULL |
+ READ_STREAM_USE_BATCHING,
bds->info->strategy,
index,
MAIN_FORKNUM,
scan = table_beginscan_analyze(onerel);
slot = table_slot_create(onerel, NULL);
- stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE,
+ /*
+ * It is safe to use batching, as block_sampling_read_stream_next never
+ * blocks.
+ */
+ stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE |
+ READ_STREAM_USE_BATCHING,
vac_strategy,
scan->rs_rd,
MAIN_FORKNUM,
int16 initialized_buffers;
int read_buffers_flags;
bool sync_mode; /* using io_method=sync */
+ bool batch_mode; /* READ_STREAM_USE_BATCHING */
bool advice_enabled;
bool temporary;
static void
read_stream_look_ahead(ReadStream *stream)
{
+ /*
+ * Allow amortizing the cost of submitting IO over multiple IOs. This
+ * requires that we don't do any operations that could lead to a deadlock
+ * with staged-but-unsubmitted IO. The callback needs to opt in to being
+ * careful.
+ */
+ if (stream->batch_mode)
+ pgaio_enter_batchmode();
+
while (stream->ios_in_progress < stream->max_ios &&
stream->pinned_buffers + stream->pending_read_nblocks < stream->distance)
{
{
/* We've hit the buffer or I/O limit. Rewind and stop here. */
read_stream_unget_block(stream, blocknum);
+ if (stream->batch_mode)
+ pgaio_exit_batchmode();
return;
}
}
* time.
*/
Assert(stream->pinned_buffers > 0 || stream->distance == 0);
+
+ if (stream->batch_mode)
+ pgaio_exit_batchmode();
}
/*
MAXALIGN(&stream->ios[Max(1, max_ios)]);
stream->sync_mode = io_method == IOMETHOD_SYNC;
+ stream->batch_mode = flags & READ_STREAM_USE_BATCHING;
#ifdef USE_PREFETCH
p.current_blocknum = 0;
p.last_exclusive = nblocks;
src_smgr = smgropen(srclocator, INVALID_PROC_NUMBER);
- src_stream = read_stream_begin_smgr_relation(READ_STREAM_FULL,
+
+ /*
+ * It is safe to use batchmode as block_range_read_stream_cb takes no
+ * locks.
+ */
+ src_stream = read_stream_begin_smgr_relation(READ_STREAM_FULL |
+ READ_STREAM_USE_BATCHING,
bstrategy_src,
src_smgr,
permanent ? RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED,
*/
#define READ_STREAM_FULL 0x04
+/* ---
+ * Opt in to using AIO batchmode.
+ *
+ * Submitting IO in larger batches can be more efficient than doing so
+ * one-by-one, particularly for many small reads. It does, however, require
+ * the ReadStreamBlockNumberCB callback to abide by the restrictions of AIO
+ * batching (cf. pgaio_enter_batchmode()). Basically, the callback may not:
+ *
+ * a) block without first calling pgaio_submit_staged(), unless a
+ * to-be-waited-on lock cannot be part of a deadlock, e.g. because it is
+ * never held while waiting for IO.
+ *
+ * b) start another batch (without first exiting batchmode and re-entering
+ * before returning)
+ *
+ * As this requires care and is nontrivial in some cases, batching is only
+ * used with explicit opt-in.
+ * ---
+ */
+#define READ_STREAM_USE_BATCHING 0x08
+
struct ReadStream;
typedef struct ReadStream ReadStream;