Use REGBUF_NO_CHANGE at one more place in the hash index.
Author:    Amit Kapila <[email protected]>
           Mon, 13 Nov 2023 08:38:26 +0000 (14:08 +0530)
Committer: Amit Kapila <[email protected]>
           Mon, 13 Nov 2023 08:38:26 +0000 (14:08 +0530)
Commit 00d7fb5e2e started to use REGBUF_NO_CHANGE at a few places in the
code where we register the buffer before marking it dirty but missed
updating one of the code flows in the hash index where we free the overflow
page without any live tuples on it.

Author: Amit Kapila and Hayato Kuroda
Discussion: http://postgr.es/m/f045c8f7-ee24-ead6-3679-c04a43d21351@gmail.com

src/backend/access/hash/hash_xlog.c
src/backend/access/hash/hashovfl.c
src/test/regress/expected/hash_index.out
src/test/regress/sql/hash_index.sql

index e8e06c62a9566a22fdbca442dd5b6f7a73f16fe1..40debf402884ace2ea1238fba91c8bd60f118c34 100644 (file)
@@ -655,7 +655,10 @@ hash_xlog_squeeze_page(XLogReaderState *record)
                 */
                (void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &bucketbuf);
 
-               action = XLogReadBufferForRedo(record, 1, &writebuf);
+               if (xldata->ntups > 0 || xldata->is_prev_bucket_same_wrt)
+                       action = XLogReadBufferForRedo(record, 1, &writebuf);
+               else
+                       action = BLK_NOTFOUND;
        }
 
        /* replay the record for adding entries in overflow buffer */
index 9d1ff20b922a7e6014b19f0fa608acf034078476..2bd4432265cb97b6786d2accf10c3ad58861d61e 100644 (file)
@@ -668,14 +668,31 @@ _hash_freeovflpage(Relation rel, Buffer bucketbuf, Buffer ovflbuf,
                        XLogRegisterBuffer(0, bucketbuf, flags);
                }
 
-               XLogRegisterBuffer(1, wbuf, REGBUF_STANDARD);
                if (xlrec.ntups > 0)
                {
+                       XLogRegisterBuffer(1, wbuf, REGBUF_STANDARD);
                        XLogRegisterBufData(1, (char *) itup_offsets,
                                                                nitups * sizeof(OffsetNumber));
                        for (i = 0; i < nitups; i++)
                                XLogRegisterBufData(1, (char *) itups[i], tups_size[i]);
                }
+               else if (xlrec.is_prim_bucket_same_wrt || xlrec.is_prev_bucket_same_wrt)
+               {
+                       uint8           wbuf_flags;
+
+                       /*
+                        * A write buffer needs to be registered even if no tuples are
+                        * added to it to ensure that we can acquire a cleanup lock on it
+                        * if it is the same as primary bucket buffer or update the
+                        * nextblkno if it is same as the previous bucket buffer.
+                        */
+                       Assert(xlrec.ntups == 0);
+
+                       wbuf_flags = REGBUF_STANDARD;
+                       if (!xlrec.is_prev_bucket_same_wrt)
+                               wbuf_flags |= REGBUF_NO_CHANGE;
+                       XLogRegisterBuffer(1, wbuf, wbuf_flags);
+               }
 
                XLogRegisterBuffer(2, ovflbuf, REGBUF_STANDARD);
 
index a2036a159721ab3aa643b94536b959ee6f528be1..0df348b5dd88245bc9854f301fa4ce3a1a9af800 100644 (file)
@@ -271,6 +271,35 @@ ALTER INDEX hash_split_index SET (fillfactor = 10);
 REINDEX INDEX hash_split_index;
 -- Clean up.
 DROP TABLE hash_split_heap;
+-- Testcases for removing overflow pages.
+CREATE TABLE hash_cleanup_heap(keycol INT);
+CREATE INDEX hash_cleanup_index on hash_cleanup_heap USING HASH (keycol);
+-- Insert tuples to both the primary bucket page and overflow pages.
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
+-- Fill overflow pages by "dead" tuples.
+BEGIN;
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1000) as i;
+ROLLBACK;
+-- Checkpoint will ensure that all hash buffers are cleaned before we try
+-- to remove overflow pages.
+CHECKPOINT;
+-- This will squeeze the bucket and remove overflow pages.
+VACUUM hash_cleanup_heap;
+TRUNCATE hash_cleanup_heap;
+-- Insert a few tuples so that the primary bucket page doesn't get full and
+-- tuples can be moved to it.
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 50) as i;
+-- Fill overflow pages by "dead" tuples.
+BEGIN;
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1500) as i;
+ROLLBACK;
+-- And insert some tuples again. During squeeze operation, these will be moved
+-- to the primary bucket allowing to test freeing intermediate overflow pages.
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
+CHECKPOINT;
+VACUUM hash_cleanup_heap;
+-- Clean up.
+DROP TABLE hash_cleanup_heap;
 -- Index on temp table.
 CREATE TEMP TABLE hash_temp_heap (x int, y int);
 INSERT INTO hash_temp_heap VALUES (1,1);
index 527024f7109265aedb61fd49c98275b5116d9514..943bd0ecf17e7baa0d1a0d5ac0ab1f82393a2d4b 100644 (file)
@@ -247,6 +247,46 @@ REINDEX INDEX hash_split_index;
 -- Clean up.
 DROP TABLE hash_split_heap;
 
+-- Testcases for removing overflow pages.
+CREATE TABLE hash_cleanup_heap(keycol INT);
+CREATE INDEX hash_cleanup_index on hash_cleanup_heap USING HASH (keycol);
+
+-- Insert tuples to both the primary bucket page and overflow pages.
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
+
+-- Fill overflow pages by "dead" tuples.
+BEGIN;
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1000) as i;
+ROLLBACK;
+
+-- Checkpoint will ensure that all hash buffers are cleaned before we try
+-- to remove overflow pages.
+CHECKPOINT;
+
+-- This will squeeze the bucket and remove overflow pages.
+VACUUM hash_cleanup_heap;
+
+TRUNCATE hash_cleanup_heap;
+
+-- Insert a few tuples so that the primary bucket page doesn't get full and
+-- tuples can be moved to it.
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 50) as i;
+
+-- Fill overflow pages by "dead" tuples.
+BEGIN;
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1500) as i;
+ROLLBACK;
+
+-- And insert some tuples again. During squeeze operation, these will be moved
+-- to the primary bucket allowing to test freeing intermediate overflow pages.
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
+
+CHECKPOINT;
+VACUUM hash_cleanup_heap;
+
+-- Clean up.
+DROP TABLE hash_cleanup_heap;
+
 -- Index on temp table.
 CREATE TEMP TABLE hash_temp_heap (x int, y int);
 INSERT INTO hash_temp_heap VALUES (1,1);