operations can use. The final limit is determined by
multiplying <varname>work_mem</varname> by
<varname>hash_mem_multiplier</varname>. The default value is
- 1.0, which makes hash-based operations subject to the same
- simple <varname>work_mem</varname> maximum as sort-based
- operations.
+ 2.0, which makes hash-based operations use twice the usual
+ <varname>work_mem</varname> base amount.
</para>
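As an illustrative sketch (the values below are examples, not part of this
patch), the effective per-node limit is simply the product of the two settings:

SET work_mem = '4MB';
SET hash_mem_multiplier = 2.0;
-- hash-based nodes (hash join, hash aggregate) may use up to 4MB * 2.0 = 8MB
-- sort-based nodes remain limited to plain work_mem, i.e. 4MB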
<para>
Consider increasing <varname>hash_mem_multiplier</varname> in
environments where spilling by query operations is a regular
occurrence, especially when simply increasing
<varname>work_mem</varname> results in memory pressure (memory
pressure typically takes the form of intermittent out of
- memory errors). A setting of 1.5 or 2.0 may be effective with
+ memory errors). The default setting of 2.0 is often effective with
mixed workloads. Higher settings in the range of 2.0 - 8.0 or
more may be effective in environments where
<varname>work_mem</varname> has already been increased to 40MB
or more.
</para>
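A hedged tuning sketch under the assumptions above (the 64MB figure is a
hypothetical example of an already-raised work_mem): rather than raising
work_mem further for every operation, grant extra memory to hash-based nodes
alone by increasing the multiplier:

SET work_mem = '64MB';            -- already increased for the whole workload
SET hash_mem_multiplier = 4.0;    -- hash nodes may use up to 256MB; sorts stay at 64MB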
bool enableFsync = true;
bool allowSystemTableMods = false;
int work_mem = 4096;
-double hash_mem_multiplier = 1.0;
+double hash_mem_multiplier = 2.0;
int maintenance_work_mem = 65536;
int max_parallel_maintenance_workers = 2;
GUC_EXPLAIN
},
&hash_mem_multiplier,
- 1.0, 1.0, 1000.0,
+ 2.0, 1.0, 1000.0,
NULL, NULL, NULL
},
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
-#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem
+#hash_mem_multiplier = 2.0 # 1-1000.0 multiplier on hash table work_mem
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#logical_decoding_work_mem = 64MB # min 64kB
-- test the knapsack
set enable_indexscan = false;
+set hash_mem_multiplier = 1.0;
set work_mem = '64kB';
explain (costs off)
select unique1,
from gs_data_1 group by cube (g1000, g100,g10);
set enable_sort = true;
set work_mem to default;
+set hash_mem_multiplier to default;
-- Compare results
(select * from gs_hash_1 except select * from gs_group_1)
union all
savepoint settings;
set local max_parallel_workers_per_gather = 0;
set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
explain (costs off)
select count(*) from simple r join simple s using (id);
QUERY PLAN
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
set local enable_parallel_hash = off;
explain (costs off)
select count(*) from simple r join simple s using (id);
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
set local enable_parallel_hash = on;
explain (costs off)
select count(*) from simple r join simple s using (id);
savepoint settings;
set local max_parallel_workers_per_gather = 0;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
explain (costs off)
select count(*) from simple r join simple s using (id);
QUERY PLAN
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
set local enable_parallel_hash = off;
explain (costs off)
select count(*) from simple r join simple s using (id);
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '192kB';
+set local hash_mem_multiplier = 1.0;
set local enable_parallel_hash = on;
explain (costs off)
select count(*) from simple r join simple s using (id);
savepoint settings;
set local max_parallel_workers_per_gather = 0;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
explain (costs off)
select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id);
QUERY PLAN
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
set local enable_parallel_hash = off;
explain (costs off)
select count(*) from simple r join bigger_than_it_looks s using (id);
savepoint settings;
set local max_parallel_workers_per_gather = 1;
set local work_mem = '192kB';
+set local hash_mem_multiplier = 1.0;
set local enable_parallel_hash = on;
explain (costs off)
select count(*) from simple r join bigger_than_it_looks s using (id);
savepoint settings;
set local max_parallel_workers_per_gather = 0;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
explain (costs off)
select count(*) from simple r join extremely_skewed s using (id);
QUERY PLAN
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
set local enable_parallel_hash = off;
explain (costs off)
select count(*) from simple r join extremely_skewed s using (id);
savepoint settings;
set local max_parallel_workers_per_gather = 1;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
set local enable_parallel_hash = on;
explain (costs off)
select count(*) from simple r join extremely_skewed s using (id);
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
set local parallel_leader_participation = off;
select * from hash_join_batches(
$$
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '64kB';
+set hash_mem_multiplier = 1.0;
explain (costs off)
select count(*) from join_foo
left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '4MB';
+set hash_mem_multiplier = 1.0;
explain (costs off)
select count(*) from join_foo
left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '64kB';
+set hash_mem_multiplier = 1.0;
explain (costs off)
select count(*) from join_foo
left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '4MB';
+set hash_mem_multiplier = 1.0;
explain (costs off)
select count(*) from join_foo
left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
set max_parallel_workers_per_gather = 2;
set enable_parallel_hash = on;
set work_mem = '128kB';
+set hash_mem_multiplier = 1.0;
explain (costs off)
select length(max(s.t))
from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id);
1000 | 9.5000000000000000
(1 row)
--- Reduce work_mem so that we see some cache evictions
+-- Reduce work_mem and hash_mem_multiplier so that we see some cache evictions
SET work_mem TO '64kB';
+SET hash_mem_multiplier TO 1.0;
SET enable_mergejoin TO off;
-- Ensure we get some evictions. We're unable to validate the hits and misses
-- here as the number of entries that fit in the cache at once will vary
RESET enable_seqscan;
RESET enable_mergejoin;
RESET work_mem;
+RESET hash_mem_multiplier;
RESET enable_bitmapscan;
RESET enable_hashjoin;
-- Test parallel plans with Memoize
-- test the knapsack
set enable_indexscan = false;
+set hash_mem_multiplier = 1.0;
set work_mem = '64kB';
explain (costs off)
select unique1,
set enable_sort = true;
set work_mem to default;
+set hash_mem_multiplier to default;
-- Compare results
savepoint settings;
set local max_parallel_workers_per_gather = 0;
set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
explain (costs off)
select count(*) from simple r join simple s using (id);
select count(*) from simple r join simple s using (id);
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
set local enable_parallel_hash = off;
explain (costs off)
select count(*) from simple r join simple s using (id);
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
set local enable_parallel_hash = on;
explain (costs off)
select count(*) from simple r join simple s using (id);
savepoint settings;
set local max_parallel_workers_per_gather = 0;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
explain (costs off)
select count(*) from simple r join simple s using (id);
select count(*) from simple r join simple s using (id);
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
set local enable_parallel_hash = off;
explain (costs off)
select count(*) from simple r join simple s using (id);
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '192kB';
+set local hash_mem_multiplier = 1.0;
set local enable_parallel_hash = on;
explain (costs off)
select count(*) from simple r join simple s using (id);
savepoint settings;
set local max_parallel_workers_per_gather = 0;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
explain (costs off)
select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id);
select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id);
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
set local enable_parallel_hash = off;
explain (costs off)
select count(*) from simple r join bigger_than_it_looks s using (id);
savepoint settings;
set local max_parallel_workers_per_gather = 1;
set local work_mem = '192kB';
+set local hash_mem_multiplier = 1.0;
set local enable_parallel_hash = on;
explain (costs off)
select count(*) from simple r join bigger_than_it_looks s using (id);
savepoint settings;
set local max_parallel_workers_per_gather = 0;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
explain (costs off)
select count(*) from simple r join extremely_skewed s using (id);
select count(*) from simple r join extremely_skewed s using (id);
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
set local enable_parallel_hash = off;
explain (costs off)
select count(*) from simple r join extremely_skewed s using (id);
savepoint settings;
set local max_parallel_workers_per_gather = 1;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
set local enable_parallel_hash = on;
explain (costs off)
select count(*) from simple r join extremely_skewed s using (id);
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
set local parallel_leader_participation = off;
select * from hash_join_batches(
$$
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '64kB';
+set hash_mem_multiplier = 1.0;
explain (costs off)
select count(*) from join_foo
left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '4MB';
+set hash_mem_multiplier = 1.0;
explain (costs off)
select count(*) from join_foo
left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '64kB';
+set hash_mem_multiplier = 1.0;
explain (costs off)
select count(*) from join_foo
left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '4MB';
+set hash_mem_multiplier = 1.0;
explain (costs off)
select count(*) from join_foo
left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
set max_parallel_workers_per_gather = 2;
set enable_parallel_hash = on;
set work_mem = '128kB';
+set hash_mem_multiplier = 1.0;
explain (costs off)
select length(max(s.t))
from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id);
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;
--- Reduce work_mem so that we see some cache evictions
+-- Reduce work_mem and hash_mem_multiplier so that we see some cache evictions
SET work_mem TO '64kB';
+SET hash_mem_multiplier TO 1.0;
SET enable_mergejoin TO off;
-- Ensure we get some evictions. We're unable to validate the hits and misses
-- here as the number of entries that fit in the cache at once will vary
RESET enable_seqscan;
RESET enable_mergejoin;
RESET work_mem;
+RESET hash_mem_multiplier;
RESET enable_bitmapscan;
RESET enable_hashjoin;