#include "catalog/pg_namespace.h"
#include "catalog/pg_parameter_acl.h"
#include "catalog/pg_replication_origin.h"
+#include "catalog/pg_seclabel.h"
#include "catalog/pg_shdepend.h"
#include "catalog/pg_shdescription.h"
#include "catalog/pg_shseclabel.h"
return (relid < (Oid) FirstUnpinnedObjectId);
}
+/*
+ * IsCatalogTextUniqueIndexOid
+ * True iff the relation identified by this OID is a catalog UNIQUE index
+ * having a column of type "text".
+ *
+ * The relcache must not use these indexes. Inserting into any UNIQUE
+ * index compares index keys while holding BUFFER_LOCK_EXCLUSIVE.
+ * bttextcmp() can search the COLLOID catcache. Depending on concurrent
+ * invalidation traffic, a catcache search can reach a relcache build. A
+ * backend would self-deadlock on LWLocks if that relcache build read the
+ * exclusive-locked buffer.
+ *
+ * To avoid itself becoming a source of such deadlocks, this function
+ * doesn't read catalogs. Instead, it relies on a hard-coded list, backed
+ * by a regression test.
+ */
+bool
+IsCatalogTextUniqueIndexOid(Oid relid)
+{
+ switch (relid)
+ {
+ case ParameterAclParnameIndexId:
+ case ReplicationOriginNameIndex:
+ case SecLabelObjectIndexId:
+ case SharedSecLabelObjectIndexId:
+ return true;
+ }
+ return false;
+}
+
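[Editor's note: for orientation, here is a hedged sketch, not part of the patch and with a hypothetical helper name, of how the assertion code added to bufmgr.c later in this patch consumes the list above: an exclusive-locked buffer of a catalog relation is tolerated only when that relation is one of these text-keyed unique indexes.]

#include "postgres.h"
#include "catalog/catalog.h"

/* Hypothetical helper mirroring the bufmgr.c caller added below. */
static bool
exclusive_buffer_lock_permits_catalog_read(Oid relid)
{
	/* Text-keyed catalog UNIQUE indexes are exempt; see the comment above. */
	if (IsCatalogTextUniqueIndexOid(relid))
		return true;

	/* Any other catalog buffer must not be exclusive-locked here. */
	return !IsCatalogRelationOid(relid);
}
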
/*
* IsInplaceUpdateRelation
* True iff core code performs inplace updates on the relation.
#include "access/tableam.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
+#ifdef USE_ASSERT_CHECKING
+#include "catalog/pg_tablespace_d.h"
+#endif
#include "catalog/storage.h"
#include "catalog/storage_xlog.h"
#include "executor/instrument.h"
ForkNumber forkNum, bool permanent);
static void AtProcExit_Buffers(int code, Datum arg);
static void CheckForBufferLeaks(void);
+#ifdef USE_ASSERT_CHECKING
+static void AssertNotCatalogBufferLock(LWLock *lock, LWLockMode mode,
+ void *unused_context);
+#endif
static int rlocator_comparator(const void *p1, const void *p2);
static inline int buffertag_comparator(const BufferTag *ba, const BufferTag *bb);
static inline int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b);
#endif
}
+#ifdef USE_ASSERT_CHECKING
+/*
+ * Check for exclusive-locked catalog buffers. This is the core of
+ * AssertCouldGetRelation().
+ *
+ * A backend would self-deadlock on LWLocks if the catalog scan read the
+ * exclusive-locked buffer. The main threat is exclusive-locked buffers of
+ * catalogs used in relcache, because a catcache search on any catalog may
+ * build that catalog's relcache entry. We don't have an inventory of
+ * catalogs relcache uses, so just check buffers of most catalogs.
+ *
+ * It's better to minimize waits while holding an exclusive buffer lock, so it
+ * would be nice to broaden this check not to be catalog-specific. However,
+ * bttextcmp() accesses pg_collation, and non-core opclasses might similarly
+ * read tables. That is deadlock-free as long as there's no loop in the
+ * dependency graph: modifying table A may cause an opclass to read table B,
+ * but it must not cause a read of table A.
+ */
+void
+AssertBufferLocksPermitCatalogRead(void)
+{
+ ForEachLWLockHeldByMe(AssertNotCatalogBufferLock, NULL);
+}
+
+static void
+AssertNotCatalogBufferLock(LWLock *lock, LWLockMode mode,
+ void *unused_context)
+{
+ BufferDesc *bufHdr;
+ BufferTag tag;
+ Oid relid;
+
+ if (mode != LW_EXCLUSIVE)
+ return;
+
+ if (!((BufferDescPadded *) lock > BufferDescriptors &&
+ (BufferDescPadded *) lock < BufferDescriptors + NBuffers))
+ return; /* not a buffer lock */
+
+ bufHdr = (BufferDesc *)
+ ((char *) lock - offsetof(BufferDesc, content_lock));
+ tag = bufHdr->tag;
+
+ /*
+ * This relNumber==relid assumption holds until a catalog experiences
+ * VACUUM FULL or similar. After a command like that, relNumber will be
+ * in the normal (non-catalog) range, and we lose the ability to detect
+ * hazardous access to that catalog. Calling RelidByRelfilenumber() would
+ * close that gap, but RelidByRelfilenumber() could itself deadlock on a
+ * lock this backend already holds.
+ */
+ relid = tag.relNumber;
+
+ if (IsCatalogTextUniqueIndexOid(relid)) /* see comments at the callee */
+ return;
+
+ Assert(!IsCatalogRelationOid(relid));
+ /* Shared rels are always catalogs: detect even after VACUUM FULL. */
+ Assert(tag.spcOid != GLOBALTABLESPACE_OID);
+}
+#endif
+
+
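[Editor's note: to make the hazard concrete, the following is a purely illustrative sketch, with a hypothetical function not proposed for inclusion, of the coding pattern the new assertion catches: taking BUFFER_LOCK_EXCLUSIVE on a catalog buffer and then doing work whose cache-miss path may read catalogs. With assertions enabled, the syscache probe below would now reach AssertBufferLocksPermitCatalogRead() whether or not the cache misses.]

#include "postgres.h"

#include "storage/bufmgr.h"
#include "utils/rel.h"
#include "utils/syscache.h"

/* Hypothetical example of the unsafe pattern; for illustration only. */
static void
hypothetical_unsafe_catalog_buffer_usage(Relation catalog_rel,
										 BlockNumber blkno, Oid typid)
{
	Buffer		buf = ReadBuffer(catalog_rel, blkno);

	LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);

	/*
	 * WRONG: a catcache miss here reads catalogs, and the resulting relcache
	 * build could try to lock the buffer held above, self-deadlocking on its
	 * content lock.
	 */
	(void) SearchSysCacheExists1(TYPEOID, ObjectIdGetDatum(typid));

	UnlockReleaseBuffer(buf);
}
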
/*
* Helper routine to issue warnings when a buffer is unexpectedly pinned
*/
}
+/*
+ * ForEachLWLockHeldByMe - run a callback for each held lock
+ *
+ * This is meant as debug support only.
+ */
+void
+ForEachLWLockHeldByMe(void (*callback) (LWLock *, LWLockMode, void *),
+ void *context)
+{
+ int i;
+
+ for (i = 0; i < num_held_lwlocks; i++)
+ callback(held_lwlocks[i].lock, held_lwlocks[i].mode, context);
+}
+
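[Editor's note: as a usage illustration, hypothetical and debug-only, not part of the patch: a caller passes a callback that receives each held lock, its mode, and an opaque context pointer. For example, counting the LW_EXCLUSIVE locks currently held.]

#include "postgres.h"

#include "storage/lwlock.h"

/* Callback: tally exclusive-mode locks into the int pointed to by context. */
static void
count_exclusive_callback(LWLock *lock, LWLockMode mode, void *context)
{
	if (mode == LW_EXCLUSIVE)
		(*(int *) context)++;
}

/* Hypothetical debug helper: number of LWLocks held exclusively right now. */
static int
CountExclusiveLWLocksHeldByMe(void)
{
	int			count = 0;

	ForEachLWLockHeldByMe(count_exclusive_callback, &count);
	return count;
}
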
/*
* LWLockHeldByMe - test whether my process holds a lock in any mode
*
if (!OidIsValid(collid))
elog(ERROR, "cache lookup failed for collation %u", collid);
+ AssertCouldGetRelation();
+
if (last_collation_cache_oid == collid)
return last_collation_cache_locale;
cp->cc_lbucket = newbucket;
}
+/*
+ * ConditionalCatalogCacheInitializeCache
+ *
+ * Call CatalogCacheInitializeCache() if not yet done.
+ */
+pg_attribute_always_inline
+static void
+ConditionalCatalogCacheInitializeCache(CatCache *cache)
+{
+#ifdef USE_ASSERT_CHECKING
+ /*
+ * TypeCacheRelCallback() runs outside transactions and relies on TYPEOID
+ * for hashing. This isn't ideal. Since lookup_type_cache() both
+ * registers the callback and searches TYPEOID, reaching trouble likely
+ * requires OOM at an unlucky moment.
+ *
+ * InvalidateAttoptCacheCallback() runs outside transactions and likewise
+ * relies on ATTNUM. InitPostgres() initializes ATTNUM, so it's reliable.
+ */
+ if (!(cache->id == TYPEOID || cache->id == ATTNUM) ||
+ IsTransactionState())
+ AssertCouldGetRelation();
+ else
+ Assert(cache->cc_tupdesc != NULL);
+#endif
+
+ if (unlikely(cache->cc_tupdesc == NULL))
+ CatalogCacheInitializeCache(cache);
+}
+
/*
* CatalogCacheInitializeCache
*
* This function does final initialization of a catcache: obtain the tuple
- * descriptor and set up the hash and equality function links. We assume
- * that the relcache entry can be opened at this point!
+ * descriptor and set up the hash and equality function links.
*/
#ifdef CACHEDEBUG
#define CatalogCacheInitializeCache_DEBUG1 \
void
InitCatCachePhase2(CatCache *cache, bool touch_index)
{
- if (cache->cc_tupdesc == NULL)
- CatalogCacheInitializeCache(cache);
+ ConditionalCatalogCacheInitializeCache(cache);
if (touch_index &&
cache->id != AMOID &&
dlist_head *bucket;
CatCTup *ct;
- /* Make sure we're in an xact, even if this ends up being a cache hit */
- Assert(IsTransactionState());
-
Assert(cache->cc_nkeys == nkeys);
/*
* one-time startup overhead for each cache
*/
- if (unlikely(cache->cc_tupdesc == NULL))
- CatalogCacheInitializeCache(cache);
+ ConditionalCatalogCacheInitializeCache(cache);
#ifdef CATCACHE_STATS
cache->cc_searches++;
/*
* one-time startup overhead for each cache
*/
- if (cache->cc_tupdesc == NULL)
- CatalogCacheInitializeCache(cache);
+ ConditionalCatalogCacheInitializeCache(cache);
/*
* calculate the hash value
/*
* one-time startup overhead for each cache
*/
- if (unlikely(cache->cc_tupdesc == NULL))
- CatalogCacheInitializeCache(cache);
+ ConditionalCatalogCacheInitializeCache(cache);
Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
continue;
/* Just in case cache hasn't finished initialization yet... */
- if (ccp->cc_tupdesc == NULL)
- CatalogCacheInitializeCache(ccp);
+ ConditionalCatalogCacheInitializeCache(ccp);
hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
{
TransInvalidationInfo *myInfo;
- Assert(IsTransactionState());
+ /* PrepareToInvalidateCacheTuple() needs relcache */
+ AssertCouldGetRelation();
/* Can't queue transactional message while collecting inplace messages. */
Assert(inplaceInvalInfo == NULL);
{
InvalidationInfo *myInfo;
- Assert(IsTransactionState());
+ AssertCouldGetRelation();
/* limit of one inplace update under assembly */
Assert(inplaceInvalInfo == NULL);
void
AcceptInvalidationMessages(void)
{
+#ifdef USE_ASSERT_CHECKING
+ /* message handlers shall access catalogs only during transactions */
+ if (IsTransactionState())
+ AssertCouldGetRelation();
+#endif
+
ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage,
InvalidateSystemCaches);
Oid databaseId;
Oid relationId;
+ /* PrepareToInvalidateCacheTuple() needs relcache */
+ AssertCouldGetRelation();
+
/* Do nothing during bootstrap */
if (IsBootstrapProcessingMode())
return;
relation->rd_isvalid = true;
}
+#ifdef USE_ASSERT_CHECKING
+/*
+ * AssertCouldGetRelation
+ *
+ * Check safety of calling RelationIdGetRelation().
+ *
+ * In code that reads catalogs in the event of a cache miss, call this
+ * before checking the cache.
+ */
+void
+AssertCouldGetRelation(void)
+{
+ Assert(IsTransactionState());
+ AssertBufferLocksPermitCatalogRead();
+}
+#endif
+
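[Editor's note: the calling convention described above is the one the pg_locale.c hunk earlier in this patch follows, and it generalizes to any backend-local cache whose miss path reads catalogs. A hedged sketch with hypothetical names, using get_rel_namespace() only as a stand-in for the miss-path catalog read.]

#include "postgres.h"

#include "utils/lsyscache.h"
#include "utils/relcache.h"

/* Hypothetical one-entry cache; the miss path reads catalogs. */
static Oid	cached_relid = InvalidOid;
static Oid	cached_nspoid = InvalidOid;

static Oid
hypothetical_get_namespace_cached(Oid relid)
{
	/* Assert before checking the cache, so cache hits are checked too. */
	AssertCouldGetRelation();

	if (cached_relid == relid)
		return cached_nspoid;

	/* cache miss: this reads pg_class via the syscache */
	cached_nspoid = get_rel_namespace(relid);
	cached_relid = relid;
	return cached_nspoid;
}
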
/* ----------------------------------------------------------------
* Relation Descriptor Lookup Interface
{
Relation rd;
- /* Make sure we're in an xact, even if this ends up being a cache hit */
- Assert(IsTransactionState());
+ AssertCouldGetRelation();
/*
* first try to find reldesc in the cache
Assert(relation->rd_isnailed);
/* nailed indexes are handled by RelationReloadIndexInfo() */
Assert(relation->rd_rel->relkind == RELKIND_RELATION);
- /* can only reread catalog contents in a transaction */
- Assert(IsTransactionState());
+ AssertCouldGetRelation();
/*
* Redo RelationInitPhysicalAddr in case it is a mapped relation whose
RelationRebuildRelation(Relation relation)
{
Assert(!RelationHasReferenceCountZero(relation));
- /* rebuilding requires access to the catalogs */
- Assert(IsTransactionState());
+ AssertCouldGetRelation();
/* there is no reason to ever rebuild a dropped relation */
Assert(relation->rd_droppedSubid == InvalidSubTransactionId);
{
Oid utf8_to_server_proc;
- Assert(IsTransactionState());
+ AssertCouldGetRelation();
utf8_to_server_proc =
FindDefaultConversionProc(PG_UTF8,
current_server_encoding);
extern bool IsToastClass(Form_pg_class reltuple);
extern bool IsCatalogRelationOid(Oid relid);
+extern bool IsCatalogTextUniqueIndexOid(Oid relid);
extern bool IsInplaceUpdateOid(Oid relid);
extern bool IsCatalogNamespace(Oid namespaceId);
extern void InitBufferManagerAccess(void);
extern void AtEOXact_Buffers(bool isCommit);
+#ifdef USE_ASSERT_CHECKING
+extern void AssertBufferLocksPermitCatalogRead(void);
+#endif
extern char *DebugPrintBufferRefcount(Buffer buffer);
extern void CheckPointBuffers(int flags);
extern BlockNumber BufferGetBlockNumber(Buffer buffer);
extern void LWLockReleaseAll(void);
extern void LWLockDisown(LWLock *lock);
extern void LWLockReleaseDisowned(LWLock *lock, LWLockMode mode);
+extern void ForEachLWLockHeldByMe(void (*callback) (LWLock *, LWLockMode, void *),
+ void *context);
extern bool LWLockHeldByMe(LWLock *lock);
extern bool LWLockAnyHeldByMe(LWLock *lock, int nlocks, size_t stride);
extern bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode);
/*
* Routines to open (lookup) and close a relcache entry
*/
+#ifdef USE_ASSERT_CHECKING
+extern void AssertCouldGetRelation(void);
+#else
+static inline void
+AssertCouldGetRelation(void)
+{
+}
+#endif
extern Relation RelationIdGetRelation(Oid relationId);
extern void RelationClose(Relation relation);
-- that is OID or REGPROC fields that are not zero and do not match some
-- row in the linked-to table. However, if we want to enforce that a link
-- field can't be 0, we have to check it here.
+-- directory paths and dlsuffix are passed to us in environment variables
+\getenv libdir PG_LIBDIR
+\getenv dlsuffix PG_DLSUFFIX
+\set regresslib :libdir '/regress' :dlsuffix
-- **************** pg_type ****************
-- Look for illegal values in pg_type fields.
SELECT t1.oid, t1.typname
----------+---------+-----+---------
(0 rows)
+-- Look for IsCatalogTextUniqueIndexOid() omissions.
+CREATE FUNCTION is_catalog_text_unique_index_oid(oid) RETURNS bool
+ AS :'regresslib', 'is_catalog_text_unique_index_oid'
+ LANGUAGE C STRICT;
+SELECT indexrelid::regclass
+FROM pg_index
+WHERE (is_catalog_text_unique_index_oid(indexrelid) <>
+ (indisunique AND
+ indexrelid < 16384 AND
+ EXISTS (SELECT 1 FROM pg_attribute
+ WHERE attrelid = indexrelid AND atttypid = 'text'::regtype)));
+ indexrelid
+------------
+(0 rows)
+
-- **************** pg_range ****************
-- Look for illegal values in pg_range fields.
SELECT r.rngtypid, r.rngsubtype
#include "access/detoast.h"
#include "access/htup_details.h"
+#include "catalog/catalog.h"
#include "catalog/namespace.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_type.h"
PG_RETURN_NULL();
}
+PG_FUNCTION_INFO_V1(is_catalog_text_unique_index_oid);
+Datum
+is_catalog_text_unique_index_oid(PG_FUNCTION_ARGS)
+{
+ PG_RETURN_BOOL(IsCatalogTextUniqueIndexOid(PG_GETARG_OID(0)));
+}
+
PG_FUNCTION_INFO_V1(test_support_func);
Datum
test_support_func(PG_FUNCTION_ARGS)
-- row in the linked-to table. However, if we want to enforce that a link
-- field can't be 0, we have to check it here.
+-- directory paths and dlsuffix are passed to us in environment variables
+\getenv libdir PG_LIBDIR
+\getenv dlsuffix PG_DLSUFFIX
+
+\set regresslib :libdir '/regress' :dlsuffix
+
-- **************** pg_type ****************
-- Look for illegal values in pg_type fields.
a1.attbyval != t1.typbyval OR
(a1.attstorage != t1.typstorage AND a1.attstorage != 'p'));
+-- Look for IsCatalogTextUniqueIndexOid() omissions.
+
+CREATE FUNCTION is_catalog_text_unique_index_oid(oid) RETURNS bool
+ AS :'regresslib', 'is_catalog_text_unique_index_oid'
+ LANGUAGE C STRICT;
+
+SELECT indexrelid::regclass
+FROM pg_index
+WHERE (is_catalog_text_unique_index_oid(indexrelid) <>
+ (indisunique AND
+ indexrelid < 16384 AND
+ EXISTS (SELECT 1 FROM pg_attribute
+ WHERE attrelid = indexrelid AND atttypid = 'text'::regtype)));
+
-- **************** pg_range ****************
-- Look for illegal values in pg_range fields.