break;
}
+ case 'L': /* Lock information */
+ {
+ int offset;
+
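+ /* One message-type byte, then some whole number of LOCALLOCKTAGs. */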
+ Assert(msg->len > 1 && (msg->len % sizeof(LOCALLOCKTAG)) == 1);
+
+ for (offset = 1; offset < msg->len; offset += sizeof(LOCALLOCKTAG))
+ {
+ LOCALLOCKTAG locallocktag;
+
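+ /* Copy out via memcpy; msg->data[offset] may not be suitably aligned. */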
+ memcpy(&locallocktag, &msg->data[offset], sizeof(LOCALLOCKTAG));
+ /* XXX. Now what? The leader doesn't yet do anything with this data. */
+#if 0
+ ereport(NOTICE,
+ (errmsg("worker has lock %u/%u/%u/%u type %u method %u mode %u",
+ locallocktag.lock.locktag_field1,
+ locallocktag.lock.locktag_field2,
+ locallocktag.lock.locktag_field3,
+ locallocktag.lock.locktag_field4,
+ locallocktag.lock.locktag_type,
+ locallocktag.lock.locktag_lockmethodid,
+ locallocktag.mode)));
+#endif
+ }
+ break;
+ }
+
case 'X': /* Terminate, indicating clean exit */
{
pfree(pcxt->worker[i].error_mqh);
/* Must pop active snapshot so resowner.c doesn't complain. */
PopActiveSnapshot();
+ /*
+ * Send the leader a message reporting the heavyweight locks we still
+ * retain. This must be done before we end the transaction below, since
+ * that releases them.
+ */
+ pq_beginmessage(&msgbuf, 'L');
+ if (GetMyLocks(&msgbuf) != 0)
+ pq_endmessage(&msgbuf);
+
/* Shut down the parallel-worker transaction. */
EndParallelWorkerTransaction();
lockMethodTable = LockMethods[lockmethodid];
if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
elog(ERROR, "unrecognized lock mode: %d", lockmode);
- Assert(!IsInParallelMode() || MyProc->lockGroupLeader != NULL);
if (RecoveryInProgress() && !InRecovery &&
(locktag->locktag_type == LOCKTAG_OBJECT ||
return data;
}
+
+/*
+ * GetMyLocks -- Write all locks we hold into the provided StringInfo.
+ *
+ * For each lock, we write the LOCKTAG and LOCKMODE, a concept conveniently
+ * encapsulated by the LOCALLOCKTAG data type.
+ *
+ * We return the number of locks written.
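+ *
+ * The caller must already have initialized the StringInfo; the parallel
+ * worker exit path does so with pq_beginmessage().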
+ */
+int
+GetMyLocks(StringInfo buf)
+{
+ HASH_SEQ_STATUS status;
+ LOCALLOCK *locallock;
+ int count = 0;
+
+ /*
+ * We choose to implement this by iterating over the local lock table.
+ * This carries the intrinsic risk that if any of our lock management code
+ * is buggy, we might enumerate a different set of locks here than the
+ * shared lock table believes we actually hold. We could eliminate that
+ * risk by doing this based on the shared memory data structures rather
+ * than our local bookkeeping, but that would require acquiring every lock
+ * manager partition lock in turn. We prefer to minimize contention.
+ */
+ hash_seq_init(&status, LockMethodLocalHash);
+
+ while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+ {
+ /*
+ * Normally we remove entries from the hash table when nLocks == 0,
+ * but not if we run out of shared memory while setting up the lock.
+ * Skip any such unheld locks.
+ */
+ if (locallock->nLocks == 0)
+ continue;
+
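+ /* Emit the raw LOCALLOCKTAG bytes; the leader memcpy's them back out. */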
+ appendBinaryStringInfo(buf, (char *) &locallock->tag,
+ sizeof(LOCALLOCKTAG));
+ ++count;
+ }
+
+ return count;
+}
+
/*
* Returns a list of currently held AccessExclusiveLocks, for use by
* LogStandbySnapshot(). The result is a palloc'd array,
#include "storage/lwlock.h"
#include "storage/shmem.h"
+/* avoid including lib/stringinfo.h */
+struct StringInfoData;
/* struct PGPROC is declared in proc.h, but must forward-reference it */
typedef struct PGPROC PGPROC;
extern void RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode);
extern Size LockShmemSize(void);
extern LockData *GetLockStatusData(void);
+extern int GetMyLocks(struct StringInfoData *buf);
extern xl_standby_lock *GetRunningTransactionLocks(int *nlocks);
extern const char *GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode);