ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
errmsg("could not register background process"),
- errhint("You may need to increase max_worker_processes.")));
+ errhint("You may need to increase \"max_worker_processes\".")));
status = WaitForBackgroundWorkerStartup(handle, &pid);
if (status != BGWH_STARTED)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
errmsg("registering dynamic bgworker autoprewarm failed"),
- errhint("Consider increasing configuration parameter max_worker_processes.")));
+ errhint("Consider increasing configuration parameter \"max_worker_processes\".")));
/*
* Ignore return value; if it fails, postmaster has died, but we have
if (!pgss || !pgss_hash)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("pg_stat_statements must be loaded via shared_preload_libraries")));
+ errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));
InitMaterializedSRF(fcinfo, 0);
if (!pgss || !pgss_hash)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("pg_stat_statements must be loaded via shared_preload_libraries")));
+ errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));
/* Build a tuple descriptor for our result type */
if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
if (!pgss || !pgss_hash)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("pg_stat_statements must be loaded via shared_preload_libraries")));
+ errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));
LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
num_entries = hash_get_num_entries(pgss_hash);
if (IsUnderPostmaster)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("sepgsql must be loaded via shared_preload_libraries")));
+ errmsg("sepgsql must be loaded via \"shared_preload_libraries\"")));
/*
* Check availability of SELinux on the platform. If disabled, we cannot
-- released even when raise error during creating the target slot.
SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot1', 'failed'); -- error
ERROR: all replication slots are in use
-HINT: Free one or increase max_replication_slots.
+HINT: Free one or increase "max_replication_slots".
-- temporary slots were dropped automatically
SELECT pg_drop_replication_slot('orig_slot1');
pg_drop_replication_slot
<title>Use of Quotes</title>
<para>
- Always use quotes to delimit file names, user-supplied identifiers, and
- other variables that might contain words. Do not use them to mark up
- variables that will not contain words (for example, operator names).
- </para>
-
- <para>
- In messages containing configuration variable names, do not include quotes
- when the names are visibly not natural English words, such as when they
- have underscores, are all-uppercase or have mixed case. Otherwise, quotes
- must be added. Do include quotes in a message where an arbitrary variable
- name is to be expanded.
+ Always use quotes to delimit file names, user-supplied identifiers,
+ configuration variable names, and other variables that might contain
+ words. Do not use them to mark up variables that will not contain words
+ (for example, operator names).
</para>
<para>
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("posting list is too long"),
- errhint("Reduce maintenance_work_mem.")));
+ errhint("Reduce \"maintenance_work_mem\".")));
accum->allocatedMemory -= GetMemoryChunkSpace(eo->list);
eo->maxcount *= 2;
vacrel->dbname, vacrel->relnamespace, vacrel->relname,
vacrel->num_index_scans),
errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
- errhint("Consider increasing configuration parameter maintenance_work_mem or autovacuum_work_mem.\n"
+ errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
"You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
/* Stop applying cost limits from this point on */
{
if (**newval == '\0')
{
- GUC_check_errdetail("%s cannot be empty.",
+ GUC_check_errdetail("\"%s\" cannot be empty.",
"default_table_access_method");
return false;
}
if (strlen(*newval) >= NAMEDATALEN)
{
- GUC_check_errdetail("%s is too long (maximum %d characters).",
+ GUC_check_errdetail("\"%s\" is too long (maximum %d characters).",
"default_table_access_method", NAMEDATALEN - 1);
return false;
}
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not get commit timestamp data"),
RecoveryInProgress() ?
- errhint("Make sure the configuration parameter %s is set on the primary server.",
+ errhint("Make sure the configuration parameter \"%s\" is set on the primary server.",
"track_commit_timestamp") :
- errhint("Make sure the configuration parameter %s is set.",
+ errhint("Make sure the configuration parameter \"%s\" is set.",
"track_commit_timestamp")));
}
MultiXactState->offsetStopLimit - nextOffset - 1,
nmembers,
MultiXactState->offsetStopLimit - nextOffset - 1),
- errhint("Execute a database-wide VACUUM in database with OID %u with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age settings.",
+ errhint("Execute a database-wide VACUUM in database with OID %u with reduced \"vacuum_multixact_freeze_min_age\" and \"vacuum_multixact_freeze_table_age\" settings.",
MultiXactState->oldestMultiXactDB)));
}
MultiXactState->offsetStopLimit - nextOffset + nmembers,
MultiXactState->oldestMultiXactDB,
MultiXactState->offsetStopLimit - nextOffset + nmembers),
- errhint("Execute a database-wide VACUUM in that database with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age settings.")));
+ errhint("Execute a database-wide VACUUM in that database with reduced \"vacuum_multixact_freeze_min_age\" and \"vacuum_multixact_freeze_table_age\" settings.")));
ExtendMultiXactMember(nextOffset, nmembers);
RmgrNotFound(RmgrId rmid)
{
ereport(ERROR, (errmsg("resource manager with ID %d not registered", rmid),
- errhint("Include the extension module that implements this resource manager in shared_preload_libraries.")));
+ errhint("Include the extension module that implements this resource manager in \"shared_preload_libraries\".")));
}
/*
if (!process_shared_preload_libraries_in_progress)
ereport(ERROR,
(errmsg("failed to register custom resource manager \"%s\" with ID %d", rmgr->rm_name, rmid),
- errdetail("Custom resource manager must be registered while initializing modules in shared_preload_libraries.")));
+ errdetail("Custom resource manager must be registered while initializing modules in \"shared_preload_libraries\".")));
if (RmgrTable[rmid].rm_name != NULL)
ereport(ERROR,
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("prepared transactions are disabled"),
- errhint("Set max_prepared_transactions to a nonzero value.")));
+ errhint("Set \"max_prepared_transactions\" to a nonzero value.")));
/* on first call, register the exit hook */
if (!twophaseExitRegistered)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("maximum number of prepared transactions reached"),
- errhint("Increase max_prepared_transactions (currently %d).",
+ errhint("Increase \"max_prepared_transactions\" (currently %d).",
max_prepared_xacts)));
gxact = TwoPhaseState->freeGXacts;
TwoPhaseState->freeGXacts = gxact->next;
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("maximum number of prepared transactions reached"),
- errhint("Increase max_prepared_transactions (currently %d).",
+ errhint("Increase \"max_prepared_transactions\" (currently %d).",
max_prepared_xacts)));
gxact = TwoPhaseState->freeGXacts;
TwoPhaseState->freeGXacts = gxact->next;
/* check and update variables dependent on wal_segment_size */
if (ConvertToXSegs(min_wal_size_mb, wal_segment_size) < 2)
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("min_wal_size must be at least twice wal_segment_size")));
+ errmsg("\"min_wal_size\" must be at least twice \"wal_segment_size\"")));
if (ConvertToXSegs(max_wal_size_mb, wal_segment_size) < 2)
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("max_wal_size must be at least twice wal_segment_size")));
+ errmsg("\"max_wal_size\" must be at least twice \"wal_segment_size\"")));
UsableBytesInSegment =
(wal_segment_size / XLOG_BLCKSZ * UsableBytesInPage) -
{
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("WAL was generated with wal_level=minimal, cannot continue recovering"),
- errdetail("This happens if you temporarily set wal_level=minimal on the server."),
- errhint("Use a backup taken after setting wal_level to higher than minimal.")));
+ errmsg("WAL was generated with \"wal_level=minimal\", cannot continue recovering"),
+ errdetail("This happens if you temporarily set \"wal_level=minimal\" on the server."),
+ errhint("Use a backup taken after setting \"wal_level\" to higher than \"minimal\".")));
}
/*
#endif
default:
/* can't happen (unless we are out of sync with option array) */
- elog(ERROR, "unrecognized wal_sync_method: %d", method);
+ elog(ERROR, "unrecognized \"wal_sync_method\": %d", method);
return 0; /* silence warning */
}
}
default:
ereport(PANIC,
errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg_internal("unrecognized wal_sync_method: %d", wal_sync_method));
+ errmsg_internal("unrecognized \"wal_sync_method\": %d", wal_sync_method));
break;
}
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL level not sufficient for making an online backup"),
- errhint("wal_level must be set to \"replica\" or \"logical\" at server start.")));
+ errhint("\"wal_level\" must be set to \"replica\" or \"logical\" at server start.")));
if (strlen(backupidstr) > MAXPGPATH)
ereport(ERROR,
if (!checkpointfpw || state->startpoint <= recptr)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("WAL generated with full_page_writes=off was replayed "
+ errmsg("WAL generated with \"full_page_writes=off\" was replayed "
"since last restartpoint"),
errhint("This means that the backup being taken on the standby "
"is corrupt and should not be used. "
- "Enable full_page_writes and run CHECKPOINT on the primary, "
+ "Enable \"full_page_writes\" and run CHECKPOINT on the primary, "
"and then try an online backup again.")));
/*
if (state->startpoint <= recptr)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("WAL generated with full_page_writes=off was replayed "
+ errmsg("WAL generated with \"full_page_writes=off\" was replayed "
"during online backup"),
errhint("This means that the backup being taken on the standby "
"is corrupt and should not be used. "
- "Enable full_page_writes and run CHECKPOINT on the primary, "
+ "Enable \"full_page_writes\" and run CHECKPOINT on the primary, "
"and then try an online backup again.")));
ereport(WARNING,
(errmsg("still waiting for all required WAL segments to be archived (%d seconds elapsed)",
waits),
- errhint("Check that your archive_command is executing properly. "
+ errhint("Check that your \"archive_command\" is executing properly. "
"You can safely cancel this backup, "
"but the database backup will not be usable without all the WAL segments.")));
}
ereport(elevel,
(errcode_for_file_access(),
errmsg("could not stat file \"%s\": %m", xlogpath),
- errdetail("restore_command returned a zero exit status, but stat() failed.")));
+ errdetail("\"restore_command\" returned a zero exit status, but stat() failed.")));
}
}
if (!XLogStandbyInfoActive())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("pg_log_standby_snapshot() can only be used if wal_level >= replica")));
+ errmsg("pg_log_standby_snapshot() can only be used if \"wal_level\" >= \"replica\"")));
recptr = LogStandbySnapshot();
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL level not sufficient for creating a restore point"),
- errhint("wal_level must be set to \"replica\" or \"logical\" at server start.")));
+ errhint("\"wal_level\" must be set to \"replica\" or \"logical\" at server start.")));
restore_name_str = text_to_cstring(restore_name);
#ifndef USE_PREFETCH
if (*new_value == RECOVERY_PREFETCH_ON)
{
- GUC_check_errdetail("recovery_prefetch is not supported on platforms that lack posix_fadvise().");
+ GUC_check_errdetail("\"recovery_prefetch\" is not supported on platforms that lack posix_fadvise().");
return false;
}
#endif
if ((PrimaryConnInfo == NULL || strcmp(PrimaryConnInfo, "") == 0) &&
(recoveryRestoreCommand == NULL || strcmp(recoveryRestoreCommand, "") == 0))
ereport(WARNING,
- (errmsg("specified neither primary_conninfo nor restore_command"),
+ (errmsg("specified neither \"primary_conninfo\" nor \"restore_command\""),
errhint("The database server will regularly poll the pg_wal subdirectory to check for files placed there.")));
}
else
strcmp(recoveryRestoreCommand, "") == 0)
ereport(FATAL,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("must specify restore_command when standby mode is not enabled")));
+ errmsg("must specify \"restore_command\" when standby mode is not enabled")));
}
/*
errmsg("unexpected directory entry \"%s\" found in %s",
de->d_name, "pg_tblspc/"),
errdetail("All directory entries in pg_tblspc/ should be symbolic links."),
- errhint("Remove those directories, or set allow_in_place_tablespaces to ON transiently to let recovery complete.")));
+ errhint("Remove those directories, or set \"allow_in_place_tablespaces\" to ON transiently to let recovery complete.")));
}
}
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("multiple recovery targets specified"),
- errdetail("At most one of recovery_target, recovery_target_lsn, recovery_target_name, recovery_target_time, recovery_target_xid may be set.")));
+ errdetail("At most one of \"recovery_target\", \"recovery_target_lsn\", \"recovery_target_name\", \"recovery_target_time\", \"recovery_target_xid\" may be set.")));
}
/*
/* Use the value of newval directly */
if (strlen(*newval) >= MAXFNAMELEN)
{
- GUC_check_errdetail("%s is too long (maximum %d characters).",
+ GUC_check_errdetail("\"%s\" is too long (maximum %d characters).",
"recovery_target_name", MAXFNAMELEN - 1);
return false;
}
strtoul(*newval, NULL, 0);
if (errno == EINVAL || errno == ERANGE)
{
- GUC_check_errdetail("recovery_target_timeline is not a valid number.");
+ GUC_check_errdetail("\"recovery_target_timeline\" is not a valid number.");
return false;
}
}
if (wal_level != WAL_LEVEL_LOGICAL)
ereport(WARNING,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("wal_level is insufficient to publish logical changes"),
- errhint("Set wal_level to \"logical\" before creating subscriptions.")));
+ errmsg("\"wal_level\" is insufficient to publish logical changes"),
+ errhint("Set \"wal_level\" to \"logical\" before creating subscriptions.")));
return myself;
}
return true;
/* Value does not fall within any allowable range */
- GUC_check_errdetail("vacuum_buffer_usage_limit must be 0 or between %d kB and %d kB",
+ GUC_check_errdetail("\"vacuum_buffer_usage_limit\" must be 0 or between %d kB and %d kB.",
MIN_BAS_VAC_RING_SIZE_KB, MAX_BAS_VAC_RING_SIZE_KB);
return false;
else
{
/* Provide a useful complaint */
- GUC_check_errdetail("Cannot change client_encoding now.");
+ GUC_check_errdetail("Cannot change \"client_encoding\" now.");
}
return false;
}
*/
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
- errmsg("cannot change client_encoding during a parallel operation")));
+ errmsg("cannot change \"client_encoding\" during a parallel operation")));
}
/* We do not expect an error if PrepareClientEncoding succeeded */
#ifndef USE_PREFETCH
if (*newval != 0)
{
- GUC_check_errdetail("effective_io_concurrency must be set to 0 on platforms that lack posix_fadvise().");
+ GUC_check_errdetail("\"effective_io_concurrency\" must be set to 0 on platforms that lack posix_fadvise().");
return false;
}
#endif /* USE_PREFETCH */
#ifndef USE_PREFETCH
if (*newval != 0)
{
- GUC_check_errdetail("maintenance_io_concurrency must be set to 0 on platforms that lack posix_fadvise().");
+ GUC_check_errdetail("\"maintenance_io_concurrency\" must be set to 0 on platforms that lack posix_fadvise().");
return false;
}
#endif /* USE_PREFETCH */
{
ereport(isServerStart ? FATAL : LOG,
/*- translator: first %s is a GUC option name, second %s is its value */
- (errmsg("%s setting \"%s\" not supported by this build",
+ (errmsg("\"%s\" setting \"%s\" not supported by this build",
"ssl_min_protocol_version",
GetConfigOption("ssl_min_protocol_version",
false, false))));
{
ereport(isServerStart ? FATAL : LOG,
(errmsg("could not set SSL protocol version range"),
- errdetail("%s cannot be higher than %s",
+ errdetail("\"%s\" cannot be higher than \"%s\".",
"ssl_min_protocol_version",
"ssl_max_protocol_version")));
goto error;
ereport(elevel,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("hostssl record cannot match because SSL is disabled"),
- errhint("Set ssl = on in postgresql.conf."),
+ errhint("Set \"ssl = on\" in postgresql.conf."),
errcontext("line %d of configuration file \"%s\"",
line_num, file_name)));
*err_msg = "hostssl record cannot match because SSL is disabled";
if (Unix_socket_group[0] != '\0')
{
#ifdef WIN32
- elog(WARNING, "configuration item unix_socket_group is not supported on this platform");
+ elog(WARNING, "configuration item \"unix_socket_group\" is not supported on this platform");
#else
char *endptr;
unsigned long val;
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("unsafe use of string constant with Unicode escapes"),
- errdetail("String constants with Unicode escapes cannot be used when standard_conforming_strings is off."),
+ errdetail("String constants with Unicode escapes cannot be used when \"standard_conforming_strings\" is off."),
lexer_errposition()));
BEGIN(xus);
startlit();
"semaphore sets (SEMMNI), or the system wide maximum number of "
"semaphores (SEMMNS), would be exceeded. You need to raise the "
"respective kernel parameter. Alternatively, reduce PostgreSQL's "
- "consumption of semaphores by reducing its max_connections parameter.\n"
+ "consumption of semaphores by reducing its \"max_connections\" parameter.\n"
"The PostgreSQL documentation contains more information about "
"configuring your system for PostgreSQL.") : 0));
}
/* Recent enough Linux only, for now. See GetHugePageSize(). */
if (*newval != 0)
{
- GUC_check_errdetail("huge_page_size must be 0 on this platform.");
+ GUC_check_errdetail("\"huge_page_size\" must be 0 on this platform.");
return false;
}
#endif
"for a shared memory segment exceeded available memory, "
"swap space, or huge pages. To reduce the request size "
"(currently %zu bytes), reduce PostgreSQL's shared "
- "memory usage, perhaps by reducing shared_buffers or "
- "max_connections.",
+ "memory usage, perhaps by reducing \"shared_buffers\" or "
+ "\"max_connections\".",
allocsize) : 0));
}
if (huge_pages == HUGE_PAGES_ON && shared_memory_type != SHMEM_TYPE_MMAP)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("huge pages not supported with the current shared_memory_type setting")));
+ errmsg("huge pages not supported with the current \"shared_memory_type\" setting")));
/* Room for a header? */
Assert(size > MAXALIGN(sizeof(PGShmemHeader)));
{
if (*newval != 0)
{
- GUC_check_errdetail("huge_page_size must be 0 on this platform.");
+ GUC_check_errdetail("\"huge_page_size\" must be 0 on this platform.");
return false;
}
return true;
return;
ereport(LOG,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("background worker \"%s\": must be registered in shared_preload_libraries",
+ errmsg("background worker \"%s\": must be registered in \"shared_preload_libraries\"",
worker->bgw_name)));
return;
}
"checkpoints are occurring too frequently (%d seconds apart)",
elapsed_secs,
elapsed_secs),
- errhint("Consider increasing the configuration parameter max_wal_size.")));
+ errhint("Consider increasing the configuration parameter \"%s\".", "max_wal_size")));
/*
* Initialize checkpointer-private variables used during
!ArchiveCallbacks->check_configured_cb(archive_module_state))
{
ereport(WARNING,
- (errmsg("archive_mode enabled, yet archiving is not configured"),
+ (errmsg("\"archive_mode\" enabled, yet archiving is not configured"),
arch_module_check_errdetail_string ?
errdetail_internal("%s", arch_module_check_errdetail_string) : 0));
return;
if (XLogArchiveLibrary[0] != '\0' && XLogArchiveCommand[0] != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("both archive_command and archive_library set"),
- errdetail("Only one of archive_command, archive_library may be set.")));
+ errmsg("both \"archive_command\" and \"archive_library\" set"),
+ errdetail("Only one of \"archive_command\", \"archive_library\" may be set.")));
archiveLibChanged = strcmp(XLogArchiveLibrary, archiveLib) != 0;
pfree(archiveLib);
if (XLogArchiveLibrary[0] != '\0' && XLogArchiveCommand[0] != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("both archive_command and archive_library set"),
- errdetail("Only one of archive_command, archive_library may be set.")));
+ errmsg("both \"archive_command\" and \"archive_library\" set"),
+ errdetail("Only one of \"archive_command\", \"archive_library\" may be set.")));
/*
* If shell archiving is enabled, use our special initialization function.
*/
if (SuperuserReservedConnections + ReservedConnections >= MaxConnections)
{
- write_stderr("%s: superuser_reserved_connections (%d) plus reserved_connections (%d) must be less than max_connections (%d)\n",
+ write_stderr("%s: \"superuser_reserved_connections\" (%d) plus \"reserved_connections\" (%d) must be less than \"max_connections\" (%d)\n",
progname,
SuperuserReservedConnections, ReservedConnections,
MaxConnections);
}
if (XLogArchiveMode > ARCHIVE_MODE_OFF && wal_level == WAL_LEVEL_MINIMAL)
ereport(ERROR,
- (errmsg("WAL archival cannot be enabled when wal_level is \"minimal\"")));
+ (errmsg("WAL archival cannot be enabled when \"wal_level\" is \"minimal\"")));
if (max_wal_senders > 0 && wal_level == WAL_LEVEL_MINIMAL)
ereport(ERROR,
- (errmsg("WAL streaming (max_wal_senders > 0) requires wal_level \"replica\" or \"logical\"")));
+ (errmsg("WAL streaming (\"max_wal_senders\" > 0) requires \"wal_level\" to be \"replica\" or \"logical\"")));
if (summarize_wal && wal_level == WAL_LEVEL_MINIMAL)
ereport(ERROR,
- (errmsg("WAL cannot be summarized when wal_level is \"minimal\"")));
+ (errmsg("WAL cannot be summarized when \"wal_level\" is \"minimal\"")));
/*
* Other one-time internal sanity checks can go here, if they are fast.
if (!restart_after_crash)
{
ereport(LOG,
- (errmsg("shutting down because restart_after_crash is off")));
+ (errmsg("shutting down because \"restart_after_crash\" is off")));
ExitPostmaster(1);
}
}
Assert(RecoveryInProgress());
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("logical decoding on standby requires wal_level >= logical on the primary")));
+ errmsg("logical decoding on standby requires \"wal_level\" >= \"logical\" on the primary")));
}
break;
}
ereport(WARNING,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("out of logical replication worker slots"),
- errhint("You might need to increase %s.", "max_logical_replication_workers")));
+ errhint("You might need to increase \"%s\".", "max_logical_replication_workers")));
return false;
}
ereport(WARNING,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("out of background worker slots"),
- errhint("You might need to increase %s.", "max_worker_processes")));
+ errhint("You might need to increase \"%s\".", "max_worker_processes")));
return false;
}
if (wal_level < WAL_LEVEL_LOGICAL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("logical decoding requires wal_level >= logical")));
+ errmsg("logical decoding requires \"wal_level\" >= \"logical\"")));
if (MyDatabaseId == InvalidOid)
ereport(ERROR,
if (GetActiveWalLevelOnStandby() < WAL_LEVEL_LOGICAL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("logical decoding on standby requires wal_level >= logical on the primary")));
+ errmsg("logical decoding on standby requires \"wal_level\" >= \"logical\" on the primary")));
}
}
if (check_slots && max_replication_slots == 0)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("cannot query or manipulate replication origin when max_replication_slots = 0")));
+ errmsg("cannot query or manipulate replication origin when \"max_replication_slots\" is 0")));
if (!recoveryOK && RecoveryInProgress())
ereport(ERROR,
if (last_state == max_replication_slots)
ereport(PANIC,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
- errmsg("could not find free replication state, increase max_replication_slots")));
+ errmsg("could not find free replication state, increase \"max_replication_slots\"")));
/* copy data to shared memory */
replication_states[last_state].roident = disk_state.roident;
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("could not find free replication state slot for replication origin with ID %d",
node),
- errhint("Increase max_replication_slots and try again.")));
+ errhint("Increase \"max_replication_slots\" and try again.")));
if (replication_state == NULL)
{
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("could not find free replication state slot for replication origin with ID %d",
node),
- errhint("Increase max_replication_slots and try again.")));
+ errhint("Increase \"max_replication_slots\" and try again.")));
else if (session_replication_state == NULL)
{
/* initialize new slot */
ereport(ERROR,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("all replication slots are in use"),
- errhint("Free one or increase max_replication_slots.")));
+ errhint("Free one or increase \"max_replication_slots\".")));
/*
* Since this slot is not in use, nobody should be looking at any part of
if (max_replication_slots == 0)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("replication slots can only be used if max_replication_slots > 0")));
+ errmsg("replication slots can only be used if \"max_replication_slots\" > 0")));
if (wal_level < WAL_LEVEL_REPLICA)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("replication slots can only be used if wal_level >= replica")));
+ errmsg("replication slots can only be used if \"wal_level\" >= \"replica\"")));
}
/*
break;
case RS_INVAL_WAL_LEVEL:
- appendStringInfoString(&err_detail, _("Logical decoding on standby requires wal_level >= logical on the primary server."));
+ appendStringInfoString(&err_detail, _("Logical decoding on standby requires \"wal_level\" >= \"logical\" on the primary server."));
break;
case RS_INVAL_NONE:
pg_unreachable();
errmsg("invalidating obsolete replication slot \"%s\"",
NameStr(slotname)),
errdetail_internal("%s", err_detail.data),
- hint ? errhint("You might need to increase %s.", "max_slot_wal_keep_size") : 0);
+ hint ? errhint("You might need to increase \"%s\".", "max_slot_wal_keep_size") : 0);
pfree(err_detail.data);
}
if (cp.slotdata.database != InvalidOid && wal_level < WAL_LEVEL_LOGICAL)
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("logical replication slot \"%s\" exists, but wal_level < logical",
+ errmsg("logical replication slot \"%s\" exists, but \"wal_level\" < \"logical\"",
NameStr(cp.slotdata.name)),
- errhint("Change wal_level to be logical or higher.")));
+ errhint("Change \"wal_level\" to be \"logical\" or higher.")));
else if (wal_level < WAL_LEVEL_REPLICA)
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("physical replication slot \"%s\" exists, but wal_level < replica",
+ errmsg("physical replication slot \"%s\" exists, but \"wal_level\" < \"replica\"",
NameStr(cp.slotdata.name)),
- errhint("Change wal_level to be replica or higher.")));
+ errhint("Change \"wal_level\" to be \"replica\" or higher.")));
/* nothing can be active yet, don't lock anything */
for (i = 0; i < max_replication_slots; i++)
if (!restored)
ereport(FATAL,
(errmsg("too many replication slots active before shutdown"),
- errhint("Increase max_replication_slots and try again.")));
+ errhint("Increase \"max_replication_slots\" and try again.")));
}
/*
if (syncrep_parse_error_msg)
GUC_check_errdetail("%s", syncrep_parse_error_msg);
else
- GUC_check_errdetail("synchronous_standby_names parser failed");
+ GUC_check_errdetail("\"synchronous_standby_names\" parser failed.");
return false;
}
*/
if (source != PGC_S_TEST && NLocBuffer && NLocBuffer != *newval)
{
- GUC_check_errdetail("temp_buffers cannot be changed after any temporary tables have been accessed in the session.");
+ GUC_check_errdetail("\"temp_buffers\" cannot be changed after any temporary tables have been accessed in the session.");
return false;
}
return true;
#if PG_O_DIRECT == 0
if (strcmp(*newval, "") != 0)
{
- GUC_check_errdetail("debug_io_direct is not supported on this platform.");
+ GUC_check_errdetail("\"debug_io_direct\" is not supported on this platform.");
result = false;
}
flags = 0;
if (!SplitGUCList(rawstring, ',', &elemlist))
{
- GUC_check_errdetail("Invalid list syntax in parameter %s",
+ GUC_check_errdetail("Invalid list syntax in parameter \"%s\".",
"debug_io_direct");
pfree(rawstring);
list_free(elemlist);
#if XLOG_BLCKSZ < PG_IO_ALIGN_SIZE
if (result && (flags & (IO_DIRECT_WAL | IO_DIRECT_WAL_INIT)))
{
- GUC_check_errdetail("debug_io_direct is not supported for WAL because XLOG_BLCKSZ is too small");
+ GUC_check_errdetail("\"debug_io_direct\" is not supported for WAL because XLOG_BLCKSZ is too small.");
result = false;
}
#endif
#if BLCKSZ < PG_IO_ALIGN_SIZE
if (result && (flags & IO_DIRECT_DATA))
{
- GUC_check_errdetail("debug_io_direct is not supported for data because BLCKSZ is too small");
+ GUC_check_errdetail("\"debug_io_direct\" is not supported for data because BLCKSZ is too small.");
result = false;
}
#endif
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase %s.", "max_locks_per_transaction")));
+ errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
else
return LOCKACQUIRE_NOT_AVAIL;
}
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase %s.", "max_locks_per_transaction")));
+ errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
else
return LOCKACQUIRE_NOT_AVAIL;
}
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase %s.", "max_locks_per_transaction")));
+ errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
}
GrantLock(proclock->tag.myLock, proclock, lockmode);
FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase %s.", "max_locks_per_transaction")));
+ errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
}
/*
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase %s.", "max_locks_per_transaction")));
+ errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
}
/*
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase %s.", "max_locks_per_transaction")));
+ errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
}
GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("not enough elements in RWConflictPool to record a read/write conflict"),
- errhint("You might need to run fewer transactions at a time or increase max_connections.")));
+ errhint("You might need to run fewer transactions at a time or increase \"max_connections\".")));
conflict = dlist_head_element(RWConflictData, outLink, &RWConflictPool->availableList);
dlist_delete(&conflict->outLink);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("not enough elements in RWConflictPool to record a potential read/write conflict"),
- errhint("You might need to run fewer transactions at a time or increase max_connections.")));
+ errhint("You might need to run fewer transactions at a time or increase \"max_connections\".")));
conflict = dlist_head_element(RWConflictData, outLink, &RWConflictPool->availableList);
dlist_delete(&conflict->outLink);
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot use serializable mode in a hot standby"),
- errdetail("default_transaction_isolation is set to \"serializable\"."),
+ errdetail("\"default_transaction_isolation\" is set to \"serializable\"."),
errhint("You can use \"SET default_transaction_isolation = 'repeatable read'\" to change the default.")));
/*
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase %s.", "max_pred_locks_per_transaction")));
+ errhint("You might need to increase \"%s\".", "max_pred_locks_per_transaction")));
if (!found)
dlist_init(&target->predicateLocks);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase %s.", "max_pred_locks_per_transaction")));
+ errhint("You might need to increase \"%s\".", "max_pred_locks_per_transaction")));
if (!found)
{
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase %s.", "max_pred_locks_per_transaction")));
+ errhint("You might need to increase \"%s\".", "max_pred_locks_per_transaction")));
if (found)
{
Assert(predlock->commitSeqNo != 0);
if (AmWalSenderProcess())
ereport(FATAL,
(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
- errmsg("number of requested standby connections exceeds max_wal_senders (currently %d)",
+ errmsg("number of requested standby connections exceeds \"max_wal_senders\" (currently %d)",
max_wal_senders)));
ereport(FATAL,
(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
ereport(ERROR,
(errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
errmsg("stack depth limit exceeded"),
- errhint("Increase the configuration parameter max_stack_depth (currently %dkB), "
+ errhint("Increase the configuration parameter \"max_stack_depth\" (currently %dkB), "
"after ensuring the platform's stack depth limit is adequate.",
max_stack_depth)));
}
if (stack_rlimit > 0 && newval_bytes > stack_rlimit - STACK_DEPTH_SLOP)
{
- GUC_check_errdetail("max_stack_depth must not exceed %ldkB.",
+ GUC_check_errdetail("\"max_stack_depth\" must not exceed %ldkB.",
(stack_rlimit - STACK_DEPTH_SLOP) / 1024L);
GUC_check_errhint("Increase the platform's stack depth limit via \"ulimit -s\" or local equivalent.");
return false;
{
if (!WaitEventSetCanReportClosed() && *newval != 0)
{
- GUC_check_errdetail("client_connection_check_interval must be set to 0 on this platform.");
+ GUC_check_errdetail("\"client_connection_check_interval\" must be set to 0 on this platform.");
return false;
}
return true;
if (*newval &&
(log_parser_stats || log_planner_stats || log_executor_stats))
{
- GUC_check_errdetail("Cannot enable log_statement_stats when "
- "log_parser_stats, log_planner_stats, "
- "or log_executor_stats is true.");
+ GUC_check_errdetail("Cannot enable \"log_statement_stats\" when "
+ "\"log_parser_stats\", \"log_planner_stats\", "
+ "or \"log_executor_stats\" is true.");
return false;
}
return true;
ereport(elevel,
(errmsg("could not get language from ICU locale \"%s\": %s",
loc_str, u_errorName(status)),
- errhint("To disable ICU locale validation, set the parameter %s to \"%s\".",
+ errhint("To disable ICU locale validation, set the parameter \"%s\" to \"%s\".",
"icu_validation_level", "disabled")));
return;
}
ereport(elevel,
(errmsg("ICU locale \"%s\" has unknown language \"%s\"",
loc_str, lang),
- errhint("To disable ICU locale validation, set the parameter %s to \"%s\".",
+ errhint("To disable ICU locale validation, set the parameter \"%s\" to \"%s\".",
"icu_validation_level", "disabled")));
/* check that it can be opened */
}
else
{
- elog(ERROR, "unrecognized bytea_output setting: %d",
+ elog(ERROR, "unrecognized \"bytea_output\" setting: %d",
bytea_output);
rp = result = NULL; /* keep compiler quiet */
}
if (piece == p)
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("zero-length component in parameter dynamic_library_path")));
+ errmsg("zero-length component in parameter \"dynamic_library_path\"")));
if (piece == NULL)
len = strlen(p);
if (!is_absolute_path(mangled))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("component in parameter dynamic_library_path is not an absolute path")));
+ errmsg("component in parameter \"dynamic_library_path\" is not an absolute path")));
full = palloc(strlen(mangled) + 1 + baselen + 1);
sprintf(full, "%s/%s", mangled, basename);
else
{
write_stderr("%s does not know where to find the database system data.\n"
- "This can be specified as data_directory in \"%s\", "
+ "This can be specified as \"data_directory\" in \"%s\", "
"or by the -D invocation option, or by the "
"PGDATA environment variable.\n",
progname, ConfigFileName);
},
{
{"ssl_passphrase_command_supports_reload", PGC_SIGHUP, CONN_AUTH_SSL,
- gettext_noop("Controls whether ssl_passphrase_command is called during server reload."),
+ gettext_noop("Controls whether \"ssl_passphrase_command\" is called during server reload."),
NULL
},
&ssl_passphrase_command_supports_reload,
gettext_noop("Continues processing past damaged page headers."),
gettext_noop("Detection of a damaged page header normally causes PostgreSQL to "
"report an error, aborting the current transaction. Setting "
- "zero_damaged_pages to true causes the system to instead report a "
+ "\"zero_damaged_pages\" to true causes the system to instead report a "
"warning, zero out the damaged page, and continue processing. This "
"behavior will destroy data, namely all the rows on the damaged page."),
GUC_NOT_IN_SAMPLE
gettext_noop("Detection of WAL records having references to "
"invalid pages during recovery causes PostgreSQL to "
"raise a PANIC-level error, aborting the recovery. "
- "Setting ignore_invalid_pages to true causes "
+ "Setting \"ignore_invalid_pages\" to true causes "
"the system to ignore invalid page references "
"in WAL records (but still report a warning), "
"and continue recovery. This behavior may cause "
{"max_locks_per_transaction", PGC_POSTMASTER, LOCK_MANAGEMENT,
gettext_noop("Sets the maximum number of locks per transaction."),
gettext_noop("The shared lock table is sized on the assumption that at most "
- "max_locks_per_transaction objects per server process or prepared "
+ "\"max_locks_per_transaction\" objects per server process or prepared "
"transaction will need to be locked at any one time.")
},
&max_locks_per_xact,
{"max_pred_locks_per_transaction", PGC_POSTMASTER, LOCK_MANAGEMENT,
gettext_noop("Sets the maximum number of predicate locks per transaction."),
gettext_noop("The shared predicate lock table is sized on the assumption that "
- "at most max_pred_locks_per_transaction objects per server process "
+ "at most \"max_pred_locks_per_transaction\" objects per server process "
"or prepared transaction will need to be locked at any one time.")
},
&max_predicate_locks_per_xact,
{
{"commit_siblings", PGC_USERSET, WAL_SETTINGS,
gettext_noop("Sets the minimum number of concurrent open transactions "
- "required before performing commit_delay."),
+ "required before performing \"commit_delay\"."),
NULL
},
&CommitSiblings,
{"maintenance_io_concurrency",
PGC_USERSET,
RESOURCES_ASYNCHRONOUS,
- gettext_noop("A variant of effective_io_concurrency that is used for maintenance work."),
+ gettext_noop("A variant of \"effective_io_concurrency\" that is used for maintenance work."),
NULL,
GUC_EXPLAIN
},
{
{"hash_mem_multiplier", PGC_USERSET, RESOURCES_MEM,
- gettext_noop("Multiple of work_mem to use for hash tables."),
+ gettext_noop("Multiple of \"work_mem\" to use for hash tables."),
NULL,
GUC_EXPLAIN
},
{
{"log_statement_sample_rate", PGC_SUSET, LOGGING_WHEN,
- gettext_noop("Fraction of statements exceeding log_min_duration_sample to be logged."),
+ gettext_noop("Fraction of statements exceeding \"log_min_duration_sample\" to be logged."),
gettext_noop("Use a value between 0.0 (never log) and 1.0 (always log).")
},
&log_statement_sample_rate,
{
{"archive_command", PGC_SIGHUP, WAL_ARCHIVING,
gettext_noop("Sets the shell command that will be called to archive a WAL file."),
- gettext_noop("This is used only if archive_library is not set.")
+ gettext_noop("This is used only if \"archive_library\" is not set.")
},
&XLogArchiveCommand,
"",
{
{"archive_library", PGC_SIGHUP, WAL_ARCHIVING,
gettext_noop("Sets the library that will be called to archive a WAL file."),
- gettext_noop("An empty string indicates that archive_command should be used.")
+ gettext_noop("An empty string indicates that \"archive_command\" should be used.")
},
&XLogArchiveLibrary,
"",
{
{"archive_mode", PGC_POSTMASTER, WAL_ARCHIVING,
- gettext_noop("Allows archiving of WAL files using archive_command."),
+ gettext_noop("Allows archiving of WAL files using \"archive_command\"."),
NULL
},
&XLogArchiveMode,
* Probe for max_connections before shared_buffers, since it is subject to
* more constraints than shared_buffers.
*/
- printf(_("selecting default max_connections ... "));
+ printf(_("selecting default \"max_connections\" ... "));
fflush(stdout);
for (i = 0; i < connslen; i++)
printf("%d\n", n_connections);
- printf(_("selecting default shared_buffers ... "));
+ printf(_("selecting default \"shared_buffers\" ... "));
fflush(stdout);
for (i = 0; i < bufslen; i++)
res = PQexec(tmpconn, ALWAYS_SECURE_SEARCH_PATH_SQL);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
- pg_log_error("could not clear search_path: %s",
+ pg_log_error("could not clear \"search_path\": %s",
PQerrorMessage(tmpconn));
PQclear(res);
PQfinish(tmpconn);
tmpparam = PQparameterStatus(tmpconn, "integer_datetimes");
if (!tmpparam)
{
- pg_log_error("could not determine server setting for integer_datetimes");
+ pg_log_error("could not determine server setting for \"integer_datetimes\"");
PQfinish(tmpconn);
exit(1);
}
if (strcmp(tmpparam, "on") != 0)
{
- pg_log_error("integer_datetimes compile flag does not match server");
+ pg_log_error("\"integer_datetimes\" compile flag does not match server");
PQfinish(tmpconn);
exit(1);
}
case WAL_LEVEL_LOGICAL:
return "logical";
}
- return _("unrecognized wal_level");
+ return _("unrecognized \"wal_level\"");
}
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
warn_or_exit_horribly(AH,
- "could not set search_path to \"%s\": %s",
+ "could not set \"search_path\" to \"%s\": %s",
schemaName, PQerrorMessage(AH->connection));
PQclear(res);
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
warn_or_exit_horribly(AH,
- "could not set default_tablespace to %s: %s",
+ "could not set \"default_tablespace\" to %s: %s",
fmtId(want), PQerrorMessage(AH->connection));
PQclear(res);
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
warn_or_exit_horribly(AH,
- "could not set default_table_access_method: %s",
+ "could not set \"default_table_access_method\": %s",
PQerrorMessage(AH->connection));
PQclear(res);
const char *stdstrings = AH->std_strings ? "on" : "off";
PQExpBuffer qry = createPQExpBuffer();
- pg_log_info("saving standard_conforming_strings = %s",
+ pg_log_info("saving \"standard_conforming_strings = %s\"",
stdstrings);
appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
appendStringLiteralAH(qry, path->data, AH);
appendPQExpBufferStr(qry, ", false);\n");
- pg_log_info("saving search_path = %s", path->data);
+ pg_log_info("saving \"search_path = %s\"", path->data);
ArchiveEntry(AH, nilCatalogId, createDumpId(),
ARCHIVE_OPTS(.tag = "SEARCHPATH",
/* secure search_path */
res = PQexec(conn, ALWAYS_SECURE_SEARCH_PATH_SQL);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
- pg_fatal("could not clear search_path: %s",
+ pg_fatal("could not clear \"search_path\": %s",
PQresultErrorMessage(res));
PQclear(res);
*/
str = run_simple_query(conn, "SHOW full_page_writes");
if (strcmp(str, "on") != 0)
- pg_fatal("full_page_writes must be enabled in the source server");
+ pg_fatal("\"full_page_writes\" must be enabled in the source server");
pg_free(str);
/* Prepare a statement we'll use to fetch files */
printf(_("%s resynchronizes a PostgreSQL cluster with another copy of the cluster.\n\n"), progname);
printf(_("Usage:\n %s [OPTION]...\n\n"), progname);
printf(_("Options:\n"));
- printf(_(" -c, --restore-target-wal use restore_command in target configuration to\n"
+ printf(_(" -c, --restore-target-wal use \"restore_command\" in target configuration to\n"
" retrieve WAL files from archives\n"));
printf(_(" -D, --target-pgdata=DIRECTORY existing data directory to modify\n"));
printf(_(" --source-pgdata=DIRECTORY source data directory to synchronize with\n"));
(void) pg_strip_crlf(restore_command);
if (strcmp(restore_command, "") == 0)
- pg_fatal("restore_command is not set in the target cluster");
+ pg_fatal("\"restore_command\" is not set in the target cluster");
- pg_log_debug("using for rewind restore_command = \'%s\'",
+ pg_log_debug("using for rewind \"restore_command = \'%s\'\"",
restore_command);
destroyPQExpBuffer(postgres_cmd);
printf(_("\nCompare file sync methods using one %dkB write:\n"), XLOG_BLCKSZ_K);
else
printf(_("\nCompare file sync methods using two %dkB writes:\n"), XLOG_BLCKSZ_K);
- printf(_("(in wal_sync_method preference order, except fdatasync is Linux's default)\n"));
+ printf(_("(in \"wal_sync_method\" preference order, except fdatasync is Linux's default)\n"));
/*
* Test open_datasync if available
wal_level = PQgetvalue(res, 0, 0);
if (strcmp(wal_level, "logical") != 0)
- pg_fatal("wal_level must be \"logical\", but is set to \"%s\"",
+ pg_fatal("\"wal_level\" must be \"logical\", but is set to \"%s\"",
wal_level);
max_replication_slots = atoi(PQgetvalue(res, 1, 0));
if (nslots_on_old > max_replication_slots)
- pg_fatal("max_replication_slots (%d) must be greater than or equal to the number of "
+ pg_fatal("\"max_replication_slots\" (%d) must be greater than or equal to the number of "
"logical replication slots (%d) on the old cluster",
max_replication_slots, nslots_on_old);
max_replication_slots = atoi(PQgetvalue(res, 0, 0));
if (nsubs_on_old > max_replication_slots)
- pg_fatal("max_replication_slots (%d) must be greater than or equal to the number of "
+ pg_fatal("\"max_replication_slots\" (%d) must be greater than or equal to the number of "
"subscriptions (%d) on the old cluster",
max_replication_slots, nsubs_on_old);
[@pg_upgrade_cmd],
1,
[
- qr/max_replication_slots \(1\) must be greater than or equal to the number of logical replication slots \(2\) on the old cluster/
+ qr/"max_replication_slots" \(1\) must be greater than or equal to the number of logical replication slots \(2\) on the old cluster/
],
[qr//],
- 'run of pg_upgrade where the new cluster has insufficient max_replication_slots'
+ 'run of pg_upgrade where the new cluster has insufficient "max_replication_slots"'
);
ok(-d $newpub->data_dir . "/pg_upgrade_output.d",
"pg_upgrade_output.d/ not removed after pg_upgrade failure");
],
1,
[
- qr/max_replication_slots \(0\) must be greater than or equal to the number of subscriptions \(1\) on the old cluster/
+ qr/"max_replication_slots" \(0\) must be greater than or equal to the number of subscriptions \(1\) on the old cluster/
],
[qr//],
'run of pg_upgrade where the new cluster has insufficient "max_replication_slots"'
* This case is unlikely as pgbench already found "pgbench_branches"
* above to compute the scale.
*/
- pg_log_error("no pgbench_accounts table found in search_path");
+ pg_log_error("no pgbench_accounts table found in \"search_path\"");
pg_log_error_hint("Perhaps you need to do initialization (\"pgbench -i\") in database \"%s\".", PQdb(con));
exit(1);
}
* fatal too.
*/
if (wait_result_is_any_signal(rc, true))
- pg_fatal("restore_command failed: %s",
+ pg_fatal("\"restore_command\" failed: %s",
wait_result_to_str(rc));
/*
if (strlen(val) > MAX_ALGORITHM_NAME_LEN)
{
PQclear(res);
- libpq_append_conn_error(conn, "password_encryption value too long");
+ libpq_append_conn_error(conn, "\"password_encryption\" value too long");
return NULL;
}
strcpy(algobuf, val);
if (!sslVerifyProtocolVersion(conn->ssl_min_protocol_version))
{
conn->status = CONNECTION_BAD;
- libpq_append_conn_error(conn, "invalid %s value: \"%s\"",
+ libpq_append_conn_error(conn, "invalid \"%s\" value: \"%s\"",
"ssl_min_protocol_version",
conn->ssl_min_protocol_version);
return false;
if (!sslVerifyProtocolVersion(conn->ssl_max_protocol_version))
{
conn->status = CONNECTION_BAD;
- libpq_append_conn_error(conn, "invalid %s value: \"%s\"",
+ libpq_append_conn_error(conn, "invalid \"%s\" value: \"%s\"",
"ssl_max_protocol_version",
conn->ssl_max_protocol_version);
return false;
FROM committs_test
ORDER BY id;
ERROR: could not get commit timestamp data
-HINT: Make sure the configuration parameter track_commit_timestamp is set.
+HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
DROP TABLE committs_test;
SELECT pg_xact_commit_timestamp('0'::xid);
ERROR: cannot retrieve commit timestamp for transaction 0
roident != 0 AS valid_roident
FROM pg_last_committed_xact() x;
ERROR: could not get commit timestamp data
-HINT: Make sure the configuration parameter track_commit_timestamp is set.
+HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
-- Test non-normal transaction ids.
SELECT * FROM pg_xact_commit_timestamp_origin(NULL); -- ok, NULL
timestamp | roident
roident != 0 AS valid_roident
FROM pg_last_committed_xact() x;
ERROR: could not get commit timestamp data
-HINT: Make sure the configuration parameter track_commit_timestamp is set.
+HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
SELECT x.timestamp > '-infinity'::timestamptz AS ts_low,
x.timestamp <= now() AS ts_high,
roident != 0 AS valid_roident
FROM pg_xact_commit_timestamp_origin(:'txid_no_origin') x;
ERROR: could not get commit timestamp data
-HINT: Make sure the configuration parameter track_commit_timestamp is set.
+HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
-- Test transaction with replication origin
SELECT pg_replication_origin_create('regress_commit_ts: get_origin') != 0
AS valid_roident;
FROM pg_last_committed_xact() x, pg_replication_origin r
WHERE r.roident = x.roident;
ERROR: could not get commit timestamp data
-HINT: Make sure the configuration parameter track_commit_timestamp is set.
+HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
SELECT x.timestamp > '-infinity'::timestamptz AS ts_low,
x.timestamp <= now() AS ts_high,
r.roname
FROM pg_xact_commit_timestamp_origin(:'txid_with_origin') x, pg_replication_origin r
WHERE r.roident = x.roident;
ERROR: could not get commit timestamp data
-HINT: Make sure the configuration parameter track_commit_timestamp is set.
+HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
SELECT pg_replication_origin_session_reset();
pg_replication_origin_session_reset
-------------------------------------
res = PQexec(conn, "SET lc_messages TO \"C\"");
if (PQresultStatus(res) != PGRES_COMMAND_OK)
- pg_fatal("failed to set lc_messages: %s", PQerrorMessage(conn));
+ pg_fatal("failed to set \"lc_messages\": %s", PQerrorMessage(conn));
res = PQexec(conn, "SET debug_parallel_query = off");
if (PQresultStatus(res) != PGRES_COMMAND_OK)
- pg_fatal("failed to set debug_parallel_query: %s", PQerrorMessage(conn));
+ pg_fatal("failed to set \"debug_parallel_query\": %s", PQerrorMessage(conn));
/* Set the trace file, if requested */
if (tracefile != NULL)
/* warn if the user has set ssl_passphrase_command */
if (ssl_passphrase_command[0])
ereport(WARNING,
- (errmsg("ssl_passphrase_command setting ignored by ssl_passphrase_func module")));
+ (errmsg("\"ssl_passphrase_command\" setting ignored by ssl_passphrase_func module")));
SSL_CTX_set_default_passwd_cb(context, rot13_passphrase);
}
like(
$log_contents,
- qr/WARNING.*ssl_passphrase_command setting ignored by ssl_passphrase_func module/,
+ qr/WARNING.*"ssl_passphrase_command" setting ignored by ssl_passphrase_func module/,
"ssl_passphrase_command set warning");
# set the wrong passphrase
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
errmsg("could not register background process"),
- errhint("You may need to increase max_worker_processes.")));
+ errhint("You may need to increase \"max_worker_processes\".")));
++wstate->nworkers;
}
if (!process_shared_preload_libraries_in_progress)
ereport(ERROR,
(errmsg("cannot load \"%s\" after startup", "test_slru"),
- errdetail("\"%s\" must be loaded with shared_preload_libraries.",
+ errdetail("\"%s\" must be loaded with \"shared_preload_libraries\".",
"test_slru")));
prev_shmem_request_hook = shmem_request_hook;
# Confirm that the archive recovery fails with an expected error
my $logfile = slurp_file($recovery_node->logfile());
ok( $logfile =~
- qr/FATAL: .* WAL was generated with wal_level=minimal, cannot continue recovering/,
- "$node_text ends with an error because it finds WAL generated with wal_level=minimal"
+ qr/FATAL: .* WAL was generated with "wal_level=minimal", cannot continue recovering/,
+ "$node_text ends with an error because it finds WAL generated with \"wal_level=minimal\""
);
}
make_slot_active($node_standby, 'wal_level_', 0, \$stdout, \$stderr);
# We are not able to read from the slot as it requires wal_level >= logical on the primary server
check_pg_recvlogical_stderr($handle,
- "logical decoding on standby requires wal_level >= logical on the primary"
+ "logical decoding on standby requires \"wal_level\" >= \"logical\" on the primary"
);
# Restore primary wal_level
SET icu_validation_level = ERROR;
CREATE COLLATION testx (provider = icu, locale = 'nonsense-nowhere'); -- fails
ERROR: ICU locale "nonsense-nowhere" has unknown language "nonsense"
-HINT: To disable ICU locale validation, set the parameter icu_validation_level to "disabled".
+HINT: To disable ICU locale validation, set the parameter "icu_validation_level" to "disabled".
CREATE COLLATION testx (provider = icu, locale = '@colStrength=primary;nonsense=yes'); -- fails
ERROR: could not convert locale name "@colStrength=primary;nonsense=yes" to language tag: U_ILLEGAL_ARGUMENT_ERROR
RESET icu_validation_level;
WARNING: could not convert locale name "@colStrength=primary;nonsense=yes" to language tag: U_ILLEGAL_ARGUMENT_ERROR
CREATE COLLATION testx (provider = icu, locale = 'nonsense-nowhere'); DROP COLLATION testx;
WARNING: ICU locale "nonsense-nowhere" has unknown language "nonsense"
-HINT: To disable ICU locale validation, set the parameter icu_validation_level to "disabled".
+HINT: To disable ICU locale validation, set the parameter "icu_validation_level" to "disabled".
CREATE COLLATION test4 FROM nonsense;
ERROR: collation "nonsense" for encoding "UTF8" does not exist
CREATE COLLATION test5 FROM test0;
-- prevent empty values
SET default_table_access_method = '';
ERROR: invalid value for parameter "default_table_access_method": ""
-DETAIL: default_table_access_method cannot be empty.
+DETAIL: "default_table_access_method" cannot be empty.
-- prevent nonexistent values
SET default_table_access_method = 'I do not exist AM';
ERROR: invalid value for parameter "default_table_access_method": "I do not exist AM"
SET max_stack_depth = '100kB';
SELECT repeat('[', 10000)::json;
ERROR: stack depth limit exceeded
-HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
SELECT repeat('{"a":', 10000)::json;
ERROR: stack depth limit exceeded
-HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
RESET max_stack_depth;
-- Miscellaneous stuff.
SELECT 'true'::json; -- OK
SET max_stack_depth = '100kB';
SELECT repeat('[', 10000)::jsonb;
ERROR: stack depth limit exceeded
-HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
SELECT repeat('{"a":', 10000)::jsonb;
ERROR: stack depth limit exceeded
-HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
RESET max_stack_depth;
-- Miscellaneous stuff.
SELECT 'true'::jsonb; -- OK
PREPARE TRANSACTION 'regress_foo1';
ERROR: prepared transactions are disabled
-HINT: Set max_prepared_transactions to a nonzero value.
+HINT: Set "max_prepared_transactions" to a nonzero value.
SELECT * FROM pxtest1;
foobar
--------
PREPARE TRANSACTION 'regress_foo2';
ERROR: prepared transactions are disabled
-HINT: Set max_prepared_transactions to a nonzero value.
+HINT: Set "max_prepared_transactions" to a nonzero value.
SELECT * FROM pxtest1;
foobar
--------
PREPARE TRANSACTION 'regress_foo3';
ERROR: prepared transactions are disabled
-HINT: Set max_prepared_transactions to a nonzero value.
+HINT: Set "max_prepared_transactions" to a nonzero value.
SELECT gid FROM pg_prepared_xacts WHERE gid ~ '^regress_' ORDER BY gid;
gid
-----
-- This should fail, because the gid foo3 is already in use
PREPARE TRANSACTION 'regress_foo3';
ERROR: prepared transactions are disabled
-HINT: Set max_prepared_transactions to a nonzero value.
+HINT: Set "max_prepared_transactions" to a nonzero value.
SELECT * FROM pxtest1;
foobar
--------
PREPARE TRANSACTION 'regress_foo4';
ERROR: prepared transactions are disabled
-HINT: Set max_prepared_transactions to a nonzero value.
+HINT: Set "max_prepared_transactions" to a nonzero value.
SELECT gid FROM pg_prepared_xacts WHERE gid ~ '^regress_' ORDER BY gid;
gid
-----
INSERT INTO pxtest1 VALUES ('fff');
PREPARE TRANSACTION 'regress_foo5';
ERROR: prepared transactions are disabled
-HINT: Set max_prepared_transactions to a nonzero value.
+HINT: Set "max_prepared_transactions" to a nonzero value.
SELECT gid FROM pg_prepared_xacts WHERE gid ~ '^regress_' ORDER BY gid;
gid
-----
PREPARE TRANSACTION 'regress_foo6'; -- fails
ERROR: prepared transactions are disabled
-HINT: Set max_prepared_transactions to a nonzero value.
+HINT: Set "max_prepared_transactions" to a nonzero value.
-- Test subtransactions
BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
CREATE TABLE pxtest2 (a int);
INSERT INTO pxtest2 VALUES (3);
PREPARE TRANSACTION 'regress_sub1';
ERROR: prepared transactions are disabled
-HINT: Set max_prepared_transactions to a nonzero value.
+HINT: Set "max_prepared_transactions" to a nonzero value.
CREATE TABLE pxtest3(fff int);
-- Test shared invalidation
BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
PREPARE TRANSACTION 'regress_sub2';
ERROR: prepared transactions are disabled
-HINT: Set max_prepared_transactions to a nonzero value.
+HINT: Set "max_prepared_transactions" to a nonzero value.
-- No such cursor
FETCH 1 FROM foo;
ERROR: cursor "foo" does not exist
ERROR: unsafe use of string constant with Unicode escapes
LINE 1: SELECT U&'d\0061t\+000061' AS U&"d\0061t\+000061";
^
-DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
+DETAIL: String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off.
SELECT U&'d!0061t\+000061' UESCAPE '!' AS U&"d*0061t\+000061" UESCAPE '*';
ERROR: unsafe use of string constant with Unicode escapes
LINE 1: SELECT U&'d!0061t\+000061' UESCAPE '!' AS U&"d*0061t\+000061...
^
-DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
+DETAIL: String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off.
SELECT U&' \' UESCAPE '!' AS "tricky";
ERROR: unsafe use of string constant with Unicode escapes
LINE 1: SELECT U&' \' UESCAPE '!' AS "tricky";
^
-DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
+DETAIL: String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off.
SELECT 'tricky' AS U&"\" UESCAPE '!';
\
--------
ERROR: unsafe use of string constant with Unicode escapes
LINE 1: SELECT U&'wrong: \061';
^
-DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
+DETAIL: String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off.
SELECT U&'wrong: \+0061';
ERROR: unsafe use of string constant with Unicode escapes
LINE 1: SELECT U&'wrong: \+0061';
^
-DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
+DETAIL: String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off.
SELECT U&'wrong: +0061' UESCAPE '+';
ERROR: unsafe use of string constant with Unicode escapes
LINE 1: SELECT U&'wrong: +0061' UESCAPE '+';
^
-DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
+DETAIL: String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off.
RESET standard_conforming_strings;
-- bytea
SET bytea_output TO hex;
$node->connect_fails(
"$common_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require ssl_min_protocol_version=incorrect_tls",
"connection failure with an incorrect SSL protocol minimum bound",
- expected_stderr => qr/invalid ssl_min_protocol_version value/);
+ expected_stderr => qr/invalid "ssl_min_protocol_version" value/);
$node->connect_fails(
"$common_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require ssl_max_protocol_version=incorrect_tls",
"connection failure with an incorrect SSL protocol maximum bound",
- expected_stderr => qr/invalid ssl_max_protocol_version value/);
+ expected_stderr => qr/invalid "ssl_max_protocol_version" value/);
### Server-side tests.
###
ROLLBACK;
});
ok( $reterr =~
- m/WARNING: wal_level is insufficient to publish logical changes/,
- 'CREATE PUBLICATION while wal_level=minimal');
+ m/WARNING: "wal_level" is insufficient to publish logical changes/,
+ 'CREATE PUBLICATION while "wal_level=minimal"');
done_testing();