Revise GUC names quoting in messages again
author Peter Eisentraut <[email protected]>
Fri, 17 May 2024 09:23:08 +0000 (11:23 +0200)
committer Peter Eisentraut <[email protected]>
Fri, 17 May 2024 09:44:26 +0000 (11:44 +0200)
After further review, we want to move in the direction of always
quoting GUC names in error messages, rather than the previous (PG16)
wildly mixed practice or the intermittent (mid-PG17) idea of doing
this depending on how potentially confusing the GUC name is.

This commit applies appropriate quotes to (almost?) all mentions of
GUC names in error messages.  It partially supersedes a243569bf65 and
8d9978a7176, which had moved things a bit in the opposite direction
but were then abandoned in a partial state.

Author: Peter Smith <[email protected]>
Discussion: https://www.postgresql.org/message-id/flat/CAHut%2BPv-kSN8SkxSdoHano_wPubqcg5789ejhCDZAcLFceBR-w%40mail.gmail.com
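
For reference, the pattern applied throughout the diff is roughly the following. This is only an illustrative sketch, not code from the commit: the hook name check_fetch_size and the parameter name my_extension.fetch_size are invented for the example, but the quoting style (escaped double quotes around the GUC name, with the name passed as a separate %s argument where practical) matches the changes below.

#include "postgres.h"
#include "utils/guc.h"

/*
 * Hypothetical GUC check hook showing the convention adopted here:
 * configuration parameter names in user-facing messages are always
 * wrapped in double quotes.  Passing the name as a separate "%s"
 * argument keeps the translatable format string generic.
 */
static bool
check_fetch_size(int *newval, void **extra, GucSource source)
{
	if (*newval < 0)
	{
		GUC_check_errdetail("\"%s\" must not be negative.",
							"my_extension.fetch_size");
		return false;
	}
	return true;
}

The same rule applies to errmsg/errdetail/errhint strings in ereport() calls, as the hunks below show.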

79 files changed:
contrib/pg_prewarm/autoprewarm.c
contrib/pg_stat_statements/pg_stat_statements.c
contrib/sepgsql/hooks.c
contrib/test_decoding/expected/slot.out
doc/src/sgml/sources.sgml
src/backend/access/gin/ginbulk.c
src/backend/access/heap/vacuumlazy.c
src/backend/access/table/tableamapi.c
src/backend/access/transam/commit_ts.c
src/backend/access/transam/multixact.c
src/backend/access/transam/rmgr.c
src/backend/access/transam/twophase.c
src/backend/access/transam/xlog.c
src/backend/access/transam/xlogarchive.c
src/backend/access/transam/xlogfuncs.c
src/backend/access/transam/xlogprefetcher.c
src/backend/access/transam/xlogrecovery.c
src/backend/commands/publicationcmds.c
src/backend/commands/vacuum.c
src/backend/commands/variable.c
src/backend/libpq/be-secure-openssl.c
src/backend/libpq/hba.c
src/backend/libpq/pqcomm.c
src/backend/parser/scan.l
src/backend/port/sysv_sema.c
src/backend/port/sysv_shmem.c
src/backend/port/win32_shmem.c
src/backend/postmaster/bgworker.c
src/backend/postmaster/checkpointer.c
src/backend/postmaster/pgarch.c
src/backend/postmaster/postmaster.c
src/backend/replication/logical/decode.c
src/backend/replication/logical/launcher.c
src/backend/replication/logical/logical.c
src/backend/replication/logical/origin.c
src/backend/replication/slot.c
src/backend/replication/syncrep.c
src/backend/storage/buffer/localbuf.c
src/backend/storage/file/fd.c
src/backend/storage/lmgr/lock.c
src/backend/storage/lmgr/predicate.c
src/backend/storage/lmgr/proc.c
src/backend/tcop/postgres.c
src/backend/utils/adt/pg_locale.c
src/backend/utils/adt/varlena.c
src/backend/utils/fmgr/dfmgr.c
src/backend/utils/misc/guc.c
src/backend/utils/misc/guc_tables.c
src/bin/initdb/initdb.c
src/bin/pg_basebackup/streamutil.c
src/bin/pg_controldata/pg_controldata.c
src/bin/pg_dump/pg_backup_archiver.c
src/bin/pg_dump/pg_dump.c
src/bin/pg_rewind/libpq_source.c
src/bin/pg_rewind/pg_rewind.c
src/bin/pg_test_fsync/pg_test_fsync.c
src/bin/pg_upgrade/check.c
src/bin/pg_upgrade/t/003_logical_slots.pl
src/bin/pg_upgrade/t/004_subscription.pl
src/bin/pgbench/pgbench.c
src/fe_utils/archive.c
src/interfaces/libpq/fe-auth.c
src/interfaces/libpq/fe-connect.c
src/test/modules/commit_ts/expected/commit_timestamp_1.out
src/test/modules/libpq_pipeline/libpq_pipeline.c
src/test/modules/ssl_passphrase_callback/ssl_passphrase_func.c
src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl
src/test/modules/test_shm_mq/setup.c
src/test/modules/test_slru/test_slru.c
src/test/recovery/t/024_archive_recovery.pl
src/test/recovery/t/035_standby_logical_decoding.pl
src/test/regress/expected/collate.icu.utf8.out
src/test/regress/expected/create_am.out
src/test/regress/expected/json.out
src/test/regress/expected/jsonb.out
src/test/regress/expected/prepared_xacts_1.out
src/test/regress/expected/strings.out
src/test/ssl/t/001_ssltests.pl
src/test/subscription/t/001_rep_changes.pl

index 1c8804dc434573ec36021dd88ec201cc0c9b4616..961d3b8e9ddb5dbb98c0ad9293094cf2ff3eb052 100644 (file)
@@ -831,7 +831,7 @@ apw_start_leader_worker(void)
        ereport(ERROR,
                (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
                 errmsg("could not register background process"),
-                errhint("You may need to increase max_worker_processes.")));
+                errhint("You may need to increase \"max_worker_processes\".")));
 
    status = WaitForBackgroundWorkerStartup(handle, &pid);
    if (status != BGWH_STARTED)
@@ -867,7 +867,7 @@ apw_start_database_worker(void)
        ereport(ERROR,
                (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
                 errmsg("registering dynamic bgworker autoprewarm failed"),
-                errhint("Consider increasing configuration parameter max_worker_processes.")));
+                errhint("Consider increasing configuration parameter \"max_worker_processes\".")));
 
    /*
     * Ignore return value; if it fails, postmaster has died, but we have
index 67cec865ba1b59543989102b1d39b1a74346761c..d4197ae0f7ebe9737cd90a435ce3668b83262f32 100644 (file)
@@ -1660,7 +1660,7 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
    if (!pgss || !pgss_hash)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                errmsg("pg_stat_statements must be loaded via shared_preload_libraries")));
+                errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));
 
    InitMaterializedSRF(fcinfo, 0);
 
@@ -1989,7 +1989,7 @@ pg_stat_statements_info(PG_FUNCTION_ARGS)
    if (!pgss || !pgss_hash)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                errmsg("pg_stat_statements must be loaded via shared_preload_libraries")));
+                errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));
 
    /* Build a tuple descriptor for our result type */
    if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
@@ -2671,7 +2671,7 @@ entry_reset(Oid userid, Oid dbid, uint64 queryid, bool minmax_only)
    if (!pgss || !pgss_hash)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                errmsg("pg_stat_statements must be loaded via shared_preload_libraries")));
+                errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));
 
    LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
    num_entries = hash_get_num_entries(pgss_hash);
index a6b2a3d9baf0a51c5fb5c566fcc3f8c0802d20a4..0f206b1093d816a2038b6b5ee3ed40152a0b9cce 100644 (file)
@@ -406,7 +406,7 @@ _PG_init(void)
    if (IsUnderPostmaster)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                errmsg("sepgsql must be loaded via shared_preload_libraries")));
+                errmsg("sepgsql must be loaded via \"shared_preload_libraries\"")));
 
    /*
     * Check availability of SELinux on the platform. If disabled, we cannot
index 349ab2d38092f657c15126f9a43748fb8122f240..7de03c79f6f017246ef94d285e5d693f03c69265 100644 (file)
@@ -220,7 +220,7 @@ ORDER BY o.slot_name, c.slot_name;
 -- released even when raise error during creating the target slot.
 SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot1', 'failed'); -- error
 ERROR:  all replication slots are in use
-HINT:  Free one or increase max_replication_slots.
+HINT:  Free one or increase "max_replication_slots".
 -- temporary slots were dropped automatically
 SELECT pg_drop_replication_slot('orig_slot1');
  pg_drop_replication_slot 
index 0dae4d9158f09a4a88c1d3536961b898c72ccb6a..fa68d4d024a93f38321d0c40d7aefe731afb8757 100644 (file)
@@ -533,17 +533,10 @@ Hint:       The addendum, written as a complete sentence.
    <title>Use of Quotes</title>
 
    <para>
-    Always use quotes to delimit file names, user-supplied identifiers, and
-    other variables that might contain words.  Do not use them to mark up
-    variables that will not contain words (for example, operator names).
-   </para>
-
-   <para>
-    In messages containing configuration variable names, do not include quotes
-    when the names are visibly not natural English words, such as when they
-    have underscores, are all-uppercase or have mixed case. Otherwise, quotes
-    must be added. Do include quotes in a message where an arbitrary variable
-    name is to be expanded.
+    Always use quotes to delimit file names, user-supplied identifiers,
+    configuration variable names, and other variables that might contain
+    words.  Do not use them to mark up variables that will not contain words
+    (for example, operator names).
    </para>
 
    <para>
index a522801c2f718edfc2c02d05c7bee98b451e0da4..7f89cd5e8261e844c8ec831b495bfc442ebfc4cd 100644 (file)
@@ -42,7 +42,7 @@ ginCombineData(RBTNode *existing, const RBTNode *newdata, void *arg)
            ereport(ERROR,
                    (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                     errmsg("posting list is too long"),
-                    errhint("Reduce maintenance_work_mem.")));
+                    errhint("Reduce \"maintenance_work_mem\".")));
 
        accum->allocatedMemory -= GetMemoryChunkSpace(eo->list);
        eo->maxcount *= 2;
index 84cc983b6e6424a06bd082c03f602e3c4af9671d..8145ea8fc3f57ce58931f1ef89d1a2efc611dd41 100644 (file)
@@ -2327,7 +2327,7 @@ lazy_check_wraparound_failsafe(LVRelState *vacrel)
                        vacrel->dbname, vacrel->relnamespace, vacrel->relname,
                        vacrel->num_index_scans),
                 errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
-                errhint("Consider increasing configuration parameter maintenance_work_mem or autovacuum_work_mem.\n"
+                errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
                         "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
 
        /* Stop applying cost limits from this point on */
index ce637a5a5d9931163bd1126b1826129e143aab07..e9b598256fbe054681ee5b8dbefa40579372349d 100644 (file)
@@ -106,14 +106,14 @@ check_default_table_access_method(char **newval, void **extra, GucSource source)
 {
    if (**newval == '\0')
    {
-       GUC_check_errdetail("%s cannot be empty.",
+       GUC_check_errdetail("\"%s\" cannot be empty.",
                            "default_table_access_method");
        return false;
    }
 
    if (strlen(*newval) >= NAMEDATALEN)
    {
-       GUC_check_errdetail("%s is too long (maximum %d characters).",
+       GUC_check_errdetail("\"%s\" is too long (maximum %d characters).",
                            "default_table_access_method", NAMEDATALEN - 1);
        return false;
    }
index f22149468743df065abd9264ec79177ae9fc690e..77e1899d7ad218501fa993301993e78657e93871 100644 (file)
@@ -384,9 +384,9 @@ error_commit_ts_disabled(void)
            (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
             errmsg("could not get commit timestamp data"),
             RecoveryInProgress() ?
-            errhint("Make sure the configuration parameter %s is set on the primary server.",
+            errhint("Make sure the configuration parameter \"%s\" is set on the primary server.",
                     "track_commit_timestamp") :
-            errhint("Make sure the configuration parameter %s is set.",
+            errhint("Make sure the configuration parameter \"%s\" is set.",
                     "track_commit_timestamp")));
 }
 
index 380c866d7140845c2c6bc598b526d92ca0e7894b..54c916e0347f4b6d125e0f144e9c70083d6da5ee 100644 (file)
@@ -1151,7 +1151,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
                                  MultiXactState->offsetStopLimit - nextOffset - 1,
                                  nmembers,
                                  MultiXactState->offsetStopLimit - nextOffset - 1),
-                errhint("Execute a database-wide VACUUM in database with OID %u with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age settings.",
+                errhint("Execute a database-wide VACUUM in database with OID %u with reduced \"vacuum_multixact_freeze_min_age\" and \"vacuum_multixact_freeze_table_age\" settings.",
                         MultiXactState->oldestMultiXactDB)));
    }
 
@@ -1187,7 +1187,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
                               MultiXactState->offsetStopLimit - nextOffset + nmembers,
                               MultiXactState->oldestMultiXactDB,
                               MultiXactState->offsetStopLimit - nextOffset + nmembers),
-                errhint("Execute a database-wide VACUUM in that database with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age settings.")));
+                errhint("Execute a database-wide VACUUM in that database with reduced \"vacuum_multixact_freeze_min_age\" and \"vacuum_multixact_freeze_table_age\" settings.")));
 
    ExtendMultiXactMember(nextOffset, nmembers);
 
index 3e2f1d4a237715596667de33b9af792206cdffb1..1b7499726eb02df5c05d6522943ec9104aa1cd79 100644 (file)
@@ -91,7 +91,7 @@ void
 RmgrNotFound(RmgrId rmid)
 {
    ereport(ERROR, (errmsg("resource manager with ID %d not registered", rmid),
-                   errhint("Include the extension module that implements this resource manager in shared_preload_libraries.")));
+                   errhint("Include the extension module that implements this resource manager in \"shared_preload_libraries\".")));
 }
 
 /*
@@ -118,7 +118,7 @@ RegisterCustomRmgr(RmgrId rmid, const RmgrData *rmgr)
    if (!process_shared_preload_libraries_in_progress)
        ereport(ERROR,
                (errmsg("failed to register custom resource manager \"%s\" with ID %d", rmgr->rm_name, rmid),
-                errdetail("Custom resource manager must be registered while initializing modules in shared_preload_libraries.")));
+                errdetail("Custom resource manager must be registered while initializing modules in \"shared_preload_libraries\".")));
 
    if (RmgrTable[rmid].rm_name != NULL)
        ereport(ERROR,
index 8090ac9fc19e54e0f1418a2a9f0212fb30ce0636..bf451d42ffb5dd76f23f53e1561def475199f3f4 100644 (file)
@@ -373,7 +373,7 @@ MarkAsPreparing(TransactionId xid, const char *gid,
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("prepared transactions are disabled"),
-                errhint("Set max_prepared_transactions to a nonzero value.")));
+                errhint("Set \"max_prepared_transactions\" to a nonzero value.")));
 
    /* on first call, register the exit hook */
    if (!twophaseExitRegistered)
@@ -402,7 +402,7 @@ MarkAsPreparing(TransactionId xid, const char *gid,
        ereport(ERROR,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("maximum number of prepared transactions reached"),
-                errhint("Increase max_prepared_transactions (currently %d).",
+                errhint("Increase \"max_prepared_transactions\" (currently %d).",
                         max_prepared_xacts)));
    gxact = TwoPhaseState->freeGXacts;
    TwoPhaseState->freeGXacts = gxact->next;
@@ -2539,7 +2539,7 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn,
        ereport(ERROR,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("maximum number of prepared transactions reached"),
-                errhint("Increase max_prepared_transactions (currently %d).",
+                errhint("Increase \"max_prepared_transactions\" (currently %d).",
                         max_prepared_xacts)));
    gxact = TwoPhaseState->freeGXacts;
    TwoPhaseState->freeGXacts = gxact->next;
index c3fd9c1eaed511199d0741e3df185959332cde3a..330e058c5f28ab4f0a7a532bd2e4134d4d2a9369 100644 (file)
@@ -4501,11 +4501,11 @@ ReadControlFile(void)
    /* check and update variables dependent on wal_segment_size */
    if (ConvertToXSegs(min_wal_size_mb, wal_segment_size) < 2)
        ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                       errmsg("min_wal_size must be at least twice wal_segment_size")));
+                       errmsg("\"min_wal_size\" must be at least twice \"wal_segment_size\"")));
 
    if (ConvertToXSegs(max_wal_size_mb, wal_segment_size) < 2)
        ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                       errmsg("max_wal_size must be at least twice wal_segment_size")));
+                       errmsg("\"max_wal_size\" must be at least twice \"wal_segment_size\"")));
 
    UsableBytesInSegment =
        (wal_segment_size / XLOG_BLCKSZ * UsableBytesInPage) -
@@ -5351,9 +5351,9 @@ CheckRequiredParameterValues(void)
    {
        ereport(FATAL,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                errmsg("WAL was generated with wal_level=minimal, cannot continue recovering"),
-                errdetail("This happens if you temporarily set wal_level=minimal on the server."),
-                errhint("Use a backup taken after setting wal_level to higher than minimal.")));
+                errmsg("WAL was generated with \"wal_level=minimal\", cannot continue recovering"),
+                errdetail("This happens if you temporarily set \"wal_level=minimal\" on the server."),
+                errhint("Use a backup taken after setting \"wal_level\" to higher than \"minimal\".")));
    }
 
    /*
@@ -8549,7 +8549,7 @@ get_sync_bit(int method)
 #endif
        default:
            /* can't happen (unless we are out of sync with option array) */
-           elog(ERROR, "unrecognized wal_sync_method: %d", method);
+           elog(ERROR, "unrecognized \"wal_sync_method\": %d", method);
            return 0;           /* silence warning */
    }
 }
@@ -8647,7 +8647,7 @@ issue_xlog_fsync(int fd, XLogSegNo segno, TimeLineID tli)
        default:
            ereport(PANIC,
                    errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                   errmsg_internal("unrecognized wal_sync_method: %d", wal_sync_method));
+                   errmsg_internal("unrecognized \"wal_sync_method\": %d", wal_sync_method));
            break;
    }
 
@@ -8725,7 +8725,7 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces,
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("WAL level not sufficient for making an online backup"),
-                errhint("wal_level must be set to \"replica\" or \"logical\" at server start.")));
+                errhint("\"wal_level\" must be set to \"replica\" or \"logical\" at server start.")));
 
    if (strlen(backupidstr) > MAXPGPATH)
        ereport(ERROR,
@@ -8851,11 +8851,11 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces,
                if (!checkpointfpw || state->startpoint <= recptr)
                    ereport(ERROR,
                            (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                            errmsg("WAL generated with full_page_writes=off was replayed "
+                            errmsg("WAL generated with \"full_page_writes=off\" was replayed "
                                    "since last restartpoint"),
                             errhint("This means that the backup being taken on the standby "
                                     "is corrupt and should not be used. "
-                                    "Enable full_page_writes and run CHECKPOINT on the primary, "
+                                    "Enable \"full_page_writes\" and run CHECKPOINT on the primary, "
                                     "and then try an online backup again.")));
 
                /*
@@ -9147,11 +9147,11 @@ do_pg_backup_stop(BackupState *state, bool waitforarchive)
        if (state->startpoint <= recptr)
            ereport(ERROR,
                    (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                    errmsg("WAL generated with full_page_writes=off was replayed "
+                    errmsg("WAL generated with \"full_page_writes=off\" was replayed "
                            "during online backup"),
                     errhint("This means that the backup being taken on the standby "
                             "is corrupt and should not be used. "
-                            "Enable full_page_writes and run CHECKPOINT on the primary, "
+                            "Enable \"full_page_writes\" and run CHECKPOINT on the primary, "
                             "and then try an online backup again.")));
 
 
@@ -9279,7 +9279,7 @@ do_pg_backup_stop(BackupState *state, bool waitforarchive)
                ereport(WARNING,
                        (errmsg("still waiting for all required WAL segments to be archived (%d seconds elapsed)",
                                waits),
-                        errhint("Check that your archive_command is executing properly.  "
+                        errhint("Check that your \"archive_command\" is executing properly.  "
                                 "You can safely cancel this backup, "
                                 "but the database backup will not be usable without all the WAL segments.")));
            }
index caa1f03d93455881bc258d25d7b0c99b336bfed9..81999b48200bed1df2be56bd8273b71ed79ae6bf 100644 (file)
@@ -233,7 +233,7 @@ RestoreArchivedFile(char *path, const char *xlogfname,
            ereport(elevel,
                    (errcode_for_file_access(),
                     errmsg("could not stat file \"%s\": %m", xlogpath),
-                    errdetail("restore_command returned a zero exit status, but stat() failed.")));
+                    errdetail("\"restore_command\" returned a zero exit status, but stat() failed.")));
        }
    }
 
index 92bdb17ed5220bfa82a6e9b51ed0a84e53541901..4e46baaebdf7a7983f038161ef2db51d2d384135 100644 (file)
@@ -212,7 +212,7 @@ pg_log_standby_snapshot(PG_FUNCTION_ARGS)
    if (!XLogStandbyInfoActive())
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                errmsg("pg_log_standby_snapshot() can only be used if wal_level >= replica")));
+                errmsg("pg_log_standby_snapshot() can only be used if \"wal_level\" >= \"replica\"")));
 
    recptr = LogStandbySnapshot();
 
@@ -245,7 +245,7 @@ pg_create_restore_point(PG_FUNCTION_ARGS)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("WAL level not sufficient for creating a restore point"),
-                errhint("wal_level must be set to \"replica\" or \"logical\" at server start.")));
+                errhint("\"wal_level\" must be set to \"replica\" or \"logical\" at server start.")));
 
    restore_name_str = text_to_cstring(restore_name);
 
index fc80c37e55445af04f6dadf9d61ce89a2879402d..84023d61baf302cf3e69ef426dd6b429eb37f3ba 100644 (file)
@@ -1085,7 +1085,7 @@ check_recovery_prefetch(int *new_value, void **extra, GucSource source)
 #ifndef USE_PREFETCH
    if (*new_value == RECOVERY_PREFETCH_ON)
    {
-       GUC_check_errdetail("recovery_prefetch is not supported on platforms that lack posix_fadvise().");
+       GUC_check_errdetail("\"recovery_prefetch\" is not supported on platforms that lack posix_fadvise().");
        return false;
    }
 #endif
index 29c5bec084771d9b233056f7887eef7d10b799b0..b45b83317200536f0809f398f5f2a222cf659545 100644 (file)
@@ -1119,7 +1119,7 @@ validateRecoveryParameters(void)
        if ((PrimaryConnInfo == NULL || strcmp(PrimaryConnInfo, "") == 0) &&
            (recoveryRestoreCommand == NULL || strcmp(recoveryRestoreCommand, "") == 0))
            ereport(WARNING,
-                   (errmsg("specified neither primary_conninfo nor restore_command"),
+                   (errmsg("specified neither \"primary_conninfo\" nor \"restore_command\""),
                     errhint("The database server will regularly poll the pg_wal subdirectory to check for files placed there.")));
    }
    else
@@ -1128,7 +1128,7 @@ validateRecoveryParameters(void)
            strcmp(recoveryRestoreCommand, "") == 0)
            ereport(FATAL,
                    (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                    errmsg("must specify restore_command when standby mode is not enabled")));
+                    errmsg("must specify \"restore_command\" when standby mode is not enabled")));
    }
 
    /*
@@ -2162,7 +2162,7 @@ CheckTablespaceDirectory(void)
                     errmsg("unexpected directory entry \"%s\" found in %s",
                            de->d_name, "pg_tblspc/"),
                     errdetail("All directory entries in pg_tblspc/ should be symbolic links."),
-                    errhint("Remove those directories, or set allow_in_place_tablespaces to ON transiently to let recovery complete.")));
+                    errhint("Remove those directories, or set \"allow_in_place_tablespaces\" to ON transiently to let recovery complete.")));
    }
 }
 
@@ -4771,7 +4771,7 @@ error_multiple_recovery_targets(void)
    ereport(ERROR,
            (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
             errmsg("multiple recovery targets specified"),
-            errdetail("At most one of recovery_target, recovery_target_lsn, recovery_target_name, recovery_target_time, recovery_target_xid may be set.")));
+            errdetail("At most one of \"recovery_target\", \"recovery_target_lsn\", \"recovery_target_name\", \"recovery_target_time\", \"recovery_target_xid\" may be set.")));
 }
 
 /*
@@ -4855,7 +4855,7 @@ check_recovery_target_name(char **newval, void **extra, GucSource source)
    /* Use the value of newval directly */
    if (strlen(*newval) >= MAXFNAMELEN)
    {
-       GUC_check_errdetail("%s is too long (maximum %d characters).",
+       GUC_check_errdetail("\"%s\" is too long (maximum %d characters).",
                            "recovery_target_name", MAXFNAMELEN - 1);
        return false;
    }
@@ -4979,7 +4979,7 @@ check_recovery_target_timeline(char **newval, void **extra, GucSource source)
        strtoul(*newval, NULL, 0);
        if (errno == EINVAL || errno == ERANGE)
        {
-           GUC_check_errdetail("recovery_target_timeline is not a valid number.");
+           GUC_check_errdetail("\"recovery_target_timeline\" is not a valid number.");
            return false;
        }
    }
index 9bcc22fdd7e18d584bf9885e47cab444a39b327e..6ea709988ee7d7d5b8543906e978f99c51f4e970 100644 (file)
@@ -858,8 +858,8 @@ CreatePublication(ParseState *pstate, CreatePublicationStmt *stmt)
    if (wal_level != WAL_LEVEL_LOGICAL)
        ereport(WARNING,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                errmsg("wal_level is insufficient to publish logical changes"),
-                errhint("Set wal_level to \"logical\" before creating subscriptions.")));
+                errmsg("\"wal_level\" is insufficient to publish logical changes"),
+                errhint("Set \"wal_level\" to \"logical\" before creating subscriptions.")));
 
    return myself;
 }
index 521ee74586a6e4e39dc1779c9d27df7cfdd4f6b4..48f8eab20226417bf979cd36d51f7170007a72ca 100644 (file)
@@ -131,7 +131,7 @@ check_vacuum_buffer_usage_limit(int *newval, void **extra,
        return true;
 
    /* Value does not fall within any allowable range */
-   GUC_check_errdetail("vacuum_buffer_usage_limit must be 0 or between %d kB and %d kB",
+   GUC_check_errdetail("\"vacuum_buffer_usage_limit\" must be 0 or between %d kB and %d kB",
                        MIN_BAS_VAC_RING_SIZE_KB, MAX_BAS_VAC_RING_SIZE_KB);
 
    return false;
index 01151ca2b5ad2243a7d15dc117c06b0b317abe5d..9345131711ed89965f1291e89833100d943d830d 100644 (file)
@@ -717,7 +717,7 @@ check_client_encoding(char **newval, void **extra, GucSource source)
        else
        {
            /* Provide a useful complaint */
-           GUC_check_errdetail("Cannot change client_encoding now.");
+           GUC_check_errdetail("Cannot change \"client_encoding\" now.");
        }
        return false;
    }
@@ -778,7 +778,7 @@ assign_client_encoding(const char *newval, void *extra)
         */
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
-                errmsg("cannot change client_encoding during a parallel operation")));
+                errmsg("cannot change \"client_encoding\" during a parallel operation")));
    }
 
    /* We do not expect an error if PrepareClientEncoding succeeded */
@@ -1202,7 +1202,7 @@ check_effective_io_concurrency(int *newval, void **extra, GucSource source)
 #ifndef USE_PREFETCH
    if (*newval != 0)
    {
-       GUC_check_errdetail("effective_io_concurrency must be set to 0 on platforms that lack posix_fadvise().");
+       GUC_check_errdetail("\"effective_io_concurrency\" must be set to 0 on platforms that lack posix_fadvise().");
        return false;
    }
 #endif                         /* USE_PREFETCH */
@@ -1215,7 +1215,7 @@ check_maintenance_io_concurrency(int *newval, void **extra, GucSource source)
 #ifndef USE_PREFETCH
    if (*newval != 0)
    {
-       GUC_check_errdetail("maintenance_io_concurrency must be set to 0 on platforms that lack posix_fadvise().");
+       GUC_check_errdetail("\"maintenance_io_concurrency\" must be set to 0 on platforms that lack posix_fadvise().");
        return false;
    }
 #endif                         /* USE_PREFETCH */
index 60cf68aac4ab286b2a2e6e444896ebc1b58d7e6a..0caad6bed3d26273dabf2672a448b2627dadd570 100644 (file)
@@ -201,7 +201,7 @@ be_tls_init(bool isServerStart)
        {
            ereport(isServerStart ? FATAL : LOG,
            /*- translator: first %s is a GUC option name, second %s is its value */
-                   (errmsg("%s setting \"%s\" not supported by this build",
+                   (errmsg("\"%s\" setting \"%s\" not supported by this build",
                            "ssl_min_protocol_version",
                            GetConfigOption("ssl_min_protocol_version",
                                            false, false))));
@@ -251,7 +251,7 @@ be_tls_init(bool isServerStart)
        {
            ereport(isServerStart ? FATAL : LOG,
                    (errmsg("could not set SSL protocol version range"),
-                    errdetail("%s cannot be higher than %s",
+                    errdetail("\"%s\" cannot be higher than \"%s\"",
                               "ssl_min_protocol_version",
                               "ssl_max_protocol_version")));
            goto error;
index d506c3c0b75ddeb03b94a6de7439ef795a1507df..18271def2e8c1668829709f91db7f475d0b94ed7 100644 (file)
@@ -1378,7 +1378,7 @@ parse_hba_line(TokenizedAuthLine *tok_line, int elevel)
                ereport(elevel,
                        (errcode(ERRCODE_CONFIG_FILE_ERROR),
                         errmsg("hostssl record cannot match because SSL is disabled"),
-                        errhint("Set ssl = on in postgresql.conf."),
+                        errhint("Set \"ssl = on\" in postgresql.conf."),
                         errcontext("line %d of configuration file \"%s\"",
                                    line_num, file_name)));
                *err_msg = "hostssl record cannot match because SSL is disabled";
index 2cee49a2085066afcda3a9437d2463c2c7e2d7b4..daa0696146d0e832b87422c6cc265f070dba4556 100644 (file)
@@ -731,7 +731,7 @@ Setup_AF_UNIX(const char *sock_path)
    if (Unix_socket_group[0] != '\0')
    {
 #ifdef WIN32
-       elog(WARNING, "configuration item unix_socket_group is not supported on this platform");
+       elog(WARNING, "configuration item \"unix_socket_group\" is not supported on this platform");
 #else
        char       *endptr;
        unsigned long val;
index b499975e9c450978316190dab9cd04789875452c..9b33fb8d722d770ae5cf2d2a9f972bce6df9880e 100644 (file)
@@ -565,7 +565,7 @@ other           .
                        ereport(ERROR,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                                 errmsg("unsafe use of string constant with Unicode escapes"),
-                                errdetail("String constants with Unicode escapes cannot be used when standard_conforming_strings is off."),
+                                errdetail("String constants with Unicode escapes cannot be used when \"standard_conforming_strings\" is off."),
                                 lexer_errposition()));
                    BEGIN(xus);
                    startlit();
index 647045e8c5345407e1309e1e6cf092f8618a2c3d..1454f96b5f320927c4f8a401d9a1e120453d9278 100644 (file)
@@ -127,7 +127,7 @@ InternalIpcSemaphoreCreate(IpcSemaphoreKey semKey, int numSems)
                         "semaphore sets (SEMMNI), or the system wide maximum number of "
                         "semaphores (SEMMNS), would be exceeded.  You need to raise the "
                         "respective kernel parameter.  Alternatively, reduce PostgreSQL's "
-                        "consumption of semaphores by reducing its max_connections parameter.\n"
+                        "consumption of semaphores by reducing its \"max_connections\" parameter.\n"
                         "The PostgreSQL documentation contains more information about "
                         "configuring your system for PostgreSQL.") : 0));
    }
index 1a6d8fa0fbcb4d4b8af1a152997bf20108ce6bab..362a37d3b3a21c17dc63216cca6bc627d0f0d0cd 100644 (file)
@@ -581,7 +581,7 @@ check_huge_page_size(int *newval, void **extra, GucSource source)
    /* Recent enough Linux only, for now.  See GetHugePageSize(). */
    if (*newval != 0)
    {
-       GUC_check_errdetail("huge_page_size must be 0 on this platform.");
+       GUC_check_errdetail("\"huge_page_size\" must be 0 on this platform.");
        return false;
    }
 #endif
@@ -658,8 +658,8 @@ CreateAnonymousSegment(Size *size)
                         "for a shared memory segment exceeded available memory, "
                         "swap space, or huge pages. To reduce the request size "
                         "(currently %zu bytes), reduce PostgreSQL's shared "
-                        "memory usage, perhaps by reducing shared_buffers or "
-                        "max_connections.",
+                        "memory usage, perhaps by reducing \"shared_buffers\" or "
+                        "\"max_connections\".",
                         allocsize) : 0));
    }
 
@@ -729,7 +729,7 @@ PGSharedMemoryCreate(Size size,
    if (huge_pages == HUGE_PAGES_ON && shared_memory_type != SHMEM_TYPE_MMAP)
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                errmsg("huge pages not supported with the current shared_memory_type setting")));
+                errmsg("huge pages not supported with the current \"shared_memory_type\" setting")));
 
    /* Room for a header? */
    Assert(size > MAXALIGN(sizeof(PGShmemHeader)));
index 90bed0146dd49fa5d61b756848705ef478975265..3bcce9d3b63296b6ecbe302f15a220b58db9cf9e 100644 (file)
@@ -643,7 +643,7 @@ check_huge_page_size(int *newval, void **extra, GucSource source)
 {
    if (*newval != 0)
    {
-       GUC_check_errdetail("huge_page_size must be 0 on this platform.");
+       GUC_check_errdetail("\"huge_page_size\" must be 0 on this platform.");
        return false;
    }
    return true;
index cf64a4beb2016ac0dbb9bbcc0d01faea5a0d71bd..97f9f28424af2912a0601cd6edb0eae3a4319ec5 100644 (file)
@@ -885,7 +885,7 @@ RegisterBackgroundWorker(BackgroundWorker *worker)
            return;
        ereport(LOG,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                errmsg("background worker \"%s\": must be registered in shared_preload_libraries",
+                errmsg("background worker \"%s\": must be registered in \"shared_preload_libraries\"",
                        worker->bgw_name)));
        return;
    }
index 8ef600ae72aa3529628618fa17f4ab0275669ea2..3c68a9904db5e3135349cbc51276c96d240f96fa 100644 (file)
@@ -442,7 +442,7 @@ CheckpointerMain(char *startup_data, size_t startup_data_len)
                                       "checkpoints are occurring too frequently (%d seconds apart)",
                                       elapsed_secs,
                                       elapsed_secs),
-                        errhint("Consider increasing the configuration parameter max_wal_size.")));
+                        errhint("Consider increasing the configuration parameter \"%s\".", "max_wal_size")));
 
            /*
             * Initialize checkpointer-private variables used during
index d82bcc2cfd54960a69fb0af52dfd0b51412ad904..3fc8fe7d105b4f2b9e684173d2910fc48a99a71d 100644 (file)
@@ -425,7 +425,7 @@ pgarch_ArchiverCopyLoop(void)
                !ArchiveCallbacks->check_configured_cb(archive_module_state))
            {
                ereport(WARNING,
-                       (errmsg("archive_mode enabled, yet archiving is not configured"),
+                       (errmsg("\"archive_mode\" enabled, yet archiving is not configured"),
                         arch_module_check_errdetail_string ?
                         errdetail_internal("%s", arch_module_check_errdetail_string) : 0));
                return;
@@ -876,8 +876,8 @@ HandlePgArchInterrupts(void)
        if (XLogArchiveLibrary[0] != '\0' && XLogArchiveCommand[0] != '\0')
            ereport(ERROR,
                    (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                    errmsg("both archive_command and archive_library set"),
-                    errdetail("Only one of archive_command, archive_library may be set.")));
+                    errmsg("both \"archive_command\" and \"archive_library\" set"),
+                    errdetail("Only one of \"archive_command\", \"archive_library\" may be set.")));
 
        archiveLibChanged = strcmp(XLogArchiveLibrary, archiveLib) != 0;
        pfree(archiveLib);
@@ -915,8 +915,8 @@ LoadArchiveLibrary(void)
    if (XLogArchiveLibrary[0] != '\0' && XLogArchiveCommand[0] != '\0')
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                errmsg("both archive_command and archive_library set"),
-                errdetail("Only one of archive_command, archive_library may be set.")));
+                errmsg("both \"archive_command\" and \"archive_library\" set"),
+                errdetail("Only one of \"archive_command\", \"archive_library\" may be set.")));
 
    /*
     * If shell archiving is enabled, use our special initialization function.
index 7f3170a8f06fc2eb22c6e1326f78a4351ac4b4b0..bf0241aed0cedf9a18edd3625704673b9dfc6ba5 100644 (file)
@@ -822,7 +822,7 @@ PostmasterMain(int argc, char *argv[])
     */
    if (SuperuserReservedConnections + ReservedConnections >= MaxConnections)
    {
-       write_stderr("%s: superuser_reserved_connections (%d) plus reserved_connections (%d) must be less than max_connections (%d)\n",
+       write_stderr("%s: \"superuser_reserved_connections\" (%d) plus \"reserved_connections\" (%d) must be less than \"max_connections\" (%d)\n",
                     progname,
                     SuperuserReservedConnections, ReservedConnections,
                     MaxConnections);
@@ -830,13 +830,13 @@ PostmasterMain(int argc, char *argv[])
    }
    if (XLogArchiveMode > ARCHIVE_MODE_OFF && wal_level == WAL_LEVEL_MINIMAL)
        ereport(ERROR,
-               (errmsg("WAL archival cannot be enabled when wal_level is \"minimal\"")));
+               (errmsg("WAL archival cannot be enabled when \"wal_level\" is \"minimal\"")));
    if (max_wal_senders > 0 && wal_level == WAL_LEVEL_MINIMAL)
        ereport(ERROR,
-               (errmsg("WAL streaming (max_wal_senders > 0) requires wal_level \"replica\" or \"logical\"")));
+               (errmsg("WAL streaming (\"max_wal_senders\" > 0) requires \"wal_level\" to be \"replica\" or \"logical\"")));
    if (summarize_wal && wal_level == WAL_LEVEL_MINIMAL)
        ereport(ERROR,
-               (errmsg("WAL cannot be summarized when wal_level is \"minimal\"")));
+               (errmsg("WAL cannot be summarized when \"wal_level\" is \"minimal\"")));
 
    /*
     * Other one-time internal sanity checks can go here, if they are fast.
@@ -3359,7 +3359,7 @@ PostmasterStateMachine(void)
        if (!restart_after_crash)
        {
            ereport(LOG,
-                   (errmsg("shutting down because restart_after_crash is off")));
+                   (errmsg("shutting down because \"restart_after_crash\" is off")));
            ExitPostmaster(1);
        }
    }
index 7a86f8481db979719faac1cac79ca0f372351fe6..8ec5adfd9099a012c13fdb717ffc4ddca1747ff5 100644 (file)
@@ -174,7 +174,7 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
                    Assert(RecoveryInProgress());
                    ereport(ERROR,
                            (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                            errmsg("logical decoding on standby requires wal_level >= logical on the primary")));
+                            errmsg("logical decoding on standby requires \"wal_level\" >= \"logical\" on the primary")));
                }
                break;
            }
index 66070e9131c07c9c71c67d8f8f46c33dfd2b3021..27c3a91fb75ea6c769d12ab2d2b737873f9fd51c 100644 (file)
@@ -425,7 +425,7 @@ retry:
        ereport(WARNING,
                (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
                 errmsg("out of logical replication worker slots"),
-                errhint("You might need to increase %s.", "max_logical_replication_workers")));
+                errhint("You might need to increase \"%s\".", "max_logical_replication_workers")));
        return false;
    }
 
@@ -511,7 +511,7 @@ retry:
        ereport(WARNING,
                (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
                 errmsg("out of background worker slots"),
-                errhint("You might need to increase %s.", "max_worker_processes")));
+                errhint("You might need to increase \"%s\".", "max_worker_processes")));
        return false;
    }
 
index 97a4d99c4e74f20f4ce97a2e96d4b536b0d50d65..99f31849bb171dac0d25bea0f7fbfc2b86f1c65c 100644 (file)
@@ -118,7 +118,7 @@ CheckLogicalDecodingRequirements(void)
    if (wal_level < WAL_LEVEL_LOGICAL)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                errmsg("logical decoding requires wal_level >= logical")));
+                errmsg("logical decoding requires \"wal_level\" >= \"logical\"")));
 
    if (MyDatabaseId == InvalidOid)
        ereport(ERROR,
@@ -138,7 +138,7 @@ CheckLogicalDecodingRequirements(void)
        if (GetActiveWalLevelOnStandby() < WAL_LEVEL_LOGICAL)
            ereport(ERROR,
                    (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                    errmsg("logical decoding on standby requires wal_level >= logical on the primary")));
+                    errmsg("logical decoding on standby requires \"wal_level\" >= \"logical\" on the primary")));
    }
 }
 
index a529da983ae6c9d6fce63021c19068c7b7d53a25..419e4814f057749e67ff046e768c5e8cbd30e15c 100644 (file)
@@ -187,7 +187,7 @@ replorigin_check_prerequisites(bool check_slots, bool recoveryOK)
    if (check_slots && max_replication_slots == 0)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                errmsg("cannot query or manipulate replication origin when max_replication_slots = 0")));
+                errmsg("cannot query or manipulate replication origin when \"max_replication_slots\" is 0")));
 
    if (!recoveryOK && RecoveryInProgress())
        ereport(ERROR,
@@ -795,7 +795,7 @@ StartupReplicationOrigin(void)
        if (last_state == max_replication_slots)
            ereport(PANIC,
                    (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
-                    errmsg("could not find free replication state, increase max_replication_slots")));
+                    errmsg("could not find free replication state, increase \"max_replication_slots\"")));
 
        /* copy data to shared memory */
        replication_states[last_state].roident = disk_state.roident;
@@ -954,7 +954,7 @@ replorigin_advance(RepOriginId node,
                (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
                 errmsg("could not find free replication state slot for replication origin with ID %d",
                        node),
-                errhint("Increase max_replication_slots and try again.")));
+                errhint("Increase \"max_replication_slots\" and try again.")));
 
    if (replication_state == NULL)
    {
@@ -1155,7 +1155,7 @@ replorigin_session_setup(RepOriginId node, int acquired_by)
                (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
                 errmsg("could not find free replication state slot for replication origin with ID %d",
                        node),
-                errhint("Increase max_replication_slots and try again.")));
+                errhint("Increase \"max_replication_slots\" and try again.")));
    else if (session_replication_state == NULL)
    {
        /* initialize new slot */
index aa4ea387da0020fd56f0483ef2620c3832885c2e..0e54ea5bb9abf4ab9fa713fffa011b8238a18b66 100644 (file)
@@ -378,7 +378,7 @@ ReplicationSlotCreate(const char *name, bool db_specific,
        ereport(ERROR,
                (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
                 errmsg("all replication slots are in use"),
-                errhint("Free one or increase max_replication_slots.")));
+                errhint("Free one or increase \"max_replication_slots\".")));
 
    /*
     * Since this slot is not in use, nobody should be looking at any part of
@@ -1369,12 +1369,12 @@ CheckSlotRequirements(void)
    if (max_replication_slots == 0)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                errmsg("replication slots can only be used if max_replication_slots > 0")));
+                errmsg("replication slots can only be used if \"max_replication_slots\" > 0")));
 
    if (wal_level < WAL_LEVEL_REPLICA)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                errmsg("replication slots can only be used if wal_level >= replica")));
+                errmsg("replication slots can only be used if \"wal_level\" >= \"replica\"")));
 }
 
 /*
@@ -1508,7 +1508,7 @@ ReportSlotInvalidation(ReplicationSlotInvalidationCause cause,
            break;
 
        case RS_INVAL_WAL_LEVEL:
-           appendStringInfoString(&err_detail, _("Logical decoding on standby requires wal_level >= logical on the primary server."));
+           appendStringInfoString(&err_detail, _("Logical decoding on standby requires \"wal_level\" >= \"logical\" on the primary server."));
            break;
        case RS_INVAL_NONE:
            pg_unreachable();
@@ -1521,7 +1521,7 @@ ReportSlotInvalidation(ReplicationSlotInvalidationCause cause,
            errmsg("invalidating obsolete replication slot \"%s\"",
                   NameStr(slotname)),
            errdetail_internal("%s", err_detail.data),
-           hint ? errhint("You might need to increase %s.", "max_slot_wal_keep_size") : 0);
+           hint ? errhint("You might need to increase \"%s\".", "max_slot_wal_keep_size") : 0);
 
    pfree(err_detail.data);
 }
@@ -2332,15 +2332,15 @@ RestoreSlotFromDisk(const char *name)
    if (cp.slotdata.database != InvalidOid && wal_level < WAL_LEVEL_LOGICAL)
        ereport(FATAL,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                errmsg("logical replication slot \"%s\" exists, but wal_level < logical",
+                errmsg("logical replication slot \"%s\" exists, but \"wal_level\" < \"logical\"",
                        NameStr(cp.slotdata.name)),
-                errhint("Change wal_level to be logical or higher.")));
+                errhint("Change \"wal_level\" to be \"logical\" or higher.")));
    else if (wal_level < WAL_LEVEL_REPLICA)
        ereport(FATAL,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                errmsg("physical replication slot \"%s\" exists, but wal_level < replica",
+                errmsg("physical replication slot \"%s\" exists, but \"wal_level\" < \"replica\"",
                        NameStr(cp.slotdata.name)),
-                errhint("Change wal_level to be replica or higher.")));
+                errhint("Change \"wal_level\" to be \"replica\" or higher.")));
 
    /* nothing can be active yet, don't lock anything */
    for (i = 0; i < max_replication_slots; i++)
@@ -2383,7 +2383,7 @@ RestoreSlotFromDisk(const char *name)
    if (!restored)
        ereport(FATAL,
                (errmsg("too many replication slots active before shutdown"),
-                errhint("Increase max_replication_slots and try again.")));
+                errhint("Increase \"max_replication_slots\" and try again.")));
 }
 
 /*
index 77917b848a493f10dca78b26b6b5575a015ead6d..fa5988c824ea087978a944213ea63ded47357864 100644 (file)
@@ -1010,7 +1010,7 @@ check_synchronous_standby_names(char **newval, void **extra, GucSource source)
            if (syncrep_parse_error_msg)
                GUC_check_errdetail("%s", syncrep_parse_error_msg);
            else
-               GUC_check_errdetail("synchronous_standby_names parser failed");
+               GUC_check_errdetail("\"synchronous_standby_names\" parser failed");
            return false;
        }
 
index 985a2c7049c8039667598b7f36c5f0c59dd708e1..8da7dd6c98ae49d57cf48dd036e9f501bdb9a680 100644 (file)
@@ -709,7 +709,7 @@ check_temp_buffers(int *newval, void **extra, GucSource source)
     */
    if (source != PGC_S_TEST && NLocBuffer && NLocBuffer != *newval)
    {
-       GUC_check_errdetail("temp_buffers cannot be changed after any temporary tables have been accessed in the session.");
+       GUC_check_errdetail("\"temp_buffers\" cannot be changed after any temporary tables have been accessed in the session.");
        return false;
    }
    return true;
index 8c8e81f899bf16c171382c5519fc9c0470efd0e7..a7c05b0a6fd86c3063f5475e06d975ea3f559fcb 100644 (file)
@@ -3947,7 +3947,7 @@ check_debug_io_direct(char **newval, void **extra, GucSource source)
 #if PG_O_DIRECT == 0
    if (strcmp(*newval, "") != 0)
    {
-       GUC_check_errdetail("debug_io_direct is not supported on this platform.");
+       GUC_check_errdetail("\"debug_io_direct\" is not supported on this platform.");
        result = false;
    }
    flags = 0;
@@ -3961,7 +3961,7 @@ check_debug_io_direct(char **newval, void **extra, GucSource source)
 
    if (!SplitGUCList(rawstring, ',', &elemlist))
    {
-       GUC_check_errdetail("Invalid list syntax in parameter %s",
+       GUC_check_errdetail("Invalid list syntax in parameter \"%s\"",
                            "debug_io_direct");
        pfree(rawstring);
        list_free(elemlist);
@@ -3994,14 +3994,14 @@ check_debug_io_direct(char **newval, void **extra, GucSource source)
 #if XLOG_BLCKSZ < PG_IO_ALIGN_SIZE
    if (result && (flags & (IO_DIRECT_WAL | IO_DIRECT_WAL_INIT)))
    {
-       GUC_check_errdetail("debug_io_direct is not supported for WAL because XLOG_BLCKSZ is too small");
+       GUC_check_errdetail("\"debug_io_direct\" is not supported for WAL because XLOG_BLCKSZ is too small");
        result = false;
    }
 #endif
 #if BLCKSZ < PG_IO_ALIGN_SIZE
    if (result && (flags & IO_DIRECT_DATA))
    {
-       GUC_check_errdetail("debug_io_direct is not supported for data because BLCKSZ is too small");
+       GUC_check_errdetail("\"debug_io_direct\" is not supported for data because BLCKSZ is too small");
        result = false;
    }
 #endif
index 5154353c84474f54435e20f01ec1dcb623777a9a..9e4ddf72258c9b38323fa93b9c76282f2d9cbe18 100644 (file)
@@ -960,7 +960,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
                ereport(ERROR,
                        (errcode(ERRCODE_OUT_OF_MEMORY),
                         errmsg("out of shared memory"),
-                        errhint("You might need to increase %s.", "max_locks_per_transaction")));
+                        errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
            else
                return LOCKACQUIRE_NOT_AVAIL;
        }
@@ -998,7 +998,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
            ereport(ERROR,
                    (errcode(ERRCODE_OUT_OF_MEMORY),
                     errmsg("out of shared memory"),
-                    errhint("You might need to increase %s.", "max_locks_per_transaction")));
+                    errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
        else
            return LOCKACQUIRE_NOT_AVAIL;
    }
@@ -2801,7 +2801,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock)
            ereport(ERROR,
                    (errcode(ERRCODE_OUT_OF_MEMORY),
                     errmsg("out of shared memory"),
-                    errhint("You might need to increase %s.", "max_locks_per_transaction")));
+                    errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
        }
        GrantLock(proclock->tag.myLock, proclock, lockmode);
        FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
@@ -4186,7 +4186,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
        ereport(ERROR,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("out of shared memory"),
-                errhint("You might need to increase %s.", "max_locks_per_transaction")));
+                errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
    }
 
    /*
@@ -4251,7 +4251,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
        ereport(ERROR,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("out of shared memory"),
-                errhint("You might need to increase %s.", "max_locks_per_transaction")));
+                errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
    }
 
    /*
@@ -4601,7 +4601,7 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait)
            ereport(ERROR,
                    (errcode(ERRCODE_OUT_OF_MEMORY),
                     errmsg("out of shared memory"),
-                    errhint("You might need to increase %s.", "max_locks_per_transaction")));
+                    errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
        }
        GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
 
index d5bbfbd4c6f0ec8cc5420e4dce47efd13f0b61ae..93841654db339fe471ea4eb2b03b38e3ce2e2e9b 100644 (file)
@@ -651,7 +651,7 @@ SetRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer)
        ereport(ERROR,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("not enough elements in RWConflictPool to record a read/write conflict"),
-                errhint("You might need to run fewer transactions at a time or increase max_connections.")));
+                errhint("You might need to run fewer transactions at a time or increase \"max_connections\".")));
 
    conflict = dlist_head_element(RWConflictData, outLink, &RWConflictPool->availableList);
    dlist_delete(&conflict->outLink);
@@ -676,7 +676,7 @@ SetPossibleUnsafeConflict(SERIALIZABLEXACT *roXact,
        ereport(ERROR,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("not enough elements in RWConflictPool to record a potential read/write conflict"),
-                errhint("You might need to run fewer transactions at a time or increase max_connections.")));
+                errhint("You might need to run fewer transactions at a time or increase \"max_connections\".")));
 
    conflict = dlist_head_element(RWConflictData, outLink, &RWConflictPool->availableList);
    dlist_delete(&conflict->outLink);
@@ -1678,7 +1678,7 @@ GetSerializableTransactionSnapshot(Snapshot snapshot)
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("cannot use serializable mode in a hot standby"),
-                errdetail("default_transaction_isolation is set to \"serializable\"."),
+                errdetail("\"default_transaction_isolation\" is set to \"serializable\"."),
                 errhint("You can use \"SET default_transaction_isolation = 'repeatable read'\" to change the default.")));
 
    /*
@@ -2461,7 +2461,7 @@ CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
        ereport(ERROR,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("out of shared memory"),
-                errhint("You might need to increase %s.", "max_pred_locks_per_transaction")));
+                errhint("You might need to increase \"%s\".", "max_pred_locks_per_transaction")));
    if (!found)
        dlist_init(&target->predicateLocks);
 
@@ -2476,7 +2476,7 @@ CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
        ereport(ERROR,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("out of shared memory"),
-                errhint("You might need to increase %s.", "max_pred_locks_per_transaction")));
+                errhint("You might need to increase \"%s\".", "max_pred_locks_per_transaction")));
 
    if (!found)
    {
@@ -3873,7 +3873,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
                ereport(ERROR,
                        (errcode(ERRCODE_OUT_OF_MEMORY),
                         errmsg("out of shared memory"),
-                        errhint("You might need to increase %s.", "max_pred_locks_per_transaction")));
+                        errhint("You might need to increase \"%s\".", "max_pred_locks_per_transaction")));
            if (found)
            {
                Assert(predlock->commitSeqNo != 0);
index e4f256c63c7cf344d854bf99a6096dd50f90d7f5..a2900b6014a8efdb2e501bb5a0f21702dbdca71b 100644 (file)
@@ -345,7 +345,7 @@ InitProcess(void)
        if (AmWalSenderProcess())
            ereport(FATAL,
                    (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
-                    errmsg("number of requested standby connections exceeds max_wal_senders (currently %d)",
+                    errmsg("number of requested standby connections exceeds \"max_wal_senders\" (currently %d)",
                            max_wal_senders)));
        ereport(FATAL,
                (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
index 2dff28afcef734446fde04fbd17761866bfd6242..45a3794b8e3a82757b7be8f5999007c8a0efa59e 100644 (file)
@@ -3535,7 +3535,7 @@ check_stack_depth(void)
        ereport(ERROR,
                (errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
                 errmsg("stack depth limit exceeded"),
-                errhint("Increase the configuration parameter max_stack_depth (currently %dkB), "
+                errhint("Increase the configuration parameter \"max_stack_depth\" (currently %dkB), "
                         "after ensuring the platform's stack depth limit is adequate.",
                         max_stack_depth)));
    }
@@ -3582,7 +3582,7 @@ check_max_stack_depth(int *newval, void **extra, GucSource source)
 
    if (stack_rlimit > 0 && newval_bytes > stack_rlimit - STACK_DEPTH_SLOP)
    {
-       GUC_check_errdetail("max_stack_depth must not exceed %ldkB.",
+       GUC_check_errdetail("\"max_stack_depth\" must not exceed %ldkB.",
                            (stack_rlimit - STACK_DEPTH_SLOP) / 1024L);
        GUC_check_errhint("Increase the platform's stack depth limit via \"ulimit -s\" or local equivalent.");
        return false;
@@ -3607,7 +3607,7 @@ check_client_connection_check_interval(int *newval, void **extra, GucSource sour
 {
    if (!WaitEventSetCanReportClosed() && *newval != 0)
    {
-       GUC_check_errdetail("client_connection_check_interval must be set to 0 on this platform.");
+       GUC_check_errdetail("\"client_connection_check_interval\" must be set to 0 on this platform.");
        return false;
    }
    return true;
@@ -3643,9 +3643,9 @@ check_log_stats(bool *newval, void **extra, GucSource source)
    if (*newval &&
        (log_parser_stats || log_planner_stats || log_executor_stats))
    {
-       GUC_check_errdetail("Cannot enable log_statement_stats when "
-                           "log_parser_stats, log_planner_stats, "
-                           "or log_executor_stats is true.");
+       GUC_check_errdetail("Cannot enable \"log_statement_stats\" when "
+                           "\"log_parser_stats\", \"log_planner_stats\", "
+                           "or \"log_executor_stats\" is true.");
        return false;
    }
    return true;
index 8d95b5d42ab59811e16509119e2b7ef9adefb8de..7e5bb2b703a72aafeee68b56bdb47e29cac4a03b 100644 (file)
@@ -3000,7 +3000,7 @@ icu_validate_locale(const char *loc_str)
        ereport(elevel,
                (errmsg("could not get language from ICU locale \"%s\": %s",
                        loc_str, u_errorName(status)),
-                errhint("To disable ICU locale validation, set the parameter %s to \"%s\".",
+                errhint("To disable ICU locale validation, set the parameter \"%s\" to \"%s\".",
                         "icu_validation_level", "disabled")));
        return;
    }
@@ -3029,7 +3029,7 @@ icu_validate_locale(const char *loc_str)
        ereport(elevel,
                (errmsg("ICU locale \"%s\" has unknown language \"%s\"",
                        loc_str, lang),
-                errhint("To disable ICU locale validation, set the parameter %s to \"%s\".",
+                errhint("To disable ICU locale validation, set the parameter \"%s\" to \"%s\".",
                         "icu_validation_level", "disabled")));
 
    /* check that it can be opened */
index dccd130c9110529f1aab6df88d7d46874a174a82..d2e2e9bbba0b3b4754158264f4a671b8f0ea1c26 100644 (file)
@@ -456,7 +456,7 @@ byteaout(PG_FUNCTION_ARGS)
    }
    else
    {
-       elog(ERROR, "unrecognized bytea_output setting: %d",
+       elog(ERROR, "unrecognized \"bytea_output\" setting: %d",
             bytea_output);
        rp = result = NULL;     /* keep compiler quiet */
    }
index eafa0128ef05678e9e4d44b41a439a4b71417c43..092004dcf3b3fb7e91f040a02d59effbd5ee072d 100644 (file)
@@ -538,7 +538,7 @@ find_in_dynamic_libpath(const char *basename)
        if (piece == p)
            ereport(ERROR,
                    (errcode(ERRCODE_INVALID_NAME),
-                    errmsg("zero-length component in parameter dynamic_library_path")));
+                    errmsg("zero-length component in parameter \"dynamic_library_path\"")));
 
        if (piece == NULL)
            len = strlen(p);
@@ -557,7 +557,7 @@ find_in_dynamic_libpath(const char *basename)
        if (!is_absolute_path(mangled))
            ereport(ERROR,
                    (errcode(ERRCODE_INVALID_NAME),
-                    errmsg("component in parameter dynamic_library_path is not an absolute path")));
+                    errmsg("component in parameter \"dynamic_library_path\" is not an absolute path")));
 
        full = palloc(strlen(mangled) + 1 + baselen + 1);
        sprintf(full, "%s/%s", mangled, basename);
index 3fb68039986f6a2f553cf456a2e8f7d93511455f..547cecde24091cb361d33ffe07a4e96ecf0c2e39 100644 (file)
@@ -1879,7 +1879,7 @@ SelectConfigFiles(const char *userDoption, const char *progname)
    else
    {
        write_stderr("%s does not know where to find the database system data.\n"
-                    "This can be specified as data_directory in \"%s\", "
+                    "This can be specified as \"data_directory\" in \"%s\", "
                     "or by the -D invocation option, or by the "
                     "PGDATA environment variable.\n",
                     progname, ConfigFileName);
index ea2b0577bc6f5ccc295914ac208088e0b615f075..85c8d54d4fc5594260fdbe479c9a659a18fb9fc8 100644 (file)
@@ -1066,7 +1066,7 @@ struct config_bool ConfigureNamesBool[] =
    },
    {
        {"ssl_passphrase_command_supports_reload", PGC_SIGHUP, CONN_AUTH_SSL,
-           gettext_noop("Controls whether ssl_passphrase_command is called during server reload."),
+           gettext_noop("Controls whether \"ssl_passphrase_command\" is called during server reload."),
            NULL
        },
        &ssl_passphrase_command_supports_reload,
@@ -1114,7 +1114,7 @@ struct config_bool ConfigureNamesBool[] =
            gettext_noop("Continues processing past damaged page headers."),
            gettext_noop("Detection of a damaged page header normally causes PostgreSQL to "
                         "report an error, aborting the current transaction. Setting "
-                        "zero_damaged_pages to true causes the system to instead report a "
+                        "\"zero_damaged_page\" to true causes the system to instead report a "
                         "warning, zero out the damaged page, and continue processing. This "
                         "behavior will destroy data, namely all the rows on the damaged page."),
            GUC_NOT_IN_SAMPLE
@@ -1129,7 +1129,7 @@ struct config_bool ConfigureNamesBool[] =
            gettext_noop("Detection of WAL records having references to "
                         "invalid pages during recovery causes PostgreSQL to "
                         "raise a PANIC-level error, aborting the recovery. "
-                        "Setting ignore_invalid_pages to true causes "
+                        "Setting \"ignore_invalid_pages\" to true causes "
                         "the system to ignore invalid page references "
                         "in WAL records (but still report a warning), "
                         "and continue recovery. This behavior may cause "
@@ -2713,7 +2713,7 @@ struct config_int ConfigureNamesInt[] =
        {"max_locks_per_transaction", PGC_POSTMASTER, LOCK_MANAGEMENT,
            gettext_noop("Sets the maximum number of locks per transaction."),
            gettext_noop("The shared lock table is sized on the assumption that at most "
-                        "max_locks_per_transaction objects per server process or prepared "
+                        "\"max_locks_per_transaction\" objects per server process or prepared "
                         "transaction will need to be locked at any one time.")
        },
        &max_locks_per_xact,
@@ -2725,7 +2725,7 @@ struct config_int ConfigureNamesInt[] =
        {"max_pred_locks_per_transaction", PGC_POSTMASTER, LOCK_MANAGEMENT,
            gettext_noop("Sets the maximum number of predicate locks per transaction."),
            gettext_noop("The shared predicate lock table is sized on the assumption that "
-                        "at most max_pred_locks_per_transaction objects per server process "
+                        "at most \"max_pred_locks_per_transaction\" objects per server process "
                         "or prepared transaction will need to be locked at any one time.")
        },
        &max_predicate_locks_per_xact,
@@ -2976,7 +2976,7 @@ struct config_int ConfigureNamesInt[] =
    {
        {"commit_siblings", PGC_USERSET, WAL_SETTINGS,
            gettext_noop("Sets the minimum number of concurrent open transactions "
-                        "required before performing commit_delay."),
+                        "required before performing \"commit_delay\"."),
            NULL
        },
        &CommitSiblings,
@@ -3108,7 +3108,7 @@ struct config_int ConfigureNamesInt[] =
        {"maintenance_io_concurrency",
            PGC_USERSET,
            RESOURCES_ASYNCHRONOUS,
-           gettext_noop("A variant of effective_io_concurrency that is used for maintenance work."),
+           gettext_noop("A variant of \"effective_io_concurrency\" that is used for maintenance work."),
            NULL,
            GUC_EXPLAIN
        },
@@ -3815,7 +3815,7 @@ struct config_real ConfigureNamesReal[] =
 
    {
        {"hash_mem_multiplier", PGC_USERSET, RESOURCES_MEM,
-           gettext_noop("Multiple of work_mem to use for hash tables."),
+           gettext_noop("Multiple of \"work_mem\" to use for hash tables."),
            NULL,
            GUC_EXPLAIN
        },
@@ -3909,7 +3909,7 @@ struct config_real ConfigureNamesReal[] =
 
    {
        {"log_statement_sample_rate", PGC_SUSET, LOGGING_WHEN,
-           gettext_noop("Fraction of statements exceeding log_min_duration_sample to be logged."),
+           gettext_noop("Fraction of statements exceeding \"log_min_duration_sample\" to be logged."),
            gettext_noop("Use a value between 0.0 (never log) and 1.0 (always log).")
        },
        &log_statement_sample_rate,
@@ -3940,7 +3940,7 @@ struct config_string ConfigureNamesString[] =
    {
        {"archive_command", PGC_SIGHUP, WAL_ARCHIVING,
            gettext_noop("Sets the shell command that will be called to archive a WAL file."),
-           gettext_noop("This is used only if archive_library is not set.")
+           gettext_noop("This is used only if \"archive_library\" is not set.")
        },
        &XLogArchiveCommand,
        "",
@@ -3950,7 +3950,7 @@ struct config_string ConfigureNamesString[] =
    {
        {"archive_library", PGC_SIGHUP, WAL_ARCHIVING,
            gettext_noop("Sets the library that will be called to archive a WAL file."),
-           gettext_noop("An empty string indicates that archive_command should be used.")
+           gettext_noop("An empty string indicates that \"archive_command\" should be used.")
        },
        &XLogArchiveLibrary,
        "",
@@ -4895,7 +4895,7 @@ struct config_enum ConfigureNamesEnum[] =
 
    {
        {"archive_mode", PGC_POSTMASTER, WAL_ARCHIVING,
-           gettext_noop("Allows archiving of WAL files using archive_command."),
+           gettext_noop("Allows archiving of WAL files using \"archive_command\"."),
            NULL
        },
        &XLogArchiveMode,
index 5e89b3c8e8b4c3fbb26259525dc9be0819b6de63..12ae194067f5da7d382cb58321f4c29428134af2 100644 (file)
@@ -1092,7 +1092,7 @@ test_config_settings(void)
     * Probe for max_connections before shared_buffers, since it is subject to
     * more constraints than shared_buffers.
     */
-   printf(_("selecting default max_connections ... "));
+   printf(_("selecting default \"max_connections\" ... "));
    fflush(stdout);
 
    for (i = 0; i < connslen; i++)
@@ -1112,7 +1112,7 @@ test_config_settings(void)
 
    printf("%d\n", n_connections);
 
-   printf(_("selecting default shared_buffers ... "));
+   printf(_("selecting default \"shared_buffers\" ... "));
    fflush(stdout);
 
    for (i = 0; i < bufslen; i++)
index d0efd8600ca96178007ca35ccf377034b77ebcba..feee451d5951ed772717116b06faeb67c4ac9bfb 100644 (file)
@@ -227,7 +227,7 @@ GetConnection(void)
        res = PQexec(tmpconn, ALWAYS_SECURE_SEARCH_PATH_SQL);
        if (PQresultStatus(res) != PGRES_TUPLES_OK)
        {
-           pg_log_error("could not clear search_path: %s",
+           pg_log_error("could not clear \"search_path\": %s",
                         PQerrorMessage(tmpconn));
            PQclear(res);
            PQfinish(tmpconn);
@@ -243,14 +243,14 @@ GetConnection(void)
    tmpparam = PQparameterStatus(tmpconn, "integer_datetimes");
    if (!tmpparam)
    {
-       pg_log_error("could not determine server setting for integer_datetimes");
+       pg_log_error("could not determine server setting for \"integer_datetimes\"");
        PQfinish(tmpconn);
        exit(1);
    }
 
    if (strcmp(tmpparam, "on") != 0)
    {
-       pg_log_error("integer_datetimes compile flag does not match server");
+       pg_log_error("\"integer_datetimes\" compile flag does not match server");
        PQfinish(tmpconn);
        exit(1);
    }
index 93e0837947cf81b2ad8bab929307a21cb75f68ca..93a05d80ca7b63a33591ea1c663f97d08f00bf8e 100644 (file)
@@ -81,7 +81,7 @@ wal_level_str(WalLevel wal_level)
        case WAL_LEVEL_LOGICAL:
            return "logical";
    }
-   return _("unrecognized wal_level");
+   return _("unrecognized \"wal_level\"");
 }
 
 
index 56e06881547d53ef5cabb8772085e968d5bf3547..68e321212d9223da19f644a25476da3d11e2950c 100644 (file)
@@ -3454,7 +3454,7 @@ _selectOutputSchema(ArchiveHandle *AH, const char *schemaName)
 
        if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
            warn_or_exit_horribly(AH,
-                                 "could not set search_path to \"%s\": %s",
+                                 "could not set \"search_path\" to \"%s\": %s",
                                  schemaName, PQerrorMessage(AH->connection));
 
        PQclear(res);
@@ -3515,7 +3515,7 @@ _selectTablespace(ArchiveHandle *AH, const char *tablespace)
 
        if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
            warn_or_exit_horribly(AH,
-                                 "could not set default_tablespace to %s: %s",
+                                 "could not set \"default_tablespace\" to %s: %s",
                                  fmtId(want), PQerrorMessage(AH->connection));
 
        PQclear(res);
@@ -3564,7 +3564,7 @@ _selectTableAccessMethod(ArchiveHandle *AH, const char *tableam)
 
        if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
            warn_or_exit_horribly(AH,
-                                 "could not set default_table_access_method: %s",
+                                 "could not set \"default_table_access_method\": %s",
                                  PQerrorMessage(AH->connection));
 
        PQclear(res);
index cb14fcafeacbf115407ece309960a7b3d930cc65..e3240708284d75e2abdd8fadf3c5d91272e84bf8 100644 (file)
@@ -3534,7 +3534,7 @@ dumpStdStrings(Archive *AH)
    const char *stdstrings = AH->std_strings ? "on" : "off";
    PQExpBuffer qry = createPQExpBuffer();
 
-   pg_log_info("saving standard_conforming_strings = %s",
+   pg_log_info("saving \"standard_conforming_strings = %s\"",
                stdstrings);
 
    appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
@@ -3592,7 +3592,7 @@ dumpSearchPath(Archive *AH)
    appendStringLiteralAH(qry, path->data, AH);
    appendPQExpBufferStr(qry, ", false);\n");
 
-   pg_log_info("saving search_path = %s", path->data);
+   pg_log_info("saving \"search_path = %s\"", path->data);
 
    ArchiveEntry(AH, nilCatalogId, createDumpId(),
                 ARCHIVE_OPTS(.tag = "SEARCHPATH",
index 7d898c3b501c9058ab922be62b484aedf7198cfc..9378266d28e2f013bd286d0d8f88b1166081be52 100644 (file)
@@ -128,7 +128,7 @@ init_libpq_conn(PGconn *conn)
    /* secure search_path */
    res = PQexec(conn, ALWAYS_SECURE_SEARCH_PATH_SQL);
    if (PQresultStatus(res) != PGRES_TUPLES_OK)
-       pg_fatal("could not clear search_path: %s",
+       pg_fatal("could not clear \"search_path\": %s",
                 PQresultErrorMessage(res));
    PQclear(res);
 
@@ -139,7 +139,7 @@ init_libpq_conn(PGconn *conn)
     */
    str = run_simple_query(conn, "SHOW full_page_writes");
    if (strcmp(str, "on") != 0)
-       pg_fatal("full_page_writes must be enabled in the source server");
+       pg_fatal("\"full_page_writes\" must be enabled in the source server");
    pg_free(str);
 
    /* Prepare a statement we'll use to fetch files */
index 8449ae78ef7a49386097b58f784ac810e6d13d08..8dfea05846e9f99ad8eb685ff930b3aabb9aabe8 100644 (file)
@@ -94,7 +94,7 @@ usage(const char *progname)
    printf(_("%s resynchronizes a PostgreSQL cluster with another copy of the cluster.\n\n"), progname);
    printf(_("Usage:\n  %s [OPTION]...\n\n"), progname);
    printf(_("Options:\n"));
-   printf(_("  -c, --restore-target-wal       use restore_command in target configuration to\n"
+   printf(_("  -c, --restore-target-wal       use \"restore_command\" in target configuration to\n"
             "                                 retrieve WAL files from archives\n"));
    printf(_("  -D, --target-pgdata=DIRECTORY  existing data directory to modify\n"));
    printf(_("      --source-pgdata=DIRECTORY  source data directory to synchronize with\n"));
@@ -1111,9 +1111,9 @@ getRestoreCommand(const char *argv0)
    (void) pg_strip_crlf(restore_command);
 
    if (strcmp(restore_command, "") == 0)
-       pg_fatal("restore_command is not set in the target cluster");
+       pg_fatal("\"restore_command\" is not set in the target cluster");
 
-   pg_log_debug("using for rewind restore_command = \'%s\'",
+   pg_log_debug("using for rewind \"restore_command = \'%s\'\"",
                 restore_command);
 
    destroyPQExpBuffer(postgres_cmd);
index 5c0da425fbb82ce1817320b03a60d9ed06eb7d95..cbf587116eafb1def3aff6fb3b752a68efc893e5 100644 (file)
@@ -298,7 +298,7 @@ test_sync(int writes_per_op)
        printf(_("\nCompare file sync methods using one %dkB write:\n"), XLOG_BLCKSZ_K);
    else
        printf(_("\nCompare file sync methods using two %dkB writes:\n"), XLOG_BLCKSZ_K);
-   printf(_("(in wal_sync_method preference order, except fdatasync is Linux's default)\n"));
+   printf(_("(in \"wal_sync_method\" preference order, except fdatasync is Linux's default)\n"));
 
    /*
     * Test open_datasync if available
index 259b1109b866bf4b61ae34ffb955b652a59310f7..27924159d671c101a30200000d3cb9f05b897028 100644 (file)
@@ -1769,13 +1769,13 @@ check_new_cluster_logical_replication_slots(void)
    wal_level = PQgetvalue(res, 0, 0);
 
    if (strcmp(wal_level, "logical") != 0)
-       pg_fatal("wal_level must be \"logical\", but is set to \"%s\"",
+       pg_fatal("\"wal_level\" must be \"logical\", but is set to \"%s\"",
                 wal_level);
 
    max_replication_slots = atoi(PQgetvalue(res, 1, 0));
 
    if (nslots_on_old > max_replication_slots)
-       pg_fatal("max_replication_slots (%d) must be greater than or equal to the number of "
+       pg_fatal("\"max_replication_slots\" (%d) must be greater than or equal to the number of "
                 "logical replication slots (%d) on the old cluster",
                 max_replication_slots, nslots_on_old);
 
@@ -1822,7 +1822,7 @@ check_new_cluster_subscription_configuration(void)
 
    max_replication_slots = atoi(PQgetvalue(res, 0, 0));
    if (nsubs_on_old > max_replication_slots)
-       pg_fatal("max_replication_slots (%d) must be greater than or equal to the number of "
+       pg_fatal("\"max_replication_slots\" (%d) must be greater than or equal to the number of "
                 "subscriptions (%d) on the old cluster",
                 max_replication_slots, nsubs_on_old);
 
index f9394f97b1fa67b88e54186636f8fdab90dd6377..87c471a6eade7cadaa621691c03fe378390aca3c 100644 (file)
@@ -77,10 +77,10 @@ command_checks_all(
    [@pg_upgrade_cmd],
    1,
    [
-       qr/max_replication_slots \(1\) must be greater than or equal to the number of logical replication slots \(2\) on the old cluster/
+       qr/"max_replication_slots" \(1\) must be greater than or equal to the number of logical replication slots \(2\) on the old cluster/
    ],
    [qr//],
-   'run of pg_upgrade where the new cluster has insufficient max_replication_slots'
+   'run of pg_upgrade where the new cluster has insufficient "max_replication_slots"'
 );
 ok(-d $newpub->data_dir . "/pg_upgrade_output.d",
    "pg_upgrade_output.d/ not removed after pg_upgrade failure");
index ba782c3bd99e1e6f606cfa250778d350cedc8e76..c59b83af9cc10a4d76fc09b5dede8f8c8c88d6f0 100644 (file)
@@ -66,7 +66,7 @@ command_checks_all(
    ],
    1,
    [
-       qr/max_replication_slots \(0\) must be greater than or equal to the number of subscriptions \(1\) on the old cluster/
+       qr/"max_replication_slots" \(0\) must be greater than or equal to the number of subscriptions \(1\) on the old cluster/
    ],
    [qr//],
    'run of pg_upgrade where the new cluster has insufficient max_replication_slots'
index af776b31d8f785ececc091b8f5a90216a296ca9e..86ffb3c8683d04e003c330f953baf8ea4e68e0f4 100644 (file)
@@ -5376,7 +5376,7 @@ GetTableInfo(PGconn *con, bool scale_given)
         * This case is unlikely as pgbench already found "pgbench_branches"
         * above to compute the scale.
         */
-       pg_log_error("no pgbench_accounts table found in search_path");
+       pg_log_error("no pgbench_accounts table found in \"search_path\"");
        pg_log_error_hint("Perhaps you need to do initialization (\"pgbench -i\") in database \"%s\".", PQdb(con));
        exit(1);
    }
index 490a50813648a4a615e2aeb0ea0bb31503e166ec..f194809d53b652eac7ed6a184e4d9fc021cd8c2e 100644 (file)
@@ -95,7 +95,7 @@ RestoreArchivedFile(const char *path, const char *xlogfname,
     * fatal too.
     */
    if (wait_result_is_any_signal(rc, true))
-       pg_fatal("restore_command failed: %s",
+       pg_fatal("\"restore_command\" failed: %s",
                 wait_result_to_str(rc));
 
    /*
index 81ec08485d2e566e25c9ac33839a8935211770dc..256f596e6bb0d928c2f2ebfea534420633cfb18e 100644 (file)
@@ -1313,7 +1313,7 @@ PQencryptPasswordConn(PGconn *conn, const char *passwd, const char *user,
        if (strlen(val) > MAX_ALGORITHM_NAME_LEN)
        {
            PQclear(res);
-           libpq_append_conn_error(conn, "password_encryption value too long");
+           libpq_append_conn_error(conn, "\"password_encryption\" value too long");
            return NULL;
        }
        strcpy(algobuf, val);
index a6b75ad6ac8e044f783e107d7bc7aadc7bff974c..548ad118fb1ae1a303e05a35041e1dd746fb4bff 100644 (file)
@@ -1657,7 +1657,7 @@ pqConnectOptions2(PGconn *conn)
    if (!sslVerifyProtocolVersion(conn->ssl_min_protocol_version))
    {
        conn->status = CONNECTION_BAD;
-       libpq_append_conn_error(conn, "invalid %s value: \"%s\"",
+       libpq_append_conn_error(conn, "invalid \"%s\" value: \"%s\"",
                                "ssl_min_protocol_version",
                                conn->ssl_min_protocol_version);
        return false;
@@ -1665,7 +1665,7 @@ pqConnectOptions2(PGconn *conn)
    if (!sslVerifyProtocolVersion(conn->ssl_max_protocol_version))
    {
        conn->status = CONNECTION_BAD;
-       libpq_append_conn_error(conn, "invalid %s value: \"%s\"",
+       libpq_append_conn_error(conn, "invalid \"%s\" value: \"%s\"",
                                "ssl_max_protocol_version",
                                conn->ssl_max_protocol_version);
        return false;
index 4c62bc95f9fe2bc7e3effe2c8dcee7b3fe6cb08f..f37e701f37ae479c7025b07f8f2f4e9c29d15231 100644 (file)
@@ -18,7 +18,7 @@ SELECT id,
 FROM committs_test
 ORDER BY id;
 ERROR:  could not get commit timestamp data
-HINT:  Make sure the configuration parameter track_commit_timestamp is set.
+HINT:  Make sure the configuration parameter "track_commit_timestamp" is set.
 DROP TABLE committs_test;
 SELECT pg_xact_commit_timestamp('0'::xid);
 ERROR:  cannot retrieve commit timestamp for transaction 0
@@ -40,7 +40,7 @@ SELECT x.xid::text::bigint > 0 as xid_valid,
        roident != 0 AS valid_roident
   FROM pg_last_committed_xact() x;
 ERROR:  could not get commit timestamp data
-HINT:  Make sure the configuration parameter track_commit_timestamp is set.
+HINT:  Make sure the configuration parameter "track_commit_timestamp" is set.
 -- Test non-normal transaction ids.
 SELECT * FROM pg_xact_commit_timestamp_origin(NULL); -- ok, NULL
  timestamp | roident 
@@ -69,13 +69,13 @@ SELECT x.timestamp > '-infinity'::timestamptz AS ts_low,
        roident != 0 AS valid_roident
   FROM pg_last_committed_xact() x;
 ERROR:  could not get commit timestamp data
-HINT:  Make sure the configuration parameter track_commit_timestamp is set.
+HINT:  Make sure the configuration parameter "track_commit_timestamp" is set.
 SELECT x.timestamp > '-infinity'::timestamptz AS ts_low,
        x.timestamp <= now() AS ts_high,
        roident != 0 AS valid_roident
   FROM pg_xact_commit_timestamp_origin(:'txid_no_origin') x;
 ERROR:  could not get commit timestamp data
-HINT:  Make sure the configuration parameter track_commit_timestamp is set.
+HINT:  Make sure the configuration parameter "track_commit_timestamp" is set.
 -- Test transaction with replication origin
 SELECT pg_replication_origin_create('regress_commit_ts: get_origin') != 0
   AS valid_roident;
@@ -97,14 +97,14 @@ SELECT x.timestamp > '-infinity'::timestamptz AS ts_low,
   FROM pg_last_committed_xact() x, pg_replication_origin r
   WHERE r.roident = x.roident;
 ERROR:  could not get commit timestamp data
-HINT:  Make sure the configuration parameter track_commit_timestamp is set.
+HINT:  Make sure the configuration parameter "track_commit_timestamp" is set.
 SELECT x.timestamp > '-infinity'::timestamptz AS ts_low,
        x.timestamp <= now() AS ts_high,
        r.roname
   FROM pg_xact_commit_timestamp_origin(:'txid_with_origin') x, pg_replication_origin r
   WHERE r.roident = x.roident;
 ERROR:  could not get commit timestamp data
-HINT:  Make sure the configuration parameter track_commit_timestamp is set.
+HINT:  Make sure the configuration parameter "track_commit_timestamp" is set.
 SELECT pg_replication_origin_session_reset();
  pg_replication_origin_session_reset 
 -------------------------------------
index 928ef6b1700b38f4d8a97d80e06187e4bc133233..ac4d26302cc87f1845b103833729e93cb0dd53a8 100644 (file)
@@ -2227,10 +2227,10 @@ main(int argc, char **argv)
 
    res = PQexec(conn, "SET lc_messages TO \"C\"");
    if (PQresultStatus(res) != PGRES_COMMAND_OK)
-       pg_fatal("failed to set lc_messages: %s", PQerrorMessage(conn));
+       pg_fatal("failed to set \"lc_messages\": %s", PQerrorMessage(conn));
    res = PQexec(conn, "SET debug_parallel_query = off");
    if (PQresultStatus(res) != PGRES_COMMAND_OK)
-       pg_fatal("failed to set debug_parallel_query: %s", PQerrorMessage(conn));
+       pg_fatal("failed to set \"debug_parallel_query\": %s", PQerrorMessage(conn));
 
    /* Set the trace file, if requested */
    if (tracefile != NULL)
index 948706af85221311b4f76ebb3b32a663dac3261c..d5992149821d7fa6521493954ce681fe19a6eff2 100644 (file)
@@ -58,7 +58,7 @@ set_rot13(SSL_CTX *context, bool isServerStart)
    /* warn if the user has set ssl_passphrase_command */
    if (ssl_passphrase_command[0])
        ereport(WARNING,
-               (errmsg("ssl_passphrase_command setting ignored by ssl_passphrase_func module")));
+               (errmsg("\"ssl_passphrase_command\" setting ignored by ssl_passphrase_func module")));
 
    SSL_CTX_set_default_passwd_cb(context, rot13_passphrase);
 }
index a2bfb645760dcd2c658680b0dcb7f90edc1d04f4..7a63539f39c48448ed394e4630151618b7e149dc 100644 (file)
@@ -56,7 +56,7 @@ my $log_contents = slurp_file($log);
 
 like(
    $log_contents,
-   qr/WARNING.*ssl_passphrase_command setting ignored by ssl_passphrase_func module/,
+   qr/WARNING.*"ssl_passphrase_command" setting ignored by ssl_passphrase_func module/,
    "ssl_passphrase_command set warning");
 
 # set the wrong passphrase
index 3de5d01e3054a90ae76288cdbcd40cd5078aa354..b3dac44d97a80842632463d9343979e0a5b50e78 100644 (file)
@@ -233,7 +233,7 @@ setup_background_workers(int nworkers, dsm_segment *seg)
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
                     errmsg("could not register background process"),
-                    errhint("You may need to increase max_worker_processes.")));
+                    errhint("You may need to increase \"max_worker_processes\".")));
        ++wstate->nworkers;
    }
 
index 068a21f125f7877dd196b2662071a10c5e3a142b..d227b0670342a750c69ead3215e96120c3fa1a1c 100644 (file)
@@ -251,7 +251,7 @@ _PG_init(void)
    if (!process_shared_preload_libraries_in_progress)
        ereport(ERROR,
                (errmsg("cannot load \"%s\" after startup", "test_slru"),
-                errdetail("\"%s\" must be loaded with shared_preload_libraries.",
+                errdetail("\"%s\" must be loaded with \"shared_preload_libraries\".",
                           "test_slru")));
 
    prev_shmem_request_hook = shmem_request_hook;
index c7318d92e8cb17c72623c4948f447f2dc3e9f05d..c6480bbdcd61bf4023fa20b5a739b1add674c1c7 100644 (file)
@@ -91,8 +91,8 @@ sub test_recovery_wal_level_minimal
    # Confirm that the archive recovery fails with an expected error
    my $logfile = slurp_file($recovery_node->logfile());
    ok( $logfile =~
-         qr/FATAL: .* WAL was generated with wal_level=minimal, cannot continue recovering/,
-       "$node_text ends with an error because it finds WAL generated with wal_level=minimal"
+         qr/FATAL: .* WAL was generated with "wal_level=minimal", cannot continue recovering/,
+       "$node_text ends with an error because it finds WAL generated with \"wal_level=minimal\""
    );
 }
 
index 07ff5231d338aaa64cf3e912537ccbb3ec119f09..4628f9fb80635b99e088b11bf1df32fd416b2c20 100644 (file)
@@ -794,7 +794,7 @@ $handle =
   make_slot_active($node_standby, 'wal_level_', 0, \$stdout, \$stderr);
 # We are not able to read from the slot as it requires wal_level >= logical on the primary server
 check_pg_recvlogical_stderr($handle,
-   "logical decoding on standby requires wal_level >= logical on the primary"
+   "logical decoding on standby requires \"wal_level\" >= \"logical\" on the primary"
 );
 
 # Restore primary wal_level
index 4b8c8f143f35bb49b71cc6d4937dccf387e36435..7d59fb44316574cd27313ae16fce3873c2398ec6 100644 (file)
@@ -1042,7 +1042,7 @@ ERROR:  parameter "locale" must be specified
 SET icu_validation_level = ERROR;
 CREATE COLLATION testx (provider = icu, locale = 'nonsense-nowhere'); -- fails
 ERROR:  ICU locale "nonsense-nowhere" has unknown language "nonsense"
-HINT:  To disable ICU locale validation, set the parameter icu_validation_level to "disabled".
+HINT:  To disable ICU locale validation, set the parameter "icu_validation_level" to "disabled".
 CREATE COLLATION testx (provider = icu, locale = '@colStrength=primary;nonsense=yes'); -- fails
 ERROR:  could not convert locale name "@colStrength=primary;nonsense=yes" to language tag: U_ILLEGAL_ARGUMENT_ERROR
 RESET icu_validation_level;
@@ -1050,7 +1050,7 @@ CREATE COLLATION testx (provider = icu, locale = '@colStrength=primary;nonsense=
 WARNING:  could not convert locale name "@colStrength=primary;nonsense=yes" to language tag: U_ILLEGAL_ARGUMENT_ERROR
 CREATE COLLATION testx (provider = icu, locale = 'nonsense-nowhere'); DROP COLLATION testx;
 WARNING:  ICU locale "nonsense-nowhere" has unknown language "nonsense"
-HINT:  To disable ICU locale validation, set the parameter icu_validation_level to "disabled".
+HINT:  To disable ICU locale validation, set the parameter "icu_validation_level" to "disabled".
 CREATE COLLATION test4 FROM nonsense;
 ERROR:  collation "nonsense" for encoding "UTF8" does not exist
 CREATE COLLATION test5 FROM test0;
index 9762c332ce03ac9085817db7992044e23f950791..35d4cf1d4671982d784180f03f2755c14a1023d5 100644 (file)
@@ -113,7 +113,7 @@ COMMIT;
 -- prevent empty values
 SET default_table_access_method = '';
 ERROR:  invalid value for parameter "default_table_access_method": ""
-DETAIL:  default_table_access_method cannot be empty.
+DETAIL:  "default_table_access_method" cannot be empty.
 -- prevent nonexistent values
 SET default_table_access_method = 'I do not exist AM';
 ERROR:  invalid value for parameter "default_table_access_method": "I do not exist AM"
index 7cb28f106d7321f78b8e72420336e3cb8653fc72..aa29bc597bde29fd36097f0416f601e5259ed60d 100644 (file)
@@ -219,10 +219,10 @@ CONTEXT:  JSON data, line 1: {"abc":1,3...
 SET max_stack_depth = '100kB';
 SELECT repeat('[', 10000)::json;
 ERROR:  stack depth limit exceeded
-HINT:  Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+HINT:  Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
 SELECT repeat('{"a":', 10000)::json;
 ERROR:  stack depth limit exceeded
-HINT:  Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+HINT:  Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
 RESET max_stack_depth;
 -- Miscellaneous stuff.
 SELECT 'true'::json;           -- OK
index 66bee5162b4ed251b5f5fd9da71443bd0d8940b3..e66d7601899c87f9d03ee2a949041770bf46dac1 100644 (file)
@@ -213,10 +213,10 @@ CONTEXT:  JSON data, line 1: {"abc":1,3...
 SET max_stack_depth = '100kB';
 SELECT repeat('[', 10000)::jsonb;
 ERROR:  stack depth limit exceeded
-HINT:  Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+HINT:  Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
 SELECT repeat('{"a":', 10000)::jsonb;
 ERROR:  stack depth limit exceeded
-HINT:  Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+HINT:  Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
 RESET max_stack_depth;
 -- Miscellaneous stuff.
 SELECT 'true'::jsonb;          -- OK
index 7168f86bf945abf6450569d9aa3b8a3c0c79b8eb..6ad3d11898a71ce296bcd9002e039bc48d35c7a1 100644 (file)
@@ -19,7 +19,7 @@ SELECT * FROM pxtest1;
 
 PREPARE TRANSACTION 'regress_foo1';
 ERROR:  prepared transactions are disabled
-HINT:  Set max_prepared_transactions to a nonzero value.
+HINT:  Set "max_prepared_transactions" to a nonzero value.
 SELECT * FROM pxtest1;
  foobar 
 --------
@@ -58,7 +58,7 @@ SELECT * FROM pxtest1;
 
 PREPARE TRANSACTION 'regress_foo2';
 ERROR:  prepared transactions are disabled
-HINT:  Set max_prepared_transactions to a nonzero value.
+HINT:  Set "max_prepared_transactions" to a nonzero value.
 SELECT * FROM pxtest1;
  foobar 
 --------
@@ -84,7 +84,7 @@ SELECT * FROM pxtest1;
 
 PREPARE TRANSACTION 'regress_foo3';
 ERROR:  prepared transactions are disabled
-HINT:  Set max_prepared_transactions to a nonzero value.
+HINT:  Set "max_prepared_transactions" to a nonzero value.
 SELECT gid FROM pg_prepared_xacts WHERE gid ~ '^regress_' ORDER BY gid;
  gid 
 -----
@@ -95,7 +95,7 @@ INSERT INTO pxtest1 VALUES ('fff');
 -- This should fail, because the gid foo3 is already in use
 PREPARE TRANSACTION 'regress_foo3';
 ERROR:  prepared transactions are disabled
-HINT:  Set max_prepared_transactions to a nonzero value.
+HINT:  Set "max_prepared_transactions" to a nonzero value.
 SELECT * FROM pxtest1;
  foobar 
 --------
@@ -121,7 +121,7 @@ SELECT * FROM pxtest1;
 
 PREPARE TRANSACTION 'regress_foo4';
 ERROR:  prepared transactions are disabled
-HINT:  Set max_prepared_transactions to a nonzero value.
+HINT:  Set "max_prepared_transactions" to a nonzero value.
 SELECT gid FROM pg_prepared_xacts WHERE gid ~ '^regress_' ORDER BY gid;
  gid 
 -----
@@ -138,7 +138,7 @@ SELECT * FROM pxtest1;
 INSERT INTO pxtest1 VALUES ('fff');
 PREPARE TRANSACTION 'regress_foo5';
 ERROR:  prepared transactions are disabled
-HINT:  Set max_prepared_transactions to a nonzero value.
+HINT:  Set "max_prepared_transactions" to a nonzero value.
 SELECT gid FROM pg_prepared_xacts WHERE gid ~ '^regress_' ORDER BY gid;
  gid 
 -----
@@ -169,7 +169,7 @@ SELECT pg_advisory_xact_lock_shared(1);
 
 PREPARE TRANSACTION 'regress_foo6';  -- fails
 ERROR:  prepared transactions are disabled
-HINT:  Set max_prepared_transactions to a nonzero value.
+HINT:  Set "max_prepared_transactions" to a nonzero value.
 -- Test subtransactions
 BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
   CREATE TABLE pxtest2 (a int);
@@ -181,7 +181,7 @@ BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
   INSERT INTO pxtest2 VALUES (3);
 PREPARE TRANSACTION 'regress_sub1';
 ERROR:  prepared transactions are disabled
-HINT:  Set max_prepared_transactions to a nonzero value.
+HINT:  Set "max_prepared_transactions" to a nonzero value.
 CREATE TABLE pxtest3(fff int);
 -- Test shared invalidation
 BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
@@ -199,7 +199,7 @@ BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
 
 PREPARE TRANSACTION 'regress_sub2';
 ERROR:  prepared transactions are disabled
-HINT:  Set max_prepared_transactions to a nonzero value.
+HINT:  Set "max_prepared_transactions" to a nonzero value.
 -- No such cursor
 FETCH 1 FROM foo;
 ERROR:  cursor "foo" does not exist
index b7500d9c0e78d2363855c337c68c485649dbaf46..52b69a107fb7aeed106f6d33f14f16a477450e02 100644 (file)
@@ -147,17 +147,17 @@ SELECT U&'d\0061t\+000061' AS U&"d\0061t\+000061";
 ERROR:  unsafe use of string constant with Unicode escapes
 LINE 1: SELECT U&'d\0061t\+000061' AS U&"d\0061t\+000061";
                ^
-DETAIL:  String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
+DETAIL:  String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off.
 SELECT U&'d!0061t\+000061' UESCAPE '!' AS U&"d*0061t\+000061" UESCAPE '*';
 ERROR:  unsafe use of string constant with Unicode escapes
 LINE 1: SELECT U&'d!0061t\+000061' UESCAPE '!' AS U&"d*0061t\+000061...
                ^
-DETAIL:  String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
+DETAIL:  String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off.
 SELECT U&' \' UESCAPE '!' AS "tricky";
 ERROR:  unsafe use of string constant with Unicode escapes
 LINE 1: SELECT U&' \' UESCAPE '!' AS "tricky";
                ^
-DETAIL:  String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
+DETAIL:  String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off.
 SELECT 'tricky' AS U&"\" UESCAPE '!';
    \    
 --------
@@ -168,17 +168,17 @@ SELECT U&'wrong: \061';
 ERROR:  unsafe use of string constant with Unicode escapes
 LINE 1: SELECT U&'wrong: \061';
                ^
-DETAIL:  String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
+DETAIL:  String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off.
 SELECT U&'wrong: \+0061';
 ERROR:  unsafe use of string constant with Unicode escapes
 LINE 1: SELECT U&'wrong: \+0061';
                ^
-DETAIL:  String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
+DETAIL:  String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off.
 SELECT U&'wrong: +0061' UESCAPE '+';
 ERROR:  unsafe use of string constant with Unicode escapes
 LINE 1: SELECT U&'wrong: +0061' UESCAPE '+';
                ^
-DETAIL:  String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
+DETAIL:  String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off.
 RESET standard_conforming_strings;
 -- bytea
 SET bytea_output TO hex;
index 68c1b6b41f68023aa9fb7bef31db23e0d2d3ff55..b8773270235aecf6e26de206daa85954aefac4b4 100644 (file)
@@ -554,11 +554,11 @@ $node->connect_fails(
 $node->connect_fails(
    "$common_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require ssl_min_protocol_version=incorrect_tls",
    "connection failure with an incorrect SSL protocol minimum bound",
-   expected_stderr => qr/invalid ssl_min_protocol_version value/);
+   expected_stderr => qr/invalid "ssl_min_protocol_version" value/);
 $node->connect_fails(
    "$common_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require ssl_max_protocol_version=incorrect_tls",
    "connection failure with an incorrect SSL protocol maximum bound",
-   expected_stderr => qr/invalid ssl_max_protocol_version value/);
+   expected_stderr => qr/invalid "ssl_max_protocol_version" value/);
 
 ### Server-side tests.
 ###
index 9ccebd890a1df8bb326bcec1e769f659f1665537..471e9819628f51faee700bd5ddad8da84e0cfaf5 100644 (file)
@@ -573,7 +573,7 @@ CREATE PUBLICATION tap_pub2 FOR TABLE skip_wal;
 ROLLBACK;
 });
 ok( $reterr =~
-     m/WARNING:  wal_level is insufficient to publish logical changes/,
-   'CREATE PUBLICATION while wal_level=minimal');
+     m/WARNING:  "wal_level" is insufficient to publish logical changes/,
+   'CREATE PUBLICATION while "wal_level=minimal"');
 
 done_testing();