break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
if (optind < argc)
{
- fprintf(stderr, _("%s: too many command-line arguments (first is \"%s\")\n"),
- progname, argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error("too many command-line arguments (first is \"%s\")",
+ argv[optind]);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
conn = PQconnectdbParams(keywords, values, true);
if (!conn)
- {
- pg_log_error("could not connect to database %s",
- my_opts->dbname);
- exit(1);
- }
+ pg_fatal("could not connect to database %s",
+ my_opts->dbname);
if (PQstatus(conn) == CONNECTION_BAD &&
PQconnectionNeedsPassword(conn) &&
PQerrorMessage(conn));
PQclear(res);
PQfinish(conn);
- exit(-1);
+ exit(1);
}
PQclear(res);
if (!res || PQresultStatus(res) > 2)
{
pg_log_error("query failed: %s", PQerrorMessage(conn));
- pg_log_error("query was: %s", todo);
+ pg_log_error_detail("Query was: %s", todo);
PQclear(res);
PQfinish(conn);
- exit(-1);
+ exit(1);
}
/* get the number of fields */
{
switch (c)
{
- case '?':
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
- exit(1);
case 'h':
param.pg_host = pg_strdup(optarg);
break;
case 'l':
param.transaction_limit = strtol(optarg, NULL, 10);
if (param.transaction_limit < 0)
- {
- pg_log_error("transaction limit must not be negative (0 disables)");
- exit(1);
- }
+ pg_fatal("transaction limit must not be negative (0 disables)");
break;
case 'n':
param.dry_run = 1;
case 'p':
port = strtol(optarg, NULL, 10);
if ((port < 1) || (port > 65535))
- {
- pg_log_error("invalid port number: %s", optarg);
- exit(1);
- }
+ pg_fatal("invalid port number: %s", optarg);
param.pg_port = pg_strdup(optarg);
break;
case 'U':
param.pg_prompt = TRI_YES;
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
if (optind >= argc)
{
pg_log_error("missing required argument: database name");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
char *result = escape_single_quotes_ascii(src);
if (!result)
- {
- pg_log_error("out of memory");
- exit(1);
- }
+ pg_fatal("out of memory");
return result;
}
int n;
if ((infile = fopen(path, "r")) == NULL)
- {
- pg_log_error("could not open file \"%s\" for reading: %m", path);
- exit(1);
- }
+ pg_fatal("could not open file \"%s\" for reading: %m", path);
initStringInfo(&line);
char **line;
if ((out_file = fopen(path, "w")) == NULL)
- {
- pg_log_error("could not open file \"%s\" for writing: %m", path);
- exit(1);
- }
+ pg_fatal("could not open file \"%s\" for writing: %m", path);
for (line = lines; *line != NULL; line++)
{
if (fputs(*line, out_file) < 0)
- {
- pg_log_error("could not write file \"%s\": %m", path);
- exit(1);
- }
+ pg_fatal("could not write file \"%s\": %m", path);
free(*line);
}
if (fclose(out_file))
- {
- pg_log_error("could not write file \"%s\": %m", path);
- exit(1);
- }
+ pg_fatal("could not close file \"%s\": %m", path);
}
/*
if (geteuid() == 0) /* 0 is root's uid */
{
pg_log_error("cannot be run as root");
- fprintf(stderr,
- _("Please log in (using, e.g., \"su\") as the (unprivileged) user that will\n"
- "own the server process.\n"));
+ pg_log_error_hint("Please log in (using, e.g., \"su\") as the (unprivileged) user that will own the server process.");
exit(1);
}
#endif
if ((enc = pg_valid_server_encoding(encoding_name)) >= 0)
return enc;
}
- pg_log_error("\"%s\" is not a valid server encoding name",
- encoding_name ? encoding_name : "(null)");
- exit(1);
+ pg_fatal("\"%s\" is not a valid server encoding name",
+ encoding_name ? encoding_name : "(null)");
}
/*
if (errno == ENOENT)
{
pg_log_error("file \"%s\" does not exist", path);
- fprintf(stderr,
- _("This might mean you have a corrupted installation or identified\n"
- "the wrong directory with the invocation option -L.\n"));
+ pg_log_error_hint("This might mean you have a corrupted installation or identified the wrong directory with the invocation option -L.");
}
else
{
pg_log_error("could not access file \"%s\": %m", path);
- fprintf(stderr,
- _("This might mean you have a corrupted installation or identified\n"
- "the wrong directory with the invocation option -L.\n"));
+ pg_log_error_hint("This might mean you have a corrupted installation or identified the wrong directory with the invocation option -L.");
}
exit(1);
}
if (!S_ISREG(statbuf.st_mode))
{
pg_log_error("file \"%s\" is not a regular file", path);
- fprintf(stderr,
- _("This might mean you have a corrupted installation or identified\n"
- "the wrong directory with the invocation option -L.\n"));
+ pg_log_error_hint("This might mean you have a corrupted installation or identified the wrong directory with the invocation option -L.");
exit(1);
}
}
path = psprintf("%s/%s/PG_VERSION", pg_data, extrapath);
if ((version_file = fopen(path, PG_BINARY_W)) == NULL)
- {
- pg_log_error("could not open file \"%s\" for writing: %m", path);
- exit(1);
- }
+ pg_fatal("could not open file \"%s\" for writing: %m", path);
if (fprintf(version_file, "%s\n", PG_MAJORVERSION) < 0 ||
fclose(version_file))
- {
- pg_log_error("could not write file \"%s\": %m", path);
- exit(1);
- }
+ pg_fatal("could not write file \"%s\": %m", path);
free(path);
}
path = psprintf("%s/postgresql.conf", pg_data);
conf_file = fopen(path, PG_BINARY_W);
if (conf_file == NULL)
- {
- pg_log_error("could not open file \"%s\" for writing: %m", path);
- exit(1);
- }
+ pg_fatal("could not open file \"%s\" for writing: %m", path);
if (fclose(conf_file))
- {
- pg_log_error("could not write file \"%s\": %m", path);
- exit(1);
- }
+ pg_fatal("could not write file \"%s\": %m", path);
free(path);
}
writefile(path, conflines);
if (chmod(path, pg_file_create_mode) != 0)
- {
- pg_log_error("could not change permissions of \"%s\": %m", path);
- exit(1);
- }
+ pg_fatal("could not change permissions of \"%s\": %m", path);
/*
* create the automatic configuration file to store the configuration
writefile(path, autoconflines);
if (chmod(path, pg_file_create_mode) != 0)
- {
- pg_log_error("could not change permissions of \"%s\": %m", path);
- exit(1);
- }
+ pg_fatal("could not change permissions of \"%s\": %m", path);
free(conflines);
writefile(path, conflines);
if (chmod(path, pg_file_create_mode) != 0)
- {
- pg_log_error("could not change permissions of \"%s\": %m", path);
- exit(1);
- }
+ pg_fatal("could not change permissions of \"%s\": %m", path);
free(conflines);
writefile(path, conflines);
if (chmod(path, pg_file_create_mode) != 0)
- {
- pg_log_error("could not change permissions of \"%s\": %m", path);
- exit(1);
- }
+ pg_fatal("could not change permissions of \"%s\": %m", path);
free(conflines);
{
pg_log_error("input file \"%s\" does not belong to PostgreSQL %s",
bki_file, PG_VERSION);
- fprintf(stderr,
- _("Check your installation or specify the correct path "
- "using the option -L.\n"));
+ pg_log_error_hint("Specify the correct path using the option -L.");
exit(1);
}
FILE *pwf = fopen(pwfilename, "r");
if (!pwf)
- {
- pg_log_error("could not open file \"%s\" for reading: %m",
- pwfilename);
- exit(1);
- }
+ pg_fatal("could not open file \"%s\" for reading: %m",
+ pwfilename);
pwd1 = pg_get_line(pwf, NULL);
if (!pwd1)
{
if (ferror(pwf))
- pg_log_error("could not read password from file \"%s\": %m",
- pwfilename);
+ pg_fatal("could not read password from file \"%s\": %m",
+ pwfilename);
else
- pg_log_error("password file \"%s\" is empty",
- pwfilename);
- exit(1);
+ pg_fatal("password file \"%s\" is empty",
+ pwfilename);
}
fclose(pwf);
save = setlocale(category, NULL);
if (!save)
- {
- pg_log_error("setlocale() failed");
- exit(1);
- }
+ pg_fatal("setlocale() failed");
/* save may be pointing at a modifiable scratch variable, so copy it. */
save = pg_strdup(save);
/* restore old value. */
if (!setlocale(category, save))
- {
- pg_log_error("failed to restore old locale \"%s\"", save);
- exit(1);
- }
+ pg_fatal("failed to restore old locale \"%s\"", save);
free(save);
/* complain if locale wasn't valid */
if (res == NULL)
{
if (*locale)
- pg_log_error("invalid locale name \"%s\"", locale);
+ pg_fatal("invalid locale name \"%s\"", locale);
else
{
/*
* setlocale's behavior is implementation-specific, it's hard to
* be sure what it didn't like. Print a safe generic message.
*/
- pg_log_error("invalid locale settings; check LANG and LC_* environment variables");
+ pg_fatal("invalid locale settings; check LANG and LC_* environment variables");
}
- exit(1);
}
}
user_enc == PG_SQL_ASCII))
{
pg_log_error("encoding mismatch");
- fprintf(stderr,
- _("The encoding you selected (%s) and the encoding that the\n"
- "selected locale uses (%s) do not match. This would lead to\n"
- "misbehavior in various character string processing functions.\n"
- "Rerun %s and either do not specify an encoding explicitly,\n"
- "or choose a matching combination.\n"),
- pg_encoding_to_char(user_enc),
- pg_encoding_to_char(locale_enc),
- progname);
+ pg_log_error_detail("The encoding you selected (%s) and the encoding that the "
+ "selected locale uses (%s) do not match. This would lead to "
+ "misbehavior in various character string processing functions.",
+ pg_encoding_to_char(user_enc),
+ pg_encoding_to_char(locale_enc));
+ pg_log_error_hint("Rerun %s and either do not specify an encoding explicitly, "
+ "or choose a matching combination.",
+ progname);
return false;
}
return true;
if (locale_provider == COLLPROVIDER_ICU)
{
if (!icu_locale)
- {
- pg_log_error("ICU locale must be specified");
- exit(1);
- }
+ pg_fatal("ICU locale must be specified");
/*
* In supported builds, the ICU locale ID will be checked by the
- * backend when performing the post-boostrap initialization.
+ * backend during post-bootstrap initialization.
*/
#ifndef USE_ICU
- pg_log_error("ICU is not supported in this build");
- exit(1);
+ pg_fatal("ICU is not supported in this build");
#endif
}
}
return;
}
- pg_log_error("invalid authentication method \"%s\" for \"%s\" connections",
- authmethod, conntype);
- exit(1);
+ pg_fatal("invalid authentication method \"%s\" for \"%s\" connections",
+ authmethod, conntype);
}
static void
strcmp(authmethodhost, "password") == 0 ||
strcmp(authmethodhost, "scram-sha-256") == 0) &&
!(pwprompt || pwfilename))
- {
- pg_log_error("must specify a password for the superuser to enable password authentication");
- exit(1);
- }
+ pg_fatal("must specify a password for the superuser to enable password authentication");
}
else
{
pg_log_error("no data directory specified");
- fprintf(stderr,
- _("You must identify the directory where the data for this database system\n"
- "will reside. Do this with either the invocation option -D or the\n"
- "environment variable PGDATA.\n"));
+ pg_log_error_hint("You must identify the directory where the data for this database system "
+ "will reside. Do this with either the invocation option -D or the "
+ "environment variable PGDATA.");
exit(1);
}
}
* have embedded spaces.
*/
if (setenv("PGDATA", pg_data, 1) != 0)
- {
- pg_log_error("could not set environment");
- exit(1);
- }
+ pg_fatal("could not set environment");
}
strlcpy(full_path, progname, sizeof(full_path));
if (ret == -1)
- pg_log_error("The program \"%s\" is needed by %s but was not found in the\n"
- "same directory as \"%s\".\n"
- "Check your installation.",
- "postgres", progname, full_path);
+ pg_fatal("program \"%s\" is needed by %s but was not found in the same directory as \"%s\"",
+ "postgres", progname, full_path);
else
- pg_log_error("The program \"%s\" was found by \"%s\"\n"
- "but was not the same version as %s.\n"
- "Check your installation.",
- "postgres", full_path, progname);
- exit(1);
+ pg_fatal("program \"%s\" was found by \"%s\" but was not the same version as %s",
+ "postgres", full_path, progname);
}
/* store binary directory */
get_share_path(backend_exec, share_path);
}
else if (!is_absolute_path(share_path))
- {
- pg_log_error("input file location must be an absolute path");
- exit(1);
- }
+ pg_fatal("input file location must be an absolute path");
canonicalize_path(share_path);
}
/* Couldn't recognize the locale's codeset */
pg_log_error("could not find suitable encoding for locale \"%s\"",
lc_ctype);
- fprintf(stderr, _("Rerun %s with the -E option.\n"), progname);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Rerun %s with the -E option.", progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
else if (!pg_valid_server_encoding_id(ctype_enc))
#else
pg_log_error("locale \"%s\" requires unsupported encoding \"%s\"",
lc_ctype, pg_encoding_to_char(ctype_enc));
- fprintf(stderr,
- _("Encoding \"%s\" is not allowed as a server-side encoding.\n"
- "Rerun %s with a different locale selection.\n"),
- pg_encoding_to_char(ctype_enc), progname);
+ pg_log_error_detail("Encoding \"%s\" is not allowed as a server-side encoding.",
+ pg_encoding_to_char(ctype_enc));
+ pg_log_error_hint("Rerun %s with a different locale selection.",
+ progname);
exit(1);
#endif
}
fflush(stdout);
if (pg_mkdir_p(pg_data, pg_dir_create_mode) != 0)
- {
- pg_log_error("could not create directory \"%s\": %m", pg_data);
- exit(1);
- }
+ pg_fatal("could not create directory \"%s\": %m", pg_data);
else
check_ok();
fflush(stdout);
if (chmod(pg_data, pg_dir_create_mode) != 0)
- {
- pg_log_error("could not change permissions of directory \"%s\": %m",
- pg_data);
- exit(1);
- }
+ pg_fatal("could not change permissions of directory \"%s\": %m",
+ pg_data);
else
check_ok();
if (ret != 4)
warn_on_mount_point(ret);
else
- fprintf(stderr,
- _("If you want to create a new database system, either remove or empty\n"
- "the directory \"%s\" or run %s\n"
- "with an argument other than \"%s\".\n"),
- pg_data, progname, pg_data);
+ pg_log_error_hint("If you want to create a new database system, either remove or empty "
+ "the directory \"%s\" or run %s "
+ "with an argument other than \"%s\".",
+ pg_data, progname, pg_data);
exit(1); /* no further message needed */
default:
/* Trouble accessing directory */
- pg_log_error("could not access directory \"%s\": %m", pg_data);
- exit(1);
+ pg_fatal("could not access directory \"%s\": %m", pg_data);
}
}
/* clean up xlog directory name, check it's absolute */
canonicalize_path(xlog_dir);
if (!is_absolute_path(xlog_dir))
- {
- pg_log_error("WAL directory location must be an absolute path");
- exit(1);
- }
+ pg_fatal("WAL directory location must be an absolute path");
/* check if the specified xlog directory exists/is empty */
switch ((ret = pg_check_dir(xlog_dir)))
fflush(stdout);
if (pg_mkdir_p(xlog_dir, pg_dir_create_mode) != 0)
- {
- pg_log_error("could not create directory \"%s\": %m",
- xlog_dir);
- exit(1);
- }
+ pg_fatal("could not create directory \"%s\": %m",
+ xlog_dir);
else
check_ok();
fflush(stdout);
if (chmod(xlog_dir, pg_dir_create_mode) != 0)
- {
- pg_log_error("could not change permissions of directory \"%s\": %m",
- xlog_dir);
- exit(1);
- }
+ pg_fatal("could not change permissions of directory \"%s\": %m",
+ xlog_dir);
else
check_ok();
if (ret != 4)
warn_on_mount_point(ret);
else
- fprintf(stderr,
- _("If you want to store the WAL there, either remove or empty the directory\n"
- "\"%s\".\n"),
- xlog_dir);
+ pg_log_error_hint("If you want to store the WAL there, either remove or empty the directory \"%s\".",
+ xlog_dir);
exit(1);
default:
/* Trouble accessing directory */
- pg_log_error("could not access directory \"%s\": %m", xlog_dir);
- exit(1);
+ pg_fatal("could not access directory \"%s\": %m", xlog_dir);
}
#ifdef HAVE_SYMLINK
if (symlink(xlog_dir, subdirloc) != 0)
- {
- pg_log_error("could not create symbolic link \"%s\": %m",
- subdirloc);
- exit(1);
- }
+ pg_fatal("could not create symbolic link \"%s\": %m",
+ subdirloc);
#else
- pg_log_error("symlinks are not supported on this platform");
- exit(1);
+ pg_fatal("symlinks are not supported on this platform");
#endif
}
else
{
/* Without -X option, just make the subdirectory normally */
if (mkdir(subdirloc, pg_dir_create_mode) < 0)
- {
- pg_log_error("could not create directory \"%s\": %m",
- subdirloc);
- exit(1);
- }
+ pg_fatal("could not create directory \"%s\": %m",
+ subdirloc);
}
free(subdirloc);
warn_on_mount_point(int error)
{
if (error == 2)
- fprintf(stderr,
- _("It contains a dot-prefixed/invisible file, perhaps due to it being a mount point.\n"));
+ pg_log_error_detail("It contains a dot-prefixed/invisible file, perhaps due to it being a mount point.");
else if (error == 3)
- fprintf(stderr,
- _("It contains a lost+found directory, perhaps due to it being a mount point.\n"));
+ pg_log_error_detail("It contains a lost+found directory, perhaps due to it being a mount point.");
- fprintf(stderr,
- _("Using a mount point directly as the data directory is not recommended.\n"
- "Create a subdirectory under the mount point.\n"));
+ pg_log_error_hint("Using a mount point directly as the data directory is not recommended.\n"
+ "Create a subdirectory under the mount point.");
}
* pg_mkdir_p() here, which avoids some failure modes; cf bug #13853.
*/
if (mkdir(path, pg_dir_create_mode) < 0)
- {
- pg_log_error("could not create directory \"%s\": %m", path);
- exit(1);
- }
+ pg_fatal("could not create directory \"%s\": %m", path);
free(path);
}
else if (strcmp(optarg, "libc") == 0)
locale_provider = COLLPROVIDER_LIBC;
else
- {
- pg_log_error("unrecognized locale provider: %s", optarg);
- exit(1);
- }
+ pg_fatal("unrecognized locale provider: %s", optarg);
break;
case 16:
icu_locale = pg_strdup(optarg);
break;
default:
/* getopt_long already emitted a complaint */
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (icu_locale && locale_provider != COLLPROVIDER_ICU)
- {
- pg_log_error("%s cannot be specified unless locale provider \"%s\" is chosen",
- "--icu-locale", "icu");
- exit(1);
- }
+ pg_fatal("%s cannot be specified unless locale provider \"%s\" is chosen",
+ "--icu-locale", "icu");
atexit(cleanup_directories_atexit);
/* must check that directory is readable */
if (pg_check_dir(pg_data) <= 0)
- {
- pg_log_error("could not access directory \"%s\": %m", pg_data);
- exit(1);
- }
+ pg_fatal("could not access directory \"%s\": %m", pg_data);
fputs(_("syncing data to disk ... "), stdout);
fflush(stdout);
}
if (pwprompt && pwfilename)
- {
- pg_log_error("password prompt and password file cannot be specified together");
- exit(1);
- }
+ pg_fatal("password prompt and password file cannot be specified together");
check_authmethod_unspecified(&authmethodlocal);
check_authmethod_unspecified(&authmethodhost);
/* verify that wal segment size is valid */
if (endptr == str_wal_segment_size_mb || *endptr != '\0')
- {
- pg_log_error("argument of --wal-segsize must be a number");
- exit(1);
- }
+ pg_fatal("argument of --wal-segsize must be a number");
if (!IsValidWalSegSize(wal_segment_size_mb * 1024 * 1024))
- {
- pg_log_error("argument of --wal-segsize must be a power of 2 between 1 and 1024");
- exit(1);
- }
+ pg_fatal("argument of --wal-segsize must be a power of 2 between 1 and 1024");
}
get_restricted_token();
username = effective_user;
if (strncmp(username, "pg_", 3) == 0)
- {
- pg_log_error("superuser name \"%s\" is disallowed; role names cannot begin with \"pg_\"", username);
- exit(1);
- }
+ pg_fatal("superuser name \"%s\" is disallowed; role names cannot begin with \"pg_\"", username);
printf(_("The files belonging to this database system will be owned "
"by user \"%s\".\n"
{
printf("\n");
pg_log_warning("enabling \"trust\" authentication for local connections");
- fprintf(stderr, _("You can change this by editing pg_hba.conf or using the option -A, or\n"
- "--auth-local and --auth-host, the next time you run initdb.\n"));
+ pg_log_warning_hint("You can change this by editing pg_hba.conf or using the option -A, or "
+ "--auth-local and --auth-host, the next time you run initdb.");
}
if (!noinstructions)
#define log_no_match(...) do { \
if (opts.strict_names) \
- pg_log_generic(PG_LOG_ERROR, __VA_ARGS__); \
+ pg_log_error(__VA_ARGS__); \
else \
- pg_log_generic(PG_LOG_WARNING, __VA_ARGS__); \
+ pg_log_warning(__VA_ARGS__); \
} while(0)
#define FREE_AND_SET_NULL(x) do { \
else if (pg_strcasecmp(optarg, "none") == 0)
opts.skip = "none";
else
- {
- pg_log_error("invalid argument for option %s", "--skip");
- exit(1);
- }
+ pg_fatal("invalid argument for option %s", "--skip");
break;
case 7:
errno = 0;
optval = strtoul(optarg, &endptr, 10);
if (endptr == optarg || *endptr != '\0' || errno != 0)
- {
- pg_log_error("invalid start block");
- exit(1);
- }
+ pg_fatal("invalid start block");
if (optval > MaxBlockNumber)
- {
- pg_log_error("start block out of bounds");
- exit(1);
- }
+ pg_fatal("start block out of bounds");
opts.startblock = optval;
break;
case 8:
errno = 0;
optval = strtoul(optarg, &endptr, 10);
if (endptr == optarg || *endptr != '\0' || errno != 0)
- {
- pg_log_error("invalid end block");
- exit(1);
- }
+ pg_fatal("invalid end block");
if (optval > MaxBlockNumber)
- {
- pg_log_error("end block out of bounds");
- exit(1);
- }
+ pg_fatal("end block out of bounds");
opts.endblock = optval;
break;
case 9:
opts.install_schema = pg_strdup(optarg);
break;
default:
- fprintf(stderr,
- _("Try \"%s --help\" for more information.\n"),
- progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
if (opts.endblock >= 0 && opts.endblock < opts.startblock)
- {
- pg_log_error("end block precedes start block");
- exit(1);
- }
+ pg_fatal("end block precedes start block");
/*
* A single non-option arguments specifies a database name or connection
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (opts.alldb)
{
if (db != NULL)
- {
- pg_log_error("cannot specify a database name with --all");
- exit(1);
- }
+ pg_fatal("cannot specify a database name with --all");
cparams.dbname = maintenance_db;
}
else if (db != NULL)
{
if (opts.dbpattern)
- {
- pg_log_error("cannot specify both a database name and database patterns");
- exit(1);
- }
+ pg_fatal("cannot specify both a database name and database patterns");
cparams.dbname = db;
}
{
if (conn != NULL)
disconnectDatabase(conn);
- pg_log_error("no databases to check");
+ pg_log_warning("no databases to check");
exit(0);
}
/* Querying the catalog failed. */
pg_log_error("database \"%s\": %s",
PQdb(conn), PQerrorMessage(conn));
- pg_log_info("query was: %s", amcheck_sql);
+ pg_log_error_detail("Query was: %s", amcheck_sql);
PQclear(result);
disconnectDatabase(conn);
exit(1);
{
if (conn != NULL)
disconnectDatabase(conn);
- pg_log_error("no relations to check");
- exit(1);
+ pg_fatal("no relations to check");
}
progress_report(reltotal, relprogress, pagestotal, pageschecked,
NULL, true, false);
pg_log_error("error sending command to database \"%s\": %s",
PQdb(slot->connection),
PQerrorMessage(slot->connection));
- pg_log_error("command was: %s", sql);
+ pg_log_error_detail("Command was: %s", sql);
exit(1);
}
}
pg_log_warning("btree index \"%s.%s.%s\": btree checking function returned unexpected number of rows: %d",
rel->datinfo->datname, rel->nspname, rel->relname, ntups);
if (opts.verbose)
- pg_log_info("query was: %s", rel->sql);
- pg_log_warning("Are %s's and amcheck's versions compatible?",
- progname);
+ pg_log_warning_detail("Query was: %s", rel->sql);
+ pg_log_warning_hint("Are %s's and amcheck's versions compatible?",
+ progname);
progress_since_last_stderr = false;
}
}
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
pg_log_error("query failed: %s", PQerrorMessage(conn));
- pg_log_info("query was: %s", sql.data);
+ pg_log_error_detail("Query was: %s", sql.data);
disconnectDatabase(conn);
exit(1);
}
*/
fatal = opts.strict_names;
if (pattern_id >= opts.include.len)
- {
- pg_log_error("internal error: received unexpected database pattern_id %d",
- pattern_id);
- exit(1);
- }
+ pg_fatal("internal error: received unexpected database pattern_id %d",
+ pattern_id);
log_no_match("no connectable databases to check matching \"%s\"",
opts.include.data[pattern_id].pattern);
}
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
pg_log_error("query failed: %s", PQerrorMessage(conn));
- pg_log_info("query was: %s", sql.data);
+ pg_log_error_detail("Query was: %s", sql.data);
disconnectDatabase(conn);
exit(1);
}
*/
if (pattern_id >= opts.include.len)
- {
- pg_log_error("internal error: received unexpected relation pattern_id %d",
- pattern_id);
- exit(1);
- }
+ pg_fatal("internal error: received unexpected relation pattern_id %d",
+ pattern_id);
opts.include.data[pattern_id].matched = true;
}
rc = unlink(WALFilePath);
if (rc != 0)
- {
- pg_log_error("could not remove file \"%s\": %m",
- WALFilePath);
- exit(1);
- }
+ pg_fatal("could not remove file \"%s\": %m",
+ WALFilePath);
}
}
if (errno)
- {
- pg_log_error("could not read archive location \"%s\": %m",
- archiveLocation);
- exit(1);
- }
+ pg_fatal("could not read archive location \"%s\": %m",
+ archiveLocation);
if (closedir(xldir))
- {
- pg_log_error("could not close archive location \"%s\": %m",
- archiveLocation);
- exit(1);
- }
- }
- else
- {
- pg_log_error("could not open archive location \"%s\": %m",
+ pg_fatal("could not close archive location \"%s\": %m",
archiveLocation);
- exit(1);
}
+ else
+ pg_fatal("could not open archive location \"%s\": %m",
+ archiveLocation);
}
/*
if (!fnameOK)
{
pg_log_error("invalid file name argument");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(2);
}
}
* from xlogfile names */
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(2);
- break;
}
}
else
{
pg_log_error("must specify archive location");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(2);
}
else
{
pg_log_error("must specify oldest kept WAL file");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(2);
}
if (optind < argc)
{
pg_log_error("too many command-line arguments");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(2);
}
{
streamer->file = fopen(pathname, "wb");
if (streamer->file == NULL)
- {
- pg_log_error("could not create file \"%s\": %m", pathname);
- exit(1);
- }
+ pg_fatal("could not create file \"%s\": %m", pathname);
streamer->should_close_file = true;
}
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- pg_log_error("could not write to file \"%s\": %m",
- mystreamer->pathname);
- exit(1);
+ pg_fatal("could not write to file \"%s\": %m",
+ mystreamer->pathname);
}
}
mystreamer = (bbstreamer_plain_writer *) streamer;
if (mystreamer->should_close_file && fclose(mystreamer->file) != 0)
- {
- pg_log_error("could not close file \"%s\": %m",
- mystreamer->pathname);
- exit(1);
- }
+ pg_fatal("could not close file \"%s\": %m",
+ mystreamer->pathname);
mystreamer->file = NULL;
mystreamer->should_close_file = false;
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- pg_log_error("could not write to file \"%s\": %m",
- mystreamer->filename);
- exit(1);
+ pg_fatal("could not write to file \"%s\": %m",
+ mystreamer->filename);
}
break;
default:
/* Shouldn't happen. */
- pg_log_error("unexpected state while extracting archive");
- exit(1);
+ pg_fatal("unexpected state while extracting archive");
}
}
pg_str_endswith(filename, "/pg_xlog") ||
pg_str_endswith(filename, "/archive_status")) &&
errno == EEXIST))
- {
- pg_log_error("could not create directory \"%s\": %m",
- filename);
- exit(1);
- }
+ pg_fatal("could not create directory \"%s\": %m",
+ filename);
}
#ifndef WIN32
if (chmod(filename, mode))
- {
- pg_log_error("could not set permissions on directory \"%s\": %m",
- filename);
- exit(1);
- }
+ pg_fatal("could not set permissions on directory \"%s\": %m",
+ filename);
#endif
}
extract_link(const char *filename, const char *linktarget)
{
if (symlink(linktarget, filename) != 0)
- {
- pg_log_error("could not create symbolic link from \"%s\" to \"%s\": %m",
- filename, linktarget);
- exit(1);
- }
+ pg_fatal("could not create symbolic link from \"%s\" to \"%s\": %m",
+ filename, linktarget);
}
/*
file = fopen(filename, "wb");
if (file == NULL)
- {
- pg_log_error("could not create file \"%s\": %m", filename);
- exit(1);
- }
+ pg_fatal("could not create file \"%s\": %m", filename);
#ifndef WIN32
if (chmod(filename, mode))
- {
- pg_log_error("could not set permissions on file \"%s\": %m",
- filename);
- exit(1);
- }
+ pg_fatal("could not set permissions on file \"%s\": %m",
+ filename);
#endif
return file;
{
streamer->gzfile = gzopen(pathname, "wb");
if (streamer->gzfile == NULL)
- {
- pg_log_error("could not create compressed file \"%s\": %m",
- pathname);
- exit(1);
- }
+ pg_fatal("could not create compressed file \"%s\": %m",
+ pathname);
}
else
{
int fd = dup(fileno(file));
if (fd < 0)
- {
- pg_log_error("could not duplicate stdout: %m");
- exit(1);
- }
+ pg_fatal("could not duplicate stdout: %m");
streamer->gzfile = gzdopen(fd, "wb");
if (streamer->gzfile == NULL)
- {
- pg_log_error("could not open output file: %m");
- exit(1);
- }
+ pg_fatal("could not open output file: %m");
}
if ((compress->options & BACKUP_COMPRESSION_OPTION_LEVEL) != 0 &&
gzsetparams(streamer->gzfile, compress->level,
Z_DEFAULT_STRATEGY) != Z_OK)
- {
- pg_log_error("could not set compression level %d: %s",
- compress->level, get_gz_error(streamer->gzfile));
- exit(1);
- }
+ pg_fatal("could not set compression level %d: %s",
+ compress->level, get_gz_error(streamer->gzfile));
return &streamer->base;
#else
- pg_log_error("this build does not support compression");
- exit(1);
+ pg_fatal("this build does not support gzip compression");
#endif
}
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- pg_log_error("could not write to compressed file \"%s\": %s",
- mystreamer->pathname, get_gz_error(mystreamer->gzfile));
- exit(1);
+ pg_fatal("could not write to compressed file \"%s\": %s",
+ mystreamer->pathname, get_gz_error(mystreamer->gzfile));
}
}
errno = 0; /* in case gzclose() doesn't set it */
if (gzclose(mystreamer->gzfile) != 0)
- {
- pg_log_error("could not close compressed file \"%s\": %m",
- mystreamer->pathname);
- exit(1);
- }
+ pg_fatal("could not close compressed file \"%s\": %m",
+ mystreamer->pathname);
mystreamer->gzfile = NULL;
}
* possible value for safety.
*/
if (inflateInit2(zs, 15 + 16) != Z_OK)
- {
- pg_log_error("could not initialize compression library");
- exit(1);
- }
+ pg_fatal("could not initialize compression library");
return &streamer->base;
#else
- pg_log_error("this build does not support compression");
- exit(1);
+ pg_fatal("this build does not support gzip compression");
#endif
}
default:
/* Shouldn't happen. */
- pg_log_error("unexpected state while injecting recovery settings");
- exit(1);
+ pg_fatal("unexpected state while injecting recovery settings");
}
bbstreamer_content(mystreamer->base.bbs_next, &mystreamer->member,
ctxError = LZ4F_createCompressionContext(&streamer->cctx, LZ4F_VERSION);
if (LZ4F_isError(ctxError))
-	pg_log_error("could not create lz4 compression context: %s",
-				 LZ4F_getErrorName(ctxError));
+	pg_fatal("could not create lz4 compression context: %s",
+			 LZ4F_getErrorName(ctxError));
return &streamer->base;
#else
- pg_log_error("this build does not support compression");
- exit(1);
+ pg_fatal("this build does not support lz4 compression");
#endif
}
/* Initialize internal stream state for decompression */
ctxError = LZ4F_createDecompressionContext(&streamer->dctx, LZ4F_VERSION);
if (LZ4F_isError(ctxError))
- {
- pg_log_error("could not initialize compression library: %s",
- LZ4F_getErrorName(ctxError));
- exit(1);
- }
+ pg_fatal("could not initialize compression library: %s",
+ LZ4F_getErrorName(ctxError));
return &streamer->base;
#else
- pg_log_error("this build does not support compression");
- exit(1);
+ pg_fatal("this build does not support lz4 compression");
#endif
}
*/
bbstreamer_buffer_bytes(streamer, &data, &len, len);
if (len > 2 * TAR_BLOCK_SIZE)
- {
- pg_log_error("tar file trailer exceeds 2 blocks");
- exit(1);
- }
+ pg_fatal("tar file trailer exceeds 2 blocks");
return;
default:
/* Shouldn't happen. */
- pg_log_error("unexpected state while parsing tar archive");
- exit(1);
+ pg_fatal("unexpected state while parsing tar archive");
}
}
}
*/
strlcpy(member->pathname, &buffer[0], MAXPGPATH);
if (member->pathname[0] == '\0')
- {
- pg_log_error("tar member has empty name");
- exit(1);
- }
+ pg_fatal("tar member has empty name");
member->size = read_tar_number(&buffer[124], 12);
member->mode = read_tar_number(&buffer[100], 8);
member->uid = read_tar_number(&buffer[108], 8);
if (mystreamer->next_context != BBSTREAMER_ARCHIVE_TRAILER &&
(mystreamer->next_context != BBSTREAMER_MEMBER_HEADER ||
mystreamer->base.bbs_buffer.len > 0))
- {
- pg_log_error("COPY stream ended before last file was finished");
- exit(1);
- }
+ pg_fatal("COPY stream ended before last file was finished");
/* Send the archive trailer, even if empty. */
bbstreamer_content(streamer->bbs_next, NULL,
streamer->cctx = ZSTD_createCCtx();
if (!streamer->cctx)
- {
- pg_log_error("could not create zstd compression context");
- exit(1);
- }
+ pg_fatal("could not create zstd compression context");
/* Set compression level, if specified */
if ((compress->options & BACKUP_COMPRESSION_OPTION_LEVEL) != 0)
ret = ZSTD_CCtx_setParameter(streamer->cctx, ZSTD_c_compressionLevel,
compress->level);
if (ZSTD_isError(ret))
- {
- pg_log_error("could not set zstd compression level to %d: %s",
- compress->level, ZSTD_getErrorName(ret));
- exit(1);
- }
+ pg_fatal("could not set zstd compression level to %d: %s",
+ compress->level, ZSTD_getErrorName(ret));
}
/* Set # of workers, if specified */
ret = ZSTD_CCtx_setParameter(streamer->cctx, ZSTD_c_nbWorkers,
compress->workers);
if (ZSTD_isError(ret))
- {
- pg_log_error("could not set compression worker count to %d: %s",
- compress->workers, ZSTD_getErrorName(ret));
- exit(1);
- }
+ pg_fatal("could not set compression worker count to %d: %s",
+ compress->workers, ZSTD_getErrorName(ret));
}
/* Initialize the ZSTD output buffer. */
return &streamer->base;
#else
- pg_log_error("this build does not support zstd compression");
- exit(1);
+ pg_fatal("this build does not support zstd compression");
#endif
}
streamer->dctx = ZSTD_createDCtx();
if (!streamer->dctx)
- {
- pg_log_error("could not create zstd decompression context");
- exit(1);
- }
+ pg_fatal("could not create zstd decompression context");
/* Initialize the ZSTD output buffer. */
streamer->zstd_outBuf.dst = streamer->base.bbs_buffer.data;
return &streamer->base;
#else
- pg_log_error("this build does not support compression");
- exit(1);
+ pg_fatal("this build does not support zstd compression");
#endif
}
&mystreamer->zstd_outBuf, &inBuf);
if (ZSTD_isError(ret))
- pg_log_error("could not decompress data: %s", ZSTD_getErrorName(ret));
+ pg_log_error("could not decompress data: %s",
+ ZSTD_getErrorName(ret));
}
}
for (arg_ptr = arg; *arg_ptr; arg_ptr++)
{
if (dst_ptr - dst >= MAXPGPATH)
- {
- pg_log_error("directory name too long");
- exit(1);
- }
+ pg_fatal("directory name too long");
if (*arg_ptr == '\\' && *(arg_ptr + 1) == '=')
; /* skip backslash escaping = */
else if (*arg_ptr == '=' && (arg_ptr == arg || *(arg_ptr - 1) != '\\'))
{
if (*cell->new_dir)
- {
- pg_log_error("multiple \"=\" signs in tablespace mapping");
- exit(1);
- }
+ pg_fatal("multiple \"=\" signs in tablespace mapping");
else
dst = dst_ptr = cell->new_dir;
}
}
if (!*cell->old_dir || !*cell->new_dir)
- {
- pg_log_error("invalid tablespace mapping format \"%s\", must be \"OLDDIR=NEWDIR\"", arg);
- exit(1);
- }
+ pg_fatal("invalid tablespace mapping format \"%s\", must be \"OLDDIR=NEWDIR\"", arg);
/*
* This check isn't absolutely necessary. But all tablespaces are created
* consistent with the new_dir check.
*/
if (!is_absolute_path(cell->old_dir))
- {
- pg_log_error("old directory is not an absolute path in tablespace mapping: %s",
- cell->old_dir);
- exit(1);
- }
+ pg_fatal("old directory is not an absolute path in tablespace mapping: %s",
+ cell->old_dir);
if (!is_absolute_path(cell->new_dir))
- {
- pg_log_error("new directory is not an absolute path in tablespace mapping: %s",
- cell->new_dir);
- exit(1);
- }
+ pg_fatal("new directory is not an absolute path in tablespace mapping: %s",
+ cell->new_dir);
/*
* Comparisons done with these values should involve similarly
MemSet(xlogend, 0, sizeof(xlogend));
r = read(bgpipe[0], xlogend, sizeof(xlogend) - 1);
if (r < 0)
- {
- pg_log_error("could not read from ready pipe: %m");
- exit(1);
- }
+ pg_fatal("could not read from ready pipe: %m");
if (sscanf(xlogend, "%X/%X", &hi, &lo) != 2)
- {
- pg_log_error("could not parse write-ahead log location \"%s\"",
- xlogend);
- exit(1);
- }
+ pg_fatal("could not parse write-ahead log location \"%s\"",
+ xlogend);
xlogendptr = ((uint64) hi) << 32 | lo;
has_xlogendptr = 1;
/* Convert the starting position */
if (sscanf(startpos, "%X/%X", &hi, &lo) != 2)
- {
- pg_log_error("could not parse write-ahead log location \"%s\"",
- startpos);
- exit(1);
- }
+ pg_fatal("could not parse write-ahead log location \"%s\"",
+ startpos);
param->startptr = ((uint64) hi) << 32 | lo;
/* Round off to even segment position */
param->startptr -= XLogSegmentOffset(param->startptr, WalSegSz);
#ifndef WIN32
/* Create our background pipe */
if (pipe(bgpipe) < 0)
- {
- pg_log_error("could not create pipe for background process: %m");
- exit(1);
- }
+ pg_fatal("could not create pipe for background process: %m");
#endif
/* Get a second connection */
"pg_xlog" : "pg_wal");
if (pg_mkdir_p(statusdir, pg_dir_create_mode) != 0 && errno != EEXIST)
- {
- pg_log_error("could not create directory \"%s\": %m", statusdir);
- exit(1);
- }
+ pg_fatal("could not create directory \"%s\": %m", statusdir);
}
/*
exit(ret);
}
else if (bgchild < 0)
- {
- pg_log_error("could not create background process: %m");
- exit(1);
- }
+ pg_fatal("could not create background process: %m");
/*
* Else we are in the parent process and all is well.
#else /* WIN32 */
bgchild = _beginthreadex(NULL, 0, (void *) LogStreamerMain, param, 0, NULL);
if (bgchild == 0)
- {
- pg_log_error("could not create background thread: %m");
- exit(1);
- }
+ pg_fatal("could not create background thread: %m");
#endif
}
* Does not exist, so create
*/
if (pg_mkdir_p(dirname, pg_dir_create_mode) == -1)
- {
- pg_log_error("could not create directory \"%s\": %m", dirname);
- exit(1);
- }
+ pg_fatal("could not create directory \"%s\": %m", dirname);
if (created)
*created = true;
return;
/*
* Exists, not empty
*/
- pg_log_error("directory \"%s\" exists but is not empty", dirname);
- exit(1);
+ pg_fatal("directory \"%s\" exists but is not empty", dirname);
case -1:
/*
* Access problem
*/
- pg_log_error("could not access directory \"%s\": %m", dirname);
- exit(1);
+ pg_fatal("could not access directory \"%s\": %m", dirname);
}
}
errno = 0;
result = strtod(src, &after_num);
if (src == after_num)
- {
- pg_log_error("transfer rate \"%s\" is not a valid value", src);
- exit(1);
- }
+ pg_fatal("transfer rate \"%s\" is not a valid value", src);
if (errno != 0)
- {
- pg_log_error("invalid transfer rate \"%s\": %m", src);
- exit(1);
- }
+ pg_fatal("invalid transfer rate \"%s\": %m", src);
if (result <= 0)
{
/*
* Reject obviously wrong values here.
*/
- pg_log_error("transfer rate must be greater than zero");
- exit(1);
+ pg_fatal("transfer rate must be greater than zero");
}
/*
after_num++;
if (*after_num != '\0')
- {
- pg_log_error("invalid --max-rate unit: \"%s\"", suffix);
- exit(1);
- }
+ pg_fatal("invalid --max-rate unit: \"%s\"", suffix);
/* Valid integer? */
if ((uint64) result != (uint64) ((uint32) result))
- {
- pg_log_error("transfer rate \"%s\" exceeds integer range", src);
- exit(1);
- }
+ pg_fatal("transfer rate \"%s\" exceeds integer range", src);
/*
* The range is checked on the server side too, but avoid the server
* connection if a nonsensical value was passed.
*/
if (result < MAX_RATE_LOWER || result > MAX_RATE_UPPER)
- {
- pg_log_error("transfer rate \"%s\" is out of range", src);
- exit(1);
- }
+ pg_fatal("transfer rate \"%s\" is out of range", src);
return (int32) result;
}
/* Get the COPY data stream. */
res = PQgetResult(conn);
if (PQresultStatus(res) != PGRES_COPY_OUT)
- {
- pg_log_error("could not get COPY data stream: %s",
- PQerrorMessage(conn));
- exit(1);
- }
+ pg_fatal("could not get COPY data stream: %s",
+ PQerrorMessage(conn));
PQclear(res);
/* Loop over chunks until done. */
break;
}
else if (r == -2)
- {
- pg_log_error("could not read COPY data: %s",
- PQerrorMessage(conn));
- exit(1);
- }
+ pg_fatal("could not read COPY data: %s",
+ PQerrorMessage(conn));
if (bgchild_exited)
- {
- pg_log_error("background process terminated unexpectedly");
- exit(1);
- }
+ pg_fatal("background process terminated unexpectedly");
(*callback) (r, copybuf, callback_data);
if (must_parse_archive && !is_tar && !is_compressed_tar)
{
pg_log_error("unable to parse archive: %s", archive_name);
- pg_log_info("only tar archives can be parsed");
+ pg_log_error_detail("Only tar archives can be parsed.");
if (format == 'p')
- pg_log_info("plain format requires pg_basebackup to parse the archive");
+ pg_log_error_detail("Plain format requires pg_basebackup to parse the archive.");
if (inject_manifest)
- pg_log_info("using - as the output directory requires pg_basebackup to parse the archive");
+ pg_log_error_detail("Using - as the output directory requires pg_basebackup to parse the archive.");
if (writerecoveryconf)
- pg_log_info("the -R option requires pg_basebackup to parse the archive");
+ pg_log_error_detail("The -R option requires pg_basebackup to parse the archive.");
exit(1);
}
/* Sanity check. */
if (state->manifest_buffer != NULL ||
state->manifest_file !=NULL)
- {
- pg_log_error("archives should precede manifest");
- exit(1);
- }
+ pg_fatal("archives should precede manifest");
/* Parse the rest of the CopyData message. */
archive_name = GetCopyDataString(r, copybuf, &cursor);
if (archive_name[0] == '\0' || archive_name[0] == '.' ||
strchr(archive_name, '/') != NULL ||
strchr(archive_name, '\\') != NULL)
- {
- pg_log_error("invalid archive name: \"%s\"",
- archive_name);
- exit(1);
- }
+ pg_fatal("invalid archive name: \"%s\"",
+ archive_name);
/*
* An empty spclocation is treated as NULL. We expect this
*/
if (errno == 0)
errno = ENOSPC;
- pg_log_error("could not write to file \"%s\": %m",
- state->manifest_filename);
- exit(1);
+ pg_fatal("could not write to file \"%s\": %m",
+ state->manifest_filename);
}
}
else if (state->streamer != NULL)
r - 1, BBSTREAMER_UNKNOWN);
}
else
- {
- pg_log_error("unexpected payload data");
- exit(1);
- }
+ pg_fatal("unexpected payload data");
break;
}
state->manifest_file =
fopen(state->manifest_filename, "wb");
if (state->manifest_file == NULL)
- {
- pg_log_error("could not create file \"%s\": %m",
- state->manifest_filename);
- exit(1);
- }
+ pg_fatal("could not create file \"%s\": %m",
+ state->manifest_filename);
}
}
break;
ReportCopyDataParseError(size_t r, char *copybuf)
{
if (r == 0)
- pg_log_error("empty COPY message");
+ pg_fatal("empty COPY message");
else
- pg_log_error("malformed COPY message of type %d, length %zu",
- copybuf[0], r);
- exit(1);
+ pg_fatal("malformed COPY message of type %d, length %zu",
+ copybuf[0], r);
}
/*
initPQExpBuffer(&buf);
ReceiveBackupManifestInMemory(conn, &buf);
if (PQExpBufferDataBroken(buf))
- {
- pg_log_error("out of memory");
- exit(1);
- }
+ pg_fatal("out of memory");
/* Inject it into the output tarfile. */
bbstreamer_inject_file(manifest_inject_streamer, "backup_manifest",
"%s/backup_manifest.tmp", basedir);
state.file = fopen(state.filename, "wb");
if (state.file == NULL)
- {
- pg_log_error("could not create file \"%s\": %m", state.filename);
- exit(1);
- }
+ pg_fatal("could not create file \"%s\": %m", state.filename);
ReceiveCopyData(conn, ReceiveBackupManifestChunk, &state);
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- pg_log_error("could not write to file \"%s\": %m", state->filename);
- exit(1);
+ pg_fatal("could not write to file \"%s\": %m", state->filename);
}
}
{
const char *serverver = PQparameterStatus(conn, "server_version");
- pg_log_error("incompatible server version %s",
- serverver ? serverver : "'unknown'");
- exit(1);
+ pg_fatal("incompatible server version %s",
+ serverver ? serverver : "'unknown'");
}
if (serverMajor >= 1500)
use_new_option_syntax = true;
char *colon;
if (serverMajor < 1500)
- {
- pg_log_error("backup targets are not supported by this server version");
- exit(1);
- }
+ pg_fatal("backup targets are not supported by this server version");
if (writerecoveryconf)
- {
- pg_log_error("recovery configuration cannot be written when a backup target is used");
- exit(1);
- }
+ pg_fatal("recovery configuration cannot be written when a backup target is used");
AppendPlainCommandOption(&buf, use_new_option_syntax, "TABLESPACE_MAP");
if (compressloc == COMPRESS_LOCATION_SERVER)
{
if (!use_new_option_syntax)
- {
- pg_log_error("server does not support server-side compression");
- exit(1);
- }
+ pg_fatal("server does not support server-side compression");
AppendStringCommandOption(&buf, use_new_option_syntax,
"COMPRESSION", compression_algorithm);
if (compression_detail != NULL)
basebkp = psprintf("BASE_BACKUP %s", buf.data);
if (PQsendQuery(conn, basebkp) == 0)
- {
- pg_log_error("could not send replication command \"%s\": %s",
- "BASE_BACKUP", PQerrorMessage(conn));
- exit(1);
- }
+ pg_fatal("could not send replication command \"%s\": %s",
+ "BASE_BACKUP", PQerrorMessage(conn));
/*
* Get the starting WAL location
*/
res = PQgetResult(conn);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
- {
- pg_log_error("could not initiate base backup: %s",
- PQerrorMessage(conn));
- exit(1);
- }
+ pg_fatal("could not initiate base backup: %s",
+ PQerrorMessage(conn));
if (PQntuples(res) != 1)
- {
- pg_log_error("server returned unexpected response to BASE_BACKUP command; got %d rows and %d fields, expected %d rows and %d fields",
- PQntuples(res), PQnfields(res), 1, 2);
- exit(1);
- }
+ pg_fatal("server returned unexpected response to BASE_BACKUP command; got %d rows and %d fields, expected %d rows and %d fields",
+ PQntuples(res), PQnfields(res), 1, 2);
strlcpy(xlogstart, PQgetvalue(res, 0, 0), sizeof(xlogstart));
*/
res = PQgetResult(conn);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
- {
- pg_log_error("could not get backup header: %s",
- PQerrorMessage(conn));
- exit(1);
- }
+ pg_fatal("could not get backup header: %s",
+ PQerrorMessage(conn));
if (PQntuples(res) < 1)
- {
- pg_log_error("no data returned from server");
- exit(1);
- }
+ pg_fatal("no data returned from server");
/*
* Sum up the total size, for progress reporting
writing_to_stdout = format == 't' && basedir != NULL &&
strcmp(basedir, "-") == 0;
if (writing_to_stdout && PQntuples(res) > 1)
- {
- pg_log_error("can only write single tablespace to stdout, database has %d",
- PQntuples(res));
- exit(1);
- }
+ pg_fatal("can only write single tablespace to stdout, database has %d",
+ PQntuples(res));
/*
* If we're streaming WAL, start the streaming session before we start
*/
res = PQgetResult(conn);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
- {
- pg_log_error("backup failed: %s",
- PQerrorMessage(conn));
- exit(1);
- }
+ pg_fatal("backup failed: %s",
+ PQerrorMessage(conn));
if (PQntuples(res) != 1)
- {
- pg_log_error("no write-ahead log end position returned from server");
- exit(1);
- }
+ pg_fatal("no write-ahead log end position returned from server");
strlcpy(xlogend, PQgetvalue(res, 0, 0), sizeof(xlogend));
if (verbose && includewal != NO_WAL)
pg_log_info("write-ahead log end point: %s", xlogend);
#ifndef WIN32
if (write(bgpipe[1], xlogend, strlen(xlogend)) != strlen(xlogend))
- {
- pg_log_info("could not send command to background pipe: %m");
- exit(1);
- }
+ pg_fatal("could not send command to background pipe: %m");
/* Just wait for the background process to exit */
r = waitpid(bgchild, &status, 0);
if (r == (pid_t) -1)
- {
- pg_log_error("could not wait for child process: %m");
- exit(1);
- }
+ pg_fatal("could not wait for child process: %m");
if (r != bgchild)
- {
- pg_log_error("child %d died, expected %d", (int) r, (int) bgchild);
- exit(1);
- }
+ pg_fatal("child %d died, expected %d", (int) r, (int) bgchild);
if (status != 0)
- {
- pg_log_error("%s", wait_result_to_str(status));
- exit(1);
- }
+ pg_fatal("%s", wait_result_to_str(status));
/* Exited normally, we're happy! */
#else /* WIN32 */
* it's there.
*/
if (sscanf(xlogend, "%X/%X", &hi, &lo) != 2)
- {
- pg_log_error("could not parse write-ahead log location \"%s\"",
- xlogend);
- exit(1);
- }
+ pg_fatal("could not parse write-ahead log location \"%s\"",
+ xlogend);
xlogendptr = ((uint64) hi) << 32 | lo;
InterlockedIncrement(&has_xlogendptr);
WAIT_OBJECT_0)
{
_dosmaperr(GetLastError());
- pg_log_error("could not wait for child thread: %m");
- exit(1);
+ pg_fatal("could not wait for child thread: %m");
}
if (GetExitCodeThread((HANDLE) bgchild_handle, &status) == 0)
{
_dosmaperr(GetLastError());
- pg_log_error("could not get child thread exit status: %m");
- exit(1);
+ pg_fatal("could not get child thread exit status: %m");
}
if (status != 0)
- {
- pg_log_error("child thread exited with error %u",
- (unsigned int) status);
- exit(1);
- }
+ pg_fatal("child thread exited with error %u",
+ (unsigned int) status);
/* Exited normally, we're happy */
#endif
}
else
{
if (rename(tmp_filename, filename) != 0)
- {
- pg_log_error("could not rename file \"%s\" to \"%s\": %m",
- tmp_filename, filename);
- exit(1);
- }
+ pg_fatal("could not rename file \"%s\" to \"%s\": %m",
+ tmp_filename, filename);
}
}
else if (strcmp(optarg, "t") == 0 || strcmp(optarg, "tar") == 0)
format = 't';
else
- {
- pg_log_error("invalid output format \"%s\", must be \"plain\" or \"tar\"",
- optarg);
- exit(1);
- }
+ pg_fatal("invalid output format \"%s\", must be \"plain\" or \"tar\"",
+ optarg);
break;
case 'r':
maxrate = parse_max_rate(optarg);
includewal = STREAM_WAL;
}
else
- {
- pg_log_error("invalid wal-method option \"%s\", must be \"fetch\", \"stream\", or \"none\"",
- optarg);
- exit(1);
- }
+ pg_fatal("invalid wal-method option \"%s\", must be \"fetch\", \"stream\", or \"none\"",
+ optarg);
break;
case 1:
xlog_dir = pg_strdup(optarg);
else if (pg_strcasecmp(optarg, "spread") == 0)
fastcheckpoint = false;
else
- {
- pg_log_error("invalid checkpoint argument \"%s\", must be \"fast\" or \"spread\"",
- optarg);
- exit(1);
- }
+ pg_fatal("invalid checkpoint argument \"%s\", must be \"fast\" or \"spread\"",
+ optarg);
break;
case 'd':
connection_string = pg_strdup(optarg);
manifest_checksums = pg_strdup(optarg);
break;
default:
-
- /*
- * getopt_long already emitted a complaint
- */
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (backup_target != NULL && format != '\0')
{
pg_log_error("cannot specify both format and backup target");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (format == '\0')
if (basedir == NULL && backup_target == NULL)
{
pg_log_error("must specify output directory or backup target");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (basedir != NULL && backup_target != NULL)
{
pg_log_error("cannot specify both output directory and backup target");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
char *error_detail;
if (!parse_bc_algorithm(compression_algorithm, &alg))
- {
- pg_log_error("unrecognized compression algorithm \"%s\"",
- compression_algorithm);
- exit(1);
- }
+ pg_fatal("unrecognized compression algorithm \"%s\"",
+ compression_algorithm);
parse_bc_specification(alg, compression_detail, &client_compress);
error_detail = validate_bc_specification(&client_compress);
if (error_detail != NULL)
- {
- pg_log_error("invalid compression specification: %s",
- error_detail);
- exit(1);
- }
+ pg_fatal("invalid compression specification: %s",
+ error_detail);
}
else
{
if (backup_target != NULL && compressloc == COMPRESS_LOCATION_CLIENT)
{
pg_log_error("client-side compression is not possible when a backup target is specified");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
client_compress.algorithm != BACKUP_COMPRESSION_NONE)
{
pg_log_error("only tar mode backups can be compressed");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (backup_target != NULL && includewal == STREAM_WAL)
{
pg_log_error("WAL cannot be streamed when a backup target is specified");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (format == 't' && includewal == STREAM_WAL && strcmp(basedir, "-") == 0)
{
pg_log_error("cannot stream write-ahead logs in tar mode to stdout");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (replication_slot && includewal != STREAM_WAL)
{
pg_log_error("replication slots can only be used with WAL streaming");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (replication_slot)
{
pg_log_error("--no-slot cannot be used with slot name");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
temp_replication_slot = false;
{
pg_log_error("%s needs a slot to be specified using --slot",
"--create-slot");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
{
pg_log_error("%s and %s are incompatible options",
"--create-slot", "--no-slot");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
if (backup_target != NULL)
{
pg_log_error("WAL directory location cannot be specified along with a backup target");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (format != 'p')
{
pg_log_error("WAL directory location can only be specified in plain mode");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (!is_absolute_path(xlog_dir))
{
pg_log_error("WAL directory location must be an absolute path");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
{
pg_log_error("%s and %s are incompatible options",
"--progress", "--no-estimate-size");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
{
pg_log_error("%s and %s are incompatible options",
"--no-manifest", "--manifest-checksums");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
{
pg_log_error("%s and %s are incompatible options",
"--no-manifest", "--manifest-force-encode");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
#ifdef HAVE_SYMLINK
if (symlink(xlog_dir, linkloc) != 0)
- {
- pg_log_error("could not create symbolic link \"%s\": %m", linkloc);
- exit(1);
- }
+ pg_fatal("could not create symbolic link \"%s\": %m", linkloc);
#else
- pg_log_error("symlinks are not supported on this platform");
- exit(1);
+ pg_fatal("symlinks are not supported on this platform");
#endif
free(linkloc);
}
Assert(dest_folder != NULL);
dir = opendir(dest_folder);
if (dir == NULL)
- {
- pg_log_error("could not open directory \"%s\": %m", dest_folder);
- exit(1);
- }
+ pg_fatal("could not open directory \"%s\": %m", dest_folder);
return dir;
}
{
Assert(dest_dir != NULL && dest_folder != NULL);
if (closedir(dest_dir))
- {
- pg_log_error("could not close directory \"%s\": %m", dest_folder);
- exit(1);
- }
+ pg_fatal("could not close directory \"%s\": %m", dest_folder);
}
snprintf(fullpath, sizeof(fullpath), "%s/%s", basedir, dirent->d_name);
if (stat(fullpath, &statbuf) != 0)
- {
- pg_log_error("could not stat file \"%s\": %m", fullpath);
- exit(1);
- }
+ pg_fatal("could not stat file \"%s\": %m", fullpath);
if (statbuf.st_size != WalSegSz)
{
fd = open(fullpath, O_RDONLY | PG_BINARY, 0);
if (fd < 0)
- {
- pg_log_error("could not open compressed file \"%s\": %m",
- fullpath);
- exit(1);
- }
+ pg_fatal("could not open compressed file \"%s\": %m",
+ fullpath);
if (lseek(fd, (off_t) (-4), SEEK_END) < 0)
- {
- pg_log_error("could not seek in compressed file \"%s\": %m",
- fullpath);
- exit(1);
- }
+ pg_fatal("could not seek in compressed file \"%s\": %m",
+ fullpath);
r = read(fd, (char *) buf, sizeof(buf));
if (r != sizeof(buf))
{
if (r < 0)
- pg_log_error("could not read compressed file \"%s\": %m",
- fullpath);
+ pg_fatal("could not read compressed file \"%s\": %m",
+ fullpath);
else
- pg_log_error("could not read compressed file \"%s\": read %d of %zu",
- fullpath, r, sizeof(buf));
- exit(1);
+ pg_fatal("could not read compressed file \"%s\": read %d of %zu",
+ fullpath, r, sizeof(buf));
}
close(fd);
fd = open(fullpath, O_RDONLY | PG_BINARY, 0);
if (fd < 0)
- {
- pg_log_error("could not open file \"%s\": %m", fullpath);
- exit(1);
- }
+ pg_fatal("could not open file \"%s\": %m", fullpath);
status = LZ4F_createDecompressionContext(&ctx, LZ4F_VERSION);
if (LZ4F_isError(status))
- {
- pg_log_error("could not create LZ4 decompression context: %s",
- LZ4F_getErrorName(status));
- exit(1);
- }
+ pg_fatal("could not create LZ4 decompression context: %s",
+ LZ4F_getErrorName(status));
outbuf = pg_malloc0(LZ4_CHUNK_SZ);
readbuf = pg_malloc0(LZ4_CHUNK_SZ);
r = read(fd, readbuf, LZ4_CHUNK_SZ);
if (r < 0)
- {
- pg_log_error("could not read file \"%s\": %m", fullpath);
- exit(1);
- }
+ pg_fatal("could not read file \"%s\": %m", fullpath);
/* Done reading the file */
if (r == 0)
status = LZ4F_decompress(ctx, outbuf, &out_size,
readp, &read_size, &dec_opt);
if (LZ4F_isError(status))
- {
- pg_log_error("could not decompress file \"%s\": %s",
- fullpath,
- LZ4F_getErrorName(status));
- exit(1);
- }
+ pg_fatal("could not decompress file \"%s\": %s",
+ fullpath,
+ LZ4F_getErrorName(status));
readp += read_size;
uncompressed_size += out_size;
status = LZ4F_freeDecompressionContext(ctx);
if (LZ4F_isError(status))
- {
- pg_log_error("could not free LZ4 decompression context: %s",
- LZ4F_getErrorName(status));
- exit(1);
- }
+ pg_fatal("could not free LZ4 decompression context: %s",
+ LZ4F_getErrorName(status));
if (uncompressed_size != WalSegSz)
{
#else
pg_log_error("could not check file \"%s\"",
dirent->d_name);
- pg_log_error("this build does not support compression with %s",
- "LZ4");
+ pg_log_error_detail("This build does not support compression with %s.",
+ "LZ4");
exit(1);
#endif
}
}
if (errno)
- {
- pg_log_error("could not read directory \"%s\": %m", basedir);
- exit(1);
- }
+ pg_fatal("could not read directory \"%s\": %m", basedir);
close_destination_dir(dir, basedir);
break;
case 'E':
if (sscanf(optarg, "%X/%X", &hi, &lo) != 2)
- {
- pg_log_error("could not parse end position \"%s\"", optarg);
- exit(1);
- }
+ pg_fatal("could not parse end position \"%s\"", optarg);
endpos = ((uint64) hi) << 32 | lo;
break;
case 'n':
else if (pg_strcasecmp(optarg, "none") == 0)
compression_method = COMPRESSION_NONE;
else
- {
- pg_log_error("invalid value \"%s\" for option %s",
- optarg, "--compression-method");
- exit(1);
- }
+ pg_fatal("invalid value \"%s\" for option %s",
+ optarg, "--compression-method");
break;
default:
-
- /*
- * getopt_long already emitted a complaint
- */
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (do_drop_slot && do_create_slot)
{
pg_log_error("cannot use --create-slot together with --drop-slot");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
/* translator: second %s is an option name */
pg_log_error("%s needs a slot to be specified using --slot",
do_drop_slot ? "--drop-slot" : "--create-slot");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (synchronous && !do_sync)
{
pg_log_error("cannot use --synchronous together with --no-sync");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (basedir == NULL && !do_drop_slot && !do_create_slot)
{
pg_log_error("no target directory specified");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
{
pg_log_error("cannot use --compress with --compression-method=%s",
"none");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
break;
compresslevel = Z_DEFAULT_COMPRESSION;
}
#else
- pg_log_error("this build does not support compression with %s",
- "gzip");
- exit(1);
+ pg_fatal("this build does not support compression with %s",
+ "gzip");
#endif
break;
case COMPRESSION_LZ4:
{
pg_log_error("cannot use --compress with --compression-method=%s",
"lz4");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
#else
- pg_log_error("this build does not support compression with %s",
- "LZ4");
- exit(1);
+ pg_fatal("this build does not support compression with %s",
+ "LZ4");
#endif
break;
case COMPRESSION_ZSTD:
- pg_log_error("compression with %s is not yet supported", "ZSTD");
- exit(1);
-
+ pg_fatal("compression with %s is not yet supported", "ZSTD");
+ break;
}
* be defined in this context.
*/
if (db_name)
- {
- pg_log_error("replication connection using slot \"%s\" is unexpectedly database specific",
- replication_slot);
- exit(1);
- }
+ pg_fatal("replication connection using slot \"%s\" is unexpectedly database specific",
+ replication_slot);
/*
* Set umask so that directories/files are created with the same
exit(0);
}
else if (noloop)
- {
- pg_log_error("disconnected");
- exit(1);
- }
+ pg_fatal("disconnected");
else
{
/* translator: check source for value for %d */
return true;
if (fsync(outfd) != 0)
- {
- pg_log_fatal("could not fsync file \"%s\": %m", outfile);
- exit(1);
- }
+ pg_fatal("could not fsync file \"%s\": %m", outfile);
return true;
}
/* replication options */
case 'I':
if (sscanf(optarg, "%X/%X", &hi, &lo) != 2)
- {
- pg_log_error("could not parse start position \"%s\"", optarg);
- exit(1);
- }
+ pg_fatal("could not parse start position \"%s\"", optarg);
startpos = ((uint64) hi) << 32 | lo;
break;
case 'E':
if (sscanf(optarg, "%X/%X", &hi, &lo) != 2)
- {
- pg_log_error("could not parse end position \"%s\"", optarg);
- exit(1);
- }
+ pg_fatal("could not parse end position \"%s\"", optarg);
endpos = ((uint64) hi) << 32 | lo;
break;
case 'o':
break;
default:
-
- /*
- * getopt_long already emitted a complaint
- */
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (replication_slot == NULL)
{
pg_log_error("no slot specified");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (do_start_slot && outfile == NULL)
{
pg_log_error("no target file specified");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (!do_drop_slot && dbname == NULL)
{
pg_log_error("no database specified");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (!do_drop_slot && !do_create_slot && !do_start_slot)
{
pg_log_error("at least one action needs to be specified");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (do_drop_slot && (do_create_slot || do_start_slot))
{
pg_log_error("cannot use --create-slot or --start together with --drop-slot");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (startpos != InvalidXLogRecPtr && (do_create_slot || do_drop_slot))
{
pg_log_error("cannot use --create-slot or --drop-slot together with --startpos");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (endpos != InvalidXLogRecPtr && !do_start_slot)
{
pg_log_error("--endpos may only be specified with --start");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (two_phase && !do_create_slot)
{
pg_log_error("--two-phase may only be specified with --create-slot");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
exit(1);
if (db_name == NULL)
- {
- pg_log_error("could not establish database-specific replication connection");
- exit(1);
- }
+ pg_fatal("could not establish database-specific replication connection");
/*
* Set umask so that directories/files are created with the same
exit(0);
}
else if (noloop)
- {
- pg_log_error("disconnected");
- exit(1);
- }
+ pg_fatal("disconnected");
else
{
/* translator: check source for value for %d */
/* fsync file in case of a previous crash */
if (stream->walmethod->sync(f) != 0)
{
- pg_log_fatal("could not fsync existing write-ahead log file \"%s\": %s",
+ pg_log_error("could not fsync existing write-ahead log file \"%s\": %s",
fn, stream->walmethod->getlasterror());
stream->walmethod->close(f, CLOSE_UNLINK);
exit(1);
if (stream->synchronous && lastFlushPosition < blockpos && walfile != NULL)
{
if (stream->walmethod->sync(walfile) != 0)
- {
- pg_log_fatal("could not fsync file \"%s\": %s",
- current_walfile_name, stream->walmethod->getlasterror());
- exit(1);
- }
+ pg_fatal("could not fsync file \"%s\": %s",
+ current_walfile_name, stream->walmethod->getlasterror());
lastFlushPosition = blockpos;
/*
* shutdown of the server.
*/
if (stream->walmethod->sync(walfile) != 0)
- {
- pg_log_fatal("could not fsync file \"%s\": %s",
- current_walfile_name, stream->walmethod->getlasterror());
- exit(1);
- }
+ pg_fatal("could not fsync file \"%s\": %s",
+ current_walfile_name, stream->walmethod->getlasterror());
lastFlushPosition = blockpos;
}
{
conn_opts = PQconninfoParse(connection_string, &err_msg);
if (conn_opts == NULL)
- {
- pg_log_error("%s", err_msg);
- exit(1);
- }
+ pg_fatal("%s", err_msg);
for (conn_opt = conn_opts; conn_opt->keyword != NULL; conn_opt++)
{
* and PQconnectdbParams returns NULL, we call exit(1) directly.
*/
if (!tmpconn)
- {
- pg_log_error("could not connect to server");
- exit(1);
- }
+ pg_fatal("could not connect to server");
/* If we need a password and -w wasn't given, loop back and get one */
if (PQstatus(tmpconn) == CONNECTION_BAD &&
if (tar_sync(f) < 0)
{
/* XXX this seems pretty bogus; why is only this case fatal? */
- pg_log_fatal("could not fsync file \"%s\": %s",
- tf->pathname, tar_getlasterror());
- exit(1);
+ pg_fatal("could not fsync file \"%s\": %s",
+ tf->pathname, tar_getlasterror());
}
/* Clean up and done */
f = open(fn, PG_BINARY | flags, 0);
if (f < 0)
- {
- pg_log_error("could not open file \"%s\": %m", fn);
- exit(1);
- }
+ pg_fatal("could not open file \"%s\": %m", fn);
files_scanned++;
if (r != BLCKSZ)
{
if (r < 0)
- pg_log_error("could not read block %u in file \"%s\": %m",
- blockno, fn);
+ pg_fatal("could not read block %u in file \"%s\": %m",
+ blockno, fn);
else
- pg_log_error("could not read block %u in file \"%s\": read %d of %d",
- blockno, fn, r, BLCKSZ);
- exit(1);
+ pg_fatal("could not read block %u in file \"%s\": read %d of %d",
+ blockno, fn, r, BLCKSZ);
}
blocks_scanned++;
/* Seek back to beginning of block */
if (lseek(f, -BLCKSZ, SEEK_CUR) < 0)
- {
- pg_log_error("seek failed for block %u in file \"%s\": %m", blockno, fn);
- exit(1);
- }
+ pg_fatal("seek failed for block %u in file \"%s\": %m", blockno, fn);
/* Write block with checksum */
w = write(f, buf.data, BLCKSZ);
if (w != BLCKSZ)
{
if (w < 0)
- pg_log_error("could not write block %u in file \"%s\": %m",
- blockno, fn);
+ pg_fatal("could not write block %u in file \"%s\": %m",
+ blockno, fn);
else
- pg_log_error("could not write block %u in file \"%s\": wrote %d of %d",
- blockno, fn, w, BLCKSZ);
- exit(1);
+ pg_fatal("could not write block %u in file \"%s\": wrote %d of %d",
+ blockno, fn, w, BLCKSZ);
}
}
snprintf(path, sizeof(path), "%s/%s", basedir, subdir);
dir = opendir(path);
if (!dir)
- {
- pg_log_error("could not open directory \"%s\": %m", path);
- exit(1);
- }
+ pg_fatal("could not open directory \"%s\": %m", path);
while ((de = readdir(dir)) != NULL)
{
char fn[MAXPGPATH];
snprintf(fn, sizeof(fn), "%s/%s", path, de->d_name);
if (lstat(fn, &st) < 0)
- {
- pg_log_error("could not stat file \"%s\": %m", fn);
- exit(1);
- }
+ pg_fatal("could not stat file \"%s\": %m", fn);
if (S_ISREG(st.st_mode))
{
char fnonly[MAXPGPATH];
*segmentpath++ = '\0';
segmentno = atoi(segmentpath);
if (segmentno == 0)
- {
- pg_log_error("invalid segment number %d in file name \"%s\"",
- segmentno, fn);
- exit(1);
- }
+ pg_fatal("invalid segment number %d in file name \"%s\"",
+ segmentno, fn);
}
forkpath = strchr(fnonly, '_');
path, de->d_name, TABLESPACE_VERSION_DIRECTORY);
if (lstat(tblspc_path, &tblspc_st) < 0)
- {
- pg_log_error("could not stat file \"%s\": %m",
- tblspc_path);
- exit(1);
- }
+ pg_fatal("could not stat file \"%s\": %m",
+ tblspc_path);
/*
* Move backwards once as the scan needs to happen for the
showprogress = true;
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
if (DataDir == NULL)
{
pg_log_error("no data directory specified");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (mode != PG_MODE_CHECK && only_filenode)
{
pg_log_error("option -f/--filenode can only be used with --check");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
/* Read the control file and check compatibility */
ControlFile = get_controlfile(DataDir, &crc_ok);
if (!crc_ok)
- {
- pg_log_error("pg_control CRC value is incorrect");
- exit(1);
- }
+ pg_fatal("pg_control CRC value is incorrect");
if (ControlFile->pg_control_version != PG_CONTROL_VERSION)
- {
- pg_log_error("cluster is not compatible with this version of pg_checksums");
- exit(1);
- }
+ pg_fatal("cluster is not compatible with this version of pg_checksums");
if (ControlFile->blcksz != BLCKSZ)
{
pg_log_error("database cluster is not compatible");
- fprintf(stderr, _("The database cluster was initialized with block size %u, but pg_checksums was compiled with block size %u.\n"),
- ControlFile->blcksz, BLCKSZ);
+ pg_log_error_detail("The database cluster was initialized with block size %u, but pg_checksums was compiled with block size %u.",
+ ControlFile->blcksz, BLCKSZ);
exit(1);
}
*/
if (ControlFile->state != DB_SHUTDOWNED &&
ControlFile->state != DB_SHUTDOWNED_IN_RECOVERY)
- {
- pg_log_error("cluster must be shut down");
- exit(1);
- }
+ pg_fatal("cluster must be shut down");
if (ControlFile->data_checksum_version == 0 &&
mode == PG_MODE_CHECK)
- {
- pg_log_error("data checksums are not enabled in cluster");
- exit(1);
- }
+ pg_fatal("data checksums are not enabled in cluster");
if (ControlFile->data_checksum_version == 0 &&
mode == PG_MODE_DISABLE)
- {
- pg_log_error("data checksums are already disabled in cluster");
- exit(1);
- }
+ pg_fatal("data checksums are already disabled in cluster");
if (ControlFile->data_checksum_version > 0 &&
mode == PG_MODE_ENABLE)
- {
- pg_log_error("data checksums are already enabled in cluster");
- exit(1);
- }
+ pg_fatal("data checksums are already enabled in cluster");
/* Operate on all files if checking or enabling checksums */
if (mode == PG_MODE_CHECK || mode == PG_MODE_ENABLE)
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (DataDir == NULL)
{
pg_log_error("no data directory specified");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
strlcpy(full_path, progname, sizeof(full_path));
if (ret == -1)
- write_stderr(_("The program \"%s\" is needed by %s but was not found in the\n"
- "same directory as \"%s\".\n"
- "Check your installation.\n"),
+ write_stderr(_("program \"%s\" is needed by %s but was not found in the same directory as \"%s\"\n"),
target, progname, full_path);
else
- write_stderr(_("The program \"%s\" was found by \"%s\"\n"
- "but was not the same version as %s.\n"
- "Check your installation.\n"),
+ write_stderr(_("program \"%s\" was found by \"%s\" but was not the same version as %s\n"),
target, full_path, progname);
exit(1);
}
/* With partitions there can only be one parent */
if (tblinfo[i].numParents != 1)
- fatal("invalid number of parents %d for table \"%s\"",
- tblinfo[i].numParents,
- tblinfo[i].dobj.name);
+ pg_fatal("invalid number of parents %d for table \"%s\"",
+ tblinfo[i].numParents,
+ tblinfo[i].dobj.name);
attachinfo = (TableAttachInfo *) palloc(sizeof(TableAttachInfo));
attachinfo->dobj.objType = DO_TABLE_ATTACH;
parent = findTableByOid(inhinfo[i].inhparent);
if (parent == NULL)
- {
- pg_log_error("failed sanity check, parent OID %u of table \"%s\" (OID %u) not found",
- inhinfo[i].inhparent,
- self->dobj.name,
- oid);
- exit_nicely(1);
- }
+ pg_fatal("failed sanity check, parent OID %u of table \"%s\" (OID %u) not found",
+ inhinfo[i].inhparent,
+ self->dobj.name,
+ oid);
self->parents[j++] = parent;
}
}
if (j > 0)
{
if (argNum >= arraysize)
- {
- pg_log_error("could not parse numeric array \"%s\": too many numbers", str);
- exit_nicely(1);
- }
+ pg_fatal("could not parse numeric array \"%s\": too many numbers", str);
temp[j] = '\0';
array[argNum++] = atooid(temp);
j = 0;
{
if (!(isdigit((unsigned char) s) || s == '-') ||
j >= sizeof(temp) - 1)
- {
- pg_log_error("could not parse numeric array \"%s\": invalid character in number", str);
- exit_nicely(1);
- }
+ pg_fatal("could not parse numeric array \"%s\": invalid character in number", str);
temp[j++] = s;
}
}
*alg = COMPR_ALG_NONE;
else
{
- fatal("invalid compression code: %d", compression);
+ pg_fatal("invalid compression code: %d", compression);
*alg = COMPR_ALG_NONE; /* keep compiler quiet */
}
#ifndef HAVE_LIBZ
if (alg == COMPR_ALG_LIBZ)
- fatal("not built with zlib support");
+ pg_fatal("not built with zlib support");
#endif
cs = (CompressorState *) pg_malloc0(sizeof(CompressorState));
#ifdef HAVE_LIBZ
ReadDataFromArchiveZlib(AH, readF);
#else
- fatal("not built with zlib support");
+ pg_fatal("not built with zlib support");
#endif
}
}
#ifdef HAVE_LIBZ
WriteDataToArchiveZlib(AH, cs, data, dLen);
#else
- fatal("not built with zlib support");
+ pg_fatal("not built with zlib support");
#endif
break;
case COMPR_ALG_NONE:
cs->zlibOutSize = ZLIB_OUT_SIZE;
if (deflateInit(zp, level) != Z_OK)
- fatal("could not initialize compression library: %s",
- zp->msg);
+ pg_fatal("could not initialize compression library: %s",
+ zp->msg);
/* Just be paranoid - maybe End is called after Start, with no Write */
zp->next_out = (void *) cs->zlibOut;
DeflateCompressorZlib(AH, cs, true);
if (deflateEnd(zp) != Z_OK)
- fatal("could not close compression stream: %s", zp->msg);
+ pg_fatal("could not close compression stream: %s", zp->msg);
free(cs->zlibOut);
free(cs->zp);
{
res = deflate(zp, flush ? Z_FINISH : Z_NO_FLUSH);
if (res == Z_STREAM_ERROR)
- fatal("could not compress data: %s", zp->msg);
+ pg_fatal("could not compress data: %s", zp->msg);
if ((flush && (zp->avail_out < cs->zlibOutSize))
|| (zp->avail_out == 0)
|| (zp->avail_in != 0)
out = pg_malloc(ZLIB_OUT_SIZE + 1);
if (inflateInit(zp) != Z_OK)
- fatal("could not initialize compression library: %s",
- zp->msg);
+ pg_fatal("could not initialize compression library: %s",
+ zp->msg);
/* no minimal chunk size for zlib */
while ((cnt = readF(AH, &buf, &buflen)))
res = inflate(zp, 0);
if (res != Z_OK && res != Z_STREAM_END)
- fatal("could not uncompress data: %s", zp->msg);
+ pg_fatal("could not uncompress data: %s", zp->msg);
out[ZLIB_OUT_SIZE - zp->avail_out] = '\0';
ahwrite(out, 1, ZLIB_OUT_SIZE - zp->avail_out, AH);
zp->avail_out = ZLIB_OUT_SIZE;
res = inflate(zp, 0);
if (res != Z_OK && res != Z_STREAM_END)
- fatal("could not uncompress data: %s", zp->msg);
+ pg_fatal("could not uncompress data: %s", zp->msg);
out[ZLIB_OUT_SIZE - zp->avail_out] = '\0';
ahwrite(out, 1, ZLIB_OUT_SIZE - zp->avail_out, AH);
}
if (inflateEnd(zp) != Z_OK)
- fatal("could not close compression library: %s", zp->msg);
+ pg_fatal("could not close compression library: %s", zp->msg);
free(buf);
free(out);
fp = cfopen(fname, mode, compression);
free_keep_errno(fname);
#else
- fatal("not built with zlib support");
+ pg_fatal("not built with zlib support");
fp = NULL; /* keep compiler quiet */
#endif
}
fp = NULL;
}
#else
- fatal("not built with zlib support");
+ pg_fatal("not built with zlib support");
#endif
}
else
int errnum;
const char *errmsg = gzerror(fp->compressedfp, &errnum);
- fatal("could not read from input file: %s",
- errnum == Z_ERRNO ? strerror(errno) : errmsg);
+ pg_fatal("could not read from input file: %s",
+ errnum == Z_ERRNO ? strerror(errno) : errmsg);
}
}
else
if (ret == EOF)
{
if (!gzeof(fp->compressedfp))
- fatal("could not read from input file: %s", strerror(errno));
+ pg_fatal("could not read from input file: %s", strerror(errno));
else
- fatal("could not read from input file: end of file");
+ pg_fatal("could not read from input file: end of file");
}
}
else
../../common/exec.c ../../common/fe_memutils.c \
../../common/wait_error.c
GETTEXT_TRIGGERS = $(FRONTEND_COMMON_GETTEXT_TRIGGERS) \
- fatal simple_prompt \
+ simple_prompt \
ExecuteSqlCommand:3 warn_or_exit_horribly:2
GETTEXT_FLAGS = $(FRONTEND_COMMON_GETTEXT_FLAGS) \
- fatal:1:c-format \
warn_or_exit_horribly:2:c-format
/* Initialize socket access */
err = WSAStartup(MAKEWORD(2, 2), &wsaData);
if (err != 0)
- {
- pg_log_error("%s() failed: error code %d", "WSAStartup", err);
- exit_nicely(1);
- }
+ pg_fatal("%s() failed: error code %d", "WSAStartup", err);
parallel_init_done = true;
}
*
* Note that we don't expect to come here during normal exit (the workers
* should be long gone, and the ParallelState too). We're only here in a
- * fatal() situation, so intervening to cancel active commands is
+ * pg_fatal() situation, so intervening to cancel active commands is
* appropriate.
*/
static void
/* Create communication pipes for this worker */
if (pgpipe(pipeMW) < 0 || pgpipe(pipeWM) < 0)
- fatal("could not create communication channels: %m");
+ pg_fatal("could not create communication channels: %m");
/* leader's ends of the pipes */
slot->pipeRead = pipeWM[PIPE_READ];
else if (pid < 0)
{
/* fork failed */
- fatal("could not create worker process: %m");
+ pg_fatal("could not create worker process: %m");
}
/* In Leader after successful fork */
Assert(*te != NULL);
}
else
- fatal("unrecognized command received from leader: \"%s\"",
- msg);
+ pg_fatal("unrecognized command received from leader: \"%s\"",
+ msg);
}
/*
AH->public.n_errors += n_errors;
}
else
- fatal("invalid message received from worker: \"%s\"",
- msg);
+ pg_fatal("invalid message received from worker: \"%s\"",
+ msg);
return status;
}
res = PQexec(AH->connection, query->data);
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
- fatal("could not obtain lock on relation \"%s\"\n"
- "This usually means that someone requested an ACCESS EXCLUSIVE lock "
- "on the table after the pg_dump parent process had gotten the "
- "initial ACCESS SHARE lock on the table.", qualId);
+ pg_fatal("could not obtain lock on relation \"%s\"\n"
+ "This usually means that someone requested an ACCESS EXCLUSIVE lock "
+ "on the table after the pg_dump parent process had gotten the "
+ "initial ACCESS SHARE lock on the table.", qualId);
PQclear(res);
destroyPQExpBuffer(query);
{
/* If do_wait is true, we must have detected EOF on some socket */
if (do_wait)
- fatal("a worker process died unexpectedly");
+ pg_fatal("a worker process died unexpectedly");
return false;
}
pstate->te[worker] = NULL;
}
else
- fatal("invalid message received from worker: \"%s\"",
- msg);
+ pg_fatal("invalid message received from worker: \"%s\"",
+ msg);
/* Free the string returned from getMessageFromWorker */
free(msg);
int len = strlen(str) + 1;
if (pipewrite(pipefd[PIPE_WRITE], str, len) != len)
- fatal("could not write to the communication channel: %m");
+ pg_fatal("could not write to the communication channel: %m");
}
/*
}
if (i < 0)
- fatal("%s() failed: %m", "select");
+ pg_fatal("%s() failed: %m", "select");
for (i = 0; i < pstate->numWorkers; i++)
{
if (pipewrite(pstate->parallelSlot[worker].pipeWrite, str, len) != len)
{
- fatal("could not write to the communication channel: %m");
+ pg_fatal("could not write to the communication channel: %m");
}
}
res = fclose(AH->OF);
if (res != 0)
- fatal("could not close output file: %m");
+ pg_fatal("could not close output file: %m");
}
/* Public */
/* ok no matter which section we were in */
break;
default:
- fatal("unexpected section code %d",
- (int) te->section);
+ pg_fatal("unexpected section code %d",
+ (int) te->section);
break;
}
}
{
/* We haven't got round to making this work for all archive formats */
if (AH->ClonePtr == NULL || AH->ReopenPtr == NULL)
- fatal("parallel restore is not supported with this archive file format");
+ pg_fatal("parallel restore is not supported with this archive file format");
/* Doesn't work if the archive represents dependencies as OIDs */
if (AH->version < K_VERS_1_8)
- fatal("parallel restore is not supported with archives made by pre-8.0 pg_dump");
+ pg_fatal("parallel restore is not supported with archives made by pre-8.0 pg_dump");
/*
* It's also not gonna work if we can't reopen the input file, so
for (te = AH->toc->next; te != AH->toc; te = te->next)
{
if (te->hadDumper && (te->reqs & REQ_DATA) != 0)
- fatal("cannot restore from compressed archive (compression not supported in this installation)");
+ pg_fatal("cannot restore from compressed archive (compression not supported in this installation)");
}
}
#endif
{
pg_log_info("connecting to database for restore");
if (AH->version < K_VERS_1_3)
- fatal("direct database connections are not supported in pre-1.3 archives");
+ pg_fatal("direct database connections are not supported in pre-1.3 archives");
/*
* We don't want to guess at whether the dump will successfully
ArchiveHandle *AH = (ArchiveHandle *) AHX;
if (!AH->currToc)
- fatal("internal error -- WriteData cannot be called outside the context of a DataDumper routine");
+ pg_fatal("internal error -- WriteData cannot be called outside the context of a DataDumper routine");
AH->WriteDataPtr(AH, data, dLen);
}
ArchiveHandle *AH = (ArchiveHandle *) AHX;
if (!AH->StartBlobPtr)
- fatal("large-object output not supported in chosen format");
+ pg_fatal("large-object output not supported in chosen format");
AH->StartBlobPtr(AH, AH->currToc, oid);
{
loOid = lo_create(AH->connection, oid);
if (loOid == 0 || loOid != oid)
- fatal("could not create large object %u: %s",
- oid, PQerrorMessage(AH->connection));
+ pg_fatal("could not create large object %u: %s",
+ oid, PQerrorMessage(AH->connection));
}
AH->loFd = lo_open(AH->connection, oid, INV_WRITE);
if (AH->loFd == -1)
- fatal("could not open large object %u: %s",
- oid, PQerrorMessage(AH->connection));
+ pg_fatal("could not open large object %u: %s",
+ oid, PQerrorMessage(AH->connection));
}
else
{
/* Setup the file */
fh = fopen(ropt->tocFile, PG_BINARY_R);
if (!fh)
- fatal("could not open TOC file \"%s\": %m", ropt->tocFile);
+ pg_fatal("could not open TOC file \"%s\": %m", ropt->tocFile);
initStringInfo(&linebuf);
/* Find TOC entry */
te = getTocEntryByDumpId(AH, id);
if (!te)
- fatal("could not find entry for ID %d",
- id);
+ pg_fatal("could not find entry for ID %d",
+ id);
/* Mark it wanted */
ropt->idWanted[id - 1] = true;
pg_free(linebuf.data);
if (fclose(fh) != 0)
- fatal("could not close TOC file: %m");
+ pg_fatal("could not close TOC file: %m");
}
/**********************
if (!AH->OF)
{
if (filename)
- fatal("could not open output file \"%s\": %m", filename);
+ pg_fatal("could not open output file \"%s\": %m", filename);
else
- fatal("could not open output file: %m");
+ pg_fatal("could not open output file: %m");
}
}
res = fclose(AH->OF);
if (res != 0)
- fatal("could not close output file: %m");
+ pg_fatal("could not close output file: %m");
AH->gzOut = savedContext.gzOut;
AH->OF = savedContext.OF;
case STAGE_INITIALIZING:
if (AH->stage != AH->lastErrorStage)
- pg_log_generic(PG_LOG_INFO, "while INITIALIZING:");
+ pg_log_info("while INITIALIZING:");
break;
case STAGE_PROCESSING:
if (AH->stage != AH->lastErrorStage)
- pg_log_generic(PG_LOG_INFO, "while PROCESSING TOC:");
+ pg_log_info("while PROCESSING TOC:");
break;
case STAGE_FINALIZING:
if (AH->stage != AH->lastErrorStage)
- pg_log_generic(PG_LOG_INFO, "while FINALIZING:");
+ pg_log_info("while FINALIZING:");
break;
}
if (AH->currentTE != NULL && AH->currentTE != AH->lastErrorTE)
{
- pg_log_generic(PG_LOG_INFO, "from TOC entry %d; %u %u %s %s %s",
- AH->currentTE->dumpId,
- AH->currentTE->catalogId.tableoid,
- AH->currentTE->catalogId.oid,
- AH->currentTE->desc ? AH->currentTE->desc : "(no desc)",
- AH->currentTE->tag ? AH->currentTE->tag : "(no tag)",
- AH->currentTE->owner ? AH->currentTE->owner : "(no owner)");
+ pg_log_info("from TOC entry %d; %u %u %s %s %s",
+ AH->currentTE->dumpId,
+ AH->currentTE->catalogId.tableoid,
+ AH->currentTE->catalogId.oid,
+ AH->currentTE->desc ? AH->currentTE->desc : "(no desc)",
+ AH->currentTE->tag ? AH->currentTE->tag : "(no tag)",
+ AH->currentTE->owner ? AH->currentTE->owner : "(no owner)");
}
AH->lastErrorStage = AH->stage;
AH->lastErrorTE = AH->currentTE;
va_start(ap, fmt);
- pg_log_generic_v(PG_LOG_ERROR, fmt, ap);
+ pg_log_generic_v(PG_LOG_ERROR, PG_LOG_PRIMARY, fmt, ap);
va_end(ap);
if (AH->public.exit_on_error)
{
/* this check is purely paranoia, maxDumpId should be correct */
if (te->dumpId <= 0 || te->dumpId > maxDumpId)
- fatal("bad dumpId");
+ pg_fatal("bad dumpId");
/* tocsByDumpId indexes all TOCs by their dump ID */
AH->tocsByDumpId[te->dumpId] = te;
* item's dump ID, so there should be a place for it in the array.
*/
if (tableId <= 0 || tableId > maxDumpId)
- fatal("bad table dumpId for TABLE DATA item");
+ pg_fatal("bad table dumpId for TABLE DATA item");
AH->tableDataId[tableId] = te->dumpId;
}
break;
default:
- fatal("unexpected data offset flag %d", offsetFlg);
+ pg_fatal("unexpected data offset flag %d", offsetFlg);
}
/*
else
{
if (AH->ReadBytePtr(AH) != 0)
- fatal("file offset in dump file is too large");
+ pg_fatal("file offset in dump file is too large");
}
}
char buf[MAXPGPATH];
if (snprintf(buf, MAXPGPATH, "%s/toc.dat", AH->fSpec) >= MAXPGPATH)
- fatal("directory name too long: \"%s\"",
- AH->fSpec);
+ pg_fatal("directory name too long: \"%s\"",
+ AH->fSpec);
if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
{
AH->format = archDirectory;
#ifdef HAVE_LIBZ
if (snprintf(buf, MAXPGPATH, "%s/toc.dat.gz", AH->fSpec) >= MAXPGPATH)
- fatal("directory name too long: \"%s\"",
- AH->fSpec);
+ pg_fatal("directory name too long: \"%s\"",
+ AH->fSpec);
if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
{
AH->format = archDirectory;
return AH->format;
}
#endif
- fatal("directory \"%s\" does not appear to be a valid archive (\"toc.dat\" does not exist)",
- AH->fSpec);
+ pg_fatal("directory \"%s\" does not appear to be a valid archive (\"toc.dat\" does not exist)",
+ AH->fSpec);
fh = NULL; /* keep compiler quiet */
}
else
{
fh = fopen(AH->fSpec, PG_BINARY_R);
if (!fh)
- fatal("could not open input file \"%s\": %m", AH->fSpec);
+ pg_fatal("could not open input file \"%s\": %m", AH->fSpec);
}
}
else
{
fh = stdin;
if (!fh)
- fatal("could not open input file: %m");
+ pg_fatal("could not open input file: %m");
}
if ((cnt = fread(sig, 1, 5, fh)) != 5)
{
if (ferror(fh))
- fatal("could not read input file: %m");
+ pg_fatal("could not read input file: %m");
else
- fatal("input file is too short (read %lu, expected 5)",
- (unsigned long) cnt);
+ pg_fatal("input file is too short (read %lu, expected 5)",
+ (unsigned long) cnt);
}
/* Save it, just in case we need it later */
* looks like it's probably a text format dump. so suggest they
* try psql
*/
- fatal("input file appears to be a text format dump. Please use psql.");
+ pg_fatal("input file appears to be a text format dump. Please use psql.");
}
if (AH->lookaheadLen != 512)
{
if (feof(fh))
- fatal("input file does not appear to be a valid archive (too short?)");
+ pg_fatal("input file does not appear to be a valid archive (too short?)");
else
READ_ERROR_EXIT(fh);
}
if (!isValidTarHeader(AH->lookahead))
- fatal("input file does not appear to be a valid archive");
+ pg_fatal("input file does not appear to be a valid archive");
AH->format = archTar;
}
if (wantClose)
{
if (fclose(fh) != 0)
- fatal("could not close input file: %m");
+ pg_fatal("could not close input file: %m");
/* Forget lookahead, since we'll re-read header after re-opening */
AH->readHeader = 0;
AH->lookaheadLen = 0;
break;
default:
- fatal("unrecognized file format \"%d\"", fmt);
+ pg_fatal("unrecognized file format \"%d\"", fmt);
}
return AH;
te->dumpId, te->desc, te->tag);
if (status != 0)
- fatal("worker process failed: exit code %d",
- status);
+ pg_fatal("worker process failed: exit code %d",
+ status);
}
/* Sanity check */
if (te->dumpId <= 0)
- fatal("entry ID %d out of range -- perhaps a corrupt TOC",
- te->dumpId);
+ pg_fatal("entry ID %d out of range -- perhaps a corrupt TOC",
+ te->dumpId);
te->hadDumper = ReadInt(AH);
*ptr2 = '\0';
encoding = pg_char_to_encoding(ptr1);
if (encoding < 0)
- fatal("unrecognized encoding \"%s\"",
- ptr1);
+ pg_fatal("unrecognized encoding \"%s\"",
+ ptr1);
AH->public.encoding = encoding;
}
else
- fatal("invalid ENCODING item: %s",
- te->defn);
+ pg_fatal("invalid ENCODING item: %s",
+ te->defn);
free(defn);
}
else if (ptr1 && strncmp(ptr1, "'off'", 5) == 0)
AH->public.std_strings = false;
else
- fatal("invalid STDSTRINGS item: %s",
- te->defn);
+ pg_fatal("invalid STDSTRINGS item: %s",
+ te->defn);
}
static void
{
missing_name = simple_string_list_not_touched(&ropt->schemaNames);
if (missing_name != NULL)
- fatal("schema \"%s\" not found", missing_name);
+ pg_fatal("schema \"%s\" not found", missing_name);
}
if (ropt->tableNames.head != NULL)
{
missing_name = simple_string_list_not_touched(&ropt->tableNames);
if (missing_name != NULL)
- fatal("table \"%s\" not found", missing_name);
+ pg_fatal("table \"%s\" not found", missing_name);
}
if (ropt->indexNames.head != NULL)
{
missing_name = simple_string_list_not_touched(&ropt->indexNames);
if (missing_name != NULL)
- fatal("index \"%s\" not found", missing_name);
+ pg_fatal("index \"%s\" not found", missing_name);
}
if (ropt->functionNames.head != NULL)
{
missing_name = simple_string_list_not_touched(&ropt->functionNames);
if (missing_name != NULL)
- fatal("function \"%s\" not found", missing_name);
+ pg_fatal("function \"%s\" not found", missing_name);
}
if (ropt->triggerNames.head != NULL)
{
missing_name = simple_string_list_not_touched(&ropt->triggerNames);
if (missing_name != NULL)
- fatal("trigger \"%s\" not found", missing_name);
+ pg_fatal("trigger \"%s\" not found", missing_name);
}
}
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
/* NOT warn_or_exit_horribly... use -O instead to skip this. */
- fatal("could not set session user to \"%s\": %s",
- user, PQerrorMessage(AH->connection));
+ pg_fatal("could not set session user to \"%s\": %s",
+ user, PQerrorMessage(AH->connection));
PQclear(res);
}
AH->ReadBufPtr(AH, tmpMag, 5);
if (strncmp(tmpMag, "PGDMP", 5) != 0)
- fatal("did not find magic string in file header");
+ pg_fatal("did not find magic string in file header");
}
vmaj = AH->ReadBytePtr(AH);
AH->version = MAKE_ARCHIVE_VERSION(vmaj, vmin, vrev);
if (AH->version < K_VERS_1_0 || AH->version > K_VERS_MAX)
- fatal("unsupported version (%d.%d) in file header",
- vmaj, vmin);
+ pg_fatal("unsupported version (%d.%d) in file header",
+ vmaj, vmin);
AH->intSize = AH->ReadBytePtr(AH);
if (AH->intSize > 32)
- fatal("sanity check on integer size (%lu) failed",
- (unsigned long) AH->intSize);
+ pg_fatal("sanity check on integer size (%lu) failed",
+ (unsigned long) AH->intSize);
if (AH->intSize > sizeof(int))
pg_log_warning("archive was made on a machine with larger integers, some operations might fail");
fmt = AH->ReadBytePtr(AH);
if (AH->format != fmt)
- fatal("expected format (%d) differs from format found in file (%d)",
- AH->format, fmt);
+ pg_fatal("expected format (%d) differs from format found in file (%d)",
+ AH->format, fmt);
if (AH->version >= K_VERS_1_2)
{
else if (status == WORKER_IGNORED_ERRORS)
AH->public.n_errors++;
else if (status != 0)
- fatal("worker process failed: exit code %d",
- status);
+ pg_fatal("worker process failed: exit code %d",
+ status);
reduce_dependencies(AH, te, ready_list);
}
#define READ_ERROR_EXIT(fd) \
do { \
if (feof(fd)) \
- fatal("could not read from input file: end of file"); \
+ pg_fatal("could not read from input file: end of file"); \
else \
- fatal("could not read from input file: %m"); \
+ pg_fatal("could not read from input file: %m"); \
} while (0)
#define WRITE_ERROR_EXIT \
do { \
- fatal("could not write to output file: %m"); \
+ pg_fatal("could not write to output file: %m"); \
} while (0)
typedef enum T_Action
{
AH->FH = fopen(AH->fSpec, PG_BINARY_W);
if (!AH->FH)
- fatal("could not open output file \"%s\": %m", AH->fSpec);
+ pg_fatal("could not open output file \"%s\": %m", AH->fSpec);
}
else
{
AH->FH = stdout;
if (!AH->FH)
- fatal("could not open output file: %m");
+ pg_fatal("could not open output file: %m");
}
ctx->hasSeek = checkSeek(AH->FH);
{
AH->FH = fopen(AH->fSpec, PG_BINARY_R);
if (!AH->FH)
- fatal("could not open input file \"%s\": %m", AH->fSpec);
+ pg_fatal("could not open input file \"%s\": %m", AH->fSpec);
}
else
{
AH->FH = stdin;
if (!AH->FH)
- fatal("could not open input file: %m");
+ pg_fatal("could not open input file: %m");
}
ctx->hasSeek = checkSeek(AH->FH);
lclContext *ctx = (lclContext *) AH->formatData;
if (oid == 0)
- fatal("invalid OID for large object");
+ pg_fatal("invalid OID for large object");
WriteInt(AH, oid);
if (ctx->hasSeek)
{
if (fseeko(AH->FH, ctx->lastFilePos, SEEK_SET) != 0)
- fatal("error during file seek: %m");
+ pg_fatal("error during file seek: %m");
}
for (;;)
break;
default: /* Always have a default */
- fatal("unrecognized data block type (%d) while searching archive",
- blkType);
+ pg_fatal("unrecognized data block type (%d) while searching archive",
+ blkType);
break;
}
}
{
/* We can just seek to the place we need to be. */
if (fseeko(AH->FH, tctx->dataPos, SEEK_SET) != 0)
- fatal("error during file seek: %m");
+ pg_fatal("error during file seek: %m");
_readBlockHeader(AH, &blkType, &id);
}
if (blkType == EOF)
{
if (!ctx->hasSeek)
- fatal("could not find block ID %d in archive -- "
- "possibly due to out-of-order restore request, "
- "which cannot be handled due to non-seekable input file",
- te->dumpId);
+ pg_fatal("could not find block ID %d in archive -- "
+ "possibly due to out-of-order restore request, "
+ "which cannot be handled due to non-seekable input file",
+ te->dumpId);
else
- fatal("could not find block ID %d in archive -- "
- "possibly corrupt archive",
- te->dumpId);
+ pg_fatal("could not find block ID %d in archive -- "
+ "possibly corrupt archive",
+ te->dumpId);
}
/* Are we sane? */
if (id != te->dumpId)
- fatal("found unexpected block ID (%d) when reading data -- expected %d",
- id, te->dumpId);
+ pg_fatal("found unexpected block ID (%d) when reading data -- expected %d",
+ id, te->dumpId);
switch (blkType)
{
break;
default: /* Always have a default */
- fatal("unrecognized data block type %d while restoring archive",
- blkType);
+ pg_fatal("unrecognized data block type %d while restoring archive",
+ blkType);
break;
}
if (ctx->hasSeek)
{
if (fseeko(AH->FH, blkLen, SEEK_CUR) != 0)
- fatal("error during file seek: %m");
+ pg_fatal("error during file seek: %m");
}
else
{
if (fread(buf, 1, blkLen, AH->FH) != blkLen)
{
if (feof(AH->FH))
- fatal("could not read from input file: end of file");
+ pg_fatal("could not read from input file: end of file");
else
- fatal("could not read from input file: %m");
+ pg_fatal("could not read from input file: %m");
}
}
/* Remember TOC's seek position for use below */
tpos = ftello(AH->FH);
if (tpos < 0 && ctx->hasSeek)
- fatal("could not determine seek position in archive file: %m");
+ pg_fatal("could not determine seek position in archive file: %m");
WriteToc(AH);
WriteDataChunks(AH, NULL);
}
if (fclose(AH->FH) != 0)
- fatal("could not close archive file: %m");
+ pg_fatal("could not close archive file: %m");
/* Sync the output file if one is defined */
if (AH->dosync && AH->mode == archModeWrite && AH->fSpec)
pgoff_t tpos;
if (AH->mode == archModeWrite)
- fatal("can only reopen input archives");
+ pg_fatal("can only reopen input archives");
/*
* These two cases are user-facing errors since they represent unsupported
* (but not invalid) use-cases. Word the error messages appropriately.
*/
if (AH->fSpec == NULL || strcmp(AH->fSpec, "") == 0)
- fatal("parallel restore from standard input is not supported");
+ pg_fatal("parallel restore from standard input is not supported");
if (!ctx->hasSeek)
- fatal("parallel restore from non-seekable file is not supported");
+ pg_fatal("parallel restore from non-seekable file is not supported");
tpos = ftello(AH->FH);
if (tpos < 0)
- fatal("could not determine seek position in archive file: %m");
+ pg_fatal("could not determine seek position in archive file: %m");
#ifndef WIN32
if (fclose(AH->FH) != 0)
- fatal("could not close archive file: %m");
+ pg_fatal("could not close archive file: %m");
#endif
AH->FH = fopen(AH->fSpec, PG_BINARY_R);
if (!AH->FH)
- fatal("could not open input file \"%s\": %m", AH->fSpec);
+ pg_fatal("could not open input file \"%s\": %m", AH->fSpec);
if (fseeko(AH->FH, tpos, SEEK_SET) != 0)
- fatal("could not set seek position in archive file: %m");
+ pg_fatal("could not set seek position in archive file: %m");
}
/*
pgoff_t endpos;
if (fseeko(AH->FH, 0, SEEK_END) != 0)
- fatal("error during file seek: %m");
+ pg_fatal("error during file seek: %m");
endpos = ftello(AH->FH);
if (endpos > prev_tctx->dataPos)
prev_te->dataLength = endpos - prev_tctx->dataPos;
/* sanity check, shouldn't happen */
if (ctx->cs != NULL)
- fatal("compressor active");
+ pg_fatal("compressor active");
/*
* We intentionally do not clone TOC-entry-local state: it's useful to
{
/* Not expected if we found we can seek. */
if (ctx->hasSeek)
- fatal("could not determine seek position in archive file: %m");
+ pg_fatal("could not determine seek position in archive file: %m");
}
return pos;
}
int byt;
/*
- * Note: if we are at EOF with a pre-1.3 input file, we'll fatal() inside
+ * Note: if we are at EOF with a pre-1.3 input file, we'll pg_fatal() inside
* ReadInt rather than returning EOF. It doesn't seem worth jumping
* through hoops to deal with that case better, because no such files are
* likely to exist in the wild: only some 7.1 development versions of
remoteversion_str = PQparameterStatus(AH->connection, "server_version");
remoteversion = PQserverVersion(AH->connection);
if (remoteversion == 0 || !remoteversion_str)
- fatal("could not get server_version from libpq");
+ pg_fatal("could not get server_version from libpq");
AH->public.remoteVersionStr = pg_strdup(remoteversion_str);
AH->public.remoteVersion = remoteversion;
&& (remoteversion < AH->public.minRemoteVersion ||
remoteversion > AH->public.maxRemoteVersion))
{
- pg_log_error("server version: %s; %s version: %s",
- remoteversion_str, progname, PG_VERSION);
- fatal("aborting because of server version mismatch");
+ pg_log_error("aborting because of server version mismatch");
+ pg_log_error_detail("server version: %s; %s version: %s",
+ remoteversion_str, progname, PG_VERSION);
+ exit(1);
}
/*
bool new_pass;
if (AH->connection)
- fatal("already connected to a database");
+ pg_fatal("already connected to a database");
/* Never prompt for a password during a reconnection */
prompt_password = isReconnect ? TRI_NO : cparams->promptPassword;
AH->connection = PQconnectdbParams(keywords, values, true);
if (!AH->connection)
- fatal("could not connect to database");
+ pg_fatal("could not connect to database");
if (PQstatus(AH->connection) == CONNECTION_BAD &&
PQconnectionNeedsPassword(AH->connection) &&
if (PQstatus(AH->connection) == CONNECTION_BAD)
{
if (isReconnect)
- fatal("reconnection failed: %s",
- PQerrorMessage(AH->connection));
+ pg_fatal("reconnection failed: %s",
+ PQerrorMessage(AH->connection));
else
- fatal("%s",
- PQerrorMessage(AH->connection));
+ pg_fatal("%s",
+ PQerrorMessage(AH->connection));
}
/* Start strict; later phases may override this. */
/*
* If we have an active query, send a cancel before closing, ignoring
* any errors. This is of no use for a normal exit, but might be
- * helpful during fatal().
+ * helpful during pg_fatal().
*/
if (PQtransactionStatus(AH->connection) == PQTRANS_ACTIVE)
(void) PQcancel(AH->connCancel, errbuf, sizeof(errbuf));
static void
notice_processor(void *arg, const char *message)
{
- pg_log_generic(PG_LOG_INFO, "%s", message);
+ pg_log_info("%s", message);
}
-/* Like fatal(), but with a complaint about a particular query. */
+/* Like pg_fatal(), but with a complaint about a particular query. */
static void
die_on_query_failure(ArchiveHandle *AH, const char *query)
{
pg_log_error("query failed: %s",
PQerrorMessage(AH->connection));
- fatal("query was: %s", query);
+ pg_log_error_detail("Query was: %s", query);
+ exit(1);
}
void
/* Expecting a single result only */
ntups = PQntuples(res);
if (ntups != 1)
- fatal(ngettext("query returned %d row instead of one: %s",
- "query returned %d rows instead of one: %s",
- ntups),
- ntups, query);
+ pg_fatal(ngettext("query returned %d row instead of one: %s",
+ "query returned %d rows instead of one: %s",
+ ntups),
+ ntups, query);
return res;
}
*/
if (AH->pgCopyIn &&
PQputCopyData(AH->connection, buf, bufLen) <= 0)
- fatal("error returned by PQputCopyData: %s",
- PQerrorMessage(AH->connection));
+ pg_fatal("error returned by PQputCopyData: %s",
+ PQerrorMessage(AH->connection));
}
else if (AH->outputKind == OUTPUT_OTHERDATA)
{
PGresult *res;
if (PQputCopyEnd(AH->connection, NULL) <= 0)
- fatal("error returned by PQputCopyEnd: %s",
- PQerrorMessage(AH->connection));
+ pg_fatal("error returned by PQputCopyEnd: %s",
+ PQerrorMessage(AH->connection));
/* Check command status and return to normal libpq state */
res = PQgetResult(AH->connection);
*/
if (!AH->fSpec || strcmp(AH->fSpec, "") == 0)
- fatal("no output directory specified");
+ pg_fatal("no output directory specified");
ctx->directory = AH->fSpec;
}
if (errno)
- fatal("could not read directory \"%s\": %m",
- ctx->directory);
+ pg_fatal("could not read directory \"%s\": %m",
+ ctx->directory);
if (closedir(dir))
- fatal("could not close directory \"%s\": %m",
- ctx->directory);
+ pg_fatal("could not close directory \"%s\": %m",
+ ctx->directory);
}
}
if (!is_empty && mkdir(ctx->directory, 0700) < 0)
- fatal("could not create directory \"%s\": %m",
- ctx->directory);
+ pg_fatal("could not create directory \"%s\": %m",
+ ctx->directory);
}
else
{ /* Read Mode */
tocFH = cfopen_read(fname, PG_BINARY_R);
if (tocFH == NULL)
- fatal("could not open input file \"%s\": %m", fname);
+ pg_fatal("could not open input file \"%s\": %m", fname);
ctx->dataFH = tocFH;
/* Nothing else in the file, so close it again... */
if (cfclose(tocFH) != 0)
- fatal("could not close TOC file: %m");
+ pg_fatal("could not close TOC file: %m");
ctx->dataFH = NULL;
}
}
ctx->dataFH = cfopen_write(fname, PG_BINARY_W, AH->compression);
if (ctx->dataFH == NULL)
- fatal("could not open output file \"%s\": %m", fname);
+ pg_fatal("could not open output file \"%s\": %m", fname);
}
/*
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- fatal("could not write to output file: %s",
- get_cfp_error(ctx->dataFH));
+ pg_fatal("could not write to output file: %s",
+ get_cfp_error(ctx->dataFH));
}
}
/* Close the file */
if (cfclose(ctx->dataFH) != 0)
- fatal("could not close data file: %m");
+ pg_fatal("could not close data file: %m");
ctx->dataFH = NULL;
}
cfp = cfopen_read(filename, PG_BINARY_R);
if (!cfp)
- fatal("could not open input file \"%s\": %m", filename);
+ pg_fatal("could not open input file \"%s\": %m", filename);
buf = pg_malloc(ZLIB_OUT_SIZE);
buflen = ZLIB_OUT_SIZE;
free(buf);
if (cfclose(cfp) != 0)
- fatal("could not close data file \"%s\": %m", filename);
+ pg_fatal("could not close data file \"%s\": %m", filename);
}
/*
ctx->blobsTocFH = cfopen_read(tocfname, PG_BINARY_R);
if (ctx->blobsTocFH == NULL)
- fatal("could not open large object TOC file \"%s\" for input: %m",
- tocfname);
+ pg_fatal("could not open large object TOC file \"%s\" for input: %m",
+ tocfname);
/* Read the blobs TOC file line-by-line, and process each blob */
while ((cfgets(ctx->blobsTocFH, line, MAXPGPATH)) != NULL)
/* Can't overflow because line and blobfname are the same length */
if (sscanf(line, "%u %" CppAsString2(MAXPGPATH) "s\n", &oid, blobfname) != 2)
- fatal("invalid line in large object TOC file \"%s\": \"%s\"",
- tocfname, line);
+ pg_fatal("invalid line in large object TOC file \"%s\": \"%s\"",
+ tocfname, line);
StartRestoreBlob(AH, oid, AH->public.ropt->dropSchema);
snprintf(path, MAXPGPATH, "%s/%s", ctx->directory, blobfname);
EndRestoreBlob(AH, oid);
}
if (!cfeof(ctx->blobsTocFH))
- fatal("error reading large object TOC file \"%s\"",
- tocfname);
+ pg_fatal("error reading large object TOC file \"%s\"",
+ tocfname);
if (cfclose(ctx->blobsTocFH) != 0)
- fatal("could not close large object TOC file \"%s\": %m",
- tocfname);
+ pg_fatal("could not close large object TOC file \"%s\": %m",
+ tocfname);
ctx->blobsTocFH = NULL;
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- fatal("could not write to output file: %s",
- get_cfp_error(ctx->dataFH));
+ pg_fatal("could not write to output file: %s",
+ get_cfp_error(ctx->dataFH));
}
return 1;
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- fatal("could not write to output file: %s",
- get_cfp_error(ctx->dataFH));
+ pg_fatal("could not write to output file: %s",
+ get_cfp_error(ctx->dataFH));
}
}
* exit on short reads.
*/
if (cfread(buf, len, ctx->dataFH) != len)
- fatal("could not read from input file: end of file");
+ pg_fatal("could not read from input file: end of file");
}
/*
/* The TOC is always created uncompressed */
tocFH = cfopen_write(fname, PG_BINARY_W, 0);
if (tocFH == NULL)
- fatal("could not open output file \"%s\": %m", fname);
+ pg_fatal("could not open output file \"%s\": %m", fname);
ctx->dataFH = tocFH;
/*
AH->format = archDirectory;
WriteToc(AH);
if (cfclose(tocFH) != 0)
- fatal("could not close TOC file: %m");
+ pg_fatal("could not close TOC file: %m");
WriteDataChunks(AH, ctx->pstate);
ParallelBackupEnd(AH, ctx->pstate);
/* The blob TOC file is never compressed */
ctx->blobsTocFH = cfopen_write(fname, "ab", 0);
if (ctx->blobsTocFH == NULL)
- fatal("could not open output file \"%s\": %m", fname);
+ pg_fatal("could not open output file \"%s\": %m", fname);
}
/*
ctx->dataFH = cfopen_write(fname, PG_BINARY_W, AH->compression);
if (ctx->dataFH == NULL)
- fatal("could not open output file \"%s\": %m", fname);
+ pg_fatal("could not open output file \"%s\": %m", fname);
}
/*
/* Close the BLOB data file itself */
if (cfclose(ctx->dataFH) != 0)
- fatal("could not close blob data file: %m");
+ pg_fatal("could not close blob data file: %m");
ctx->dataFH = NULL;
/* register the blob in blobs.toc */
len = snprintf(buf, sizeof(buf), "%u blob_%u.dat\n", oid, oid);
if (cfwrite(buf, len, ctx->blobsTocFH) != len)
- fatal("could not write to blobs TOC file");
+ pg_fatal("could not write to blobs TOC file");
}
/*
lclContext *ctx = (lclContext *) AH->formatData;
if (cfclose(ctx->blobsTocFH) != 0)
- fatal("could not close blobs TOC file: %m");
+ pg_fatal("could not close blobs TOC file: %m");
ctx->blobsTocFH = NULL;
}
dname = ctx->directory;
if (strlen(dname) + 1 + strlen(relativeFilename) + 1 > MAXPGPATH)
- fatal("file name too long: \"%s\"", dname);
+ pg_fatal("file name too long: \"%s\"", dname);
strcpy(buf, dname);
strcat(buf, "/");
* Now prevent reading...
*/
if (AH->mode == archModeRead)
- fatal("this format cannot be read");
+ pg_fatal("this format cannot be read");
}
/*
bool old_blob_style = (AH->version < K_VERS_1_12);
if (oid == 0)
- fatal("invalid OID for large object");
+ pg_fatal("invalid OID for large object");
/* With an old archive we must do drop and create logic here */
if (old_blob_style && AH->public.ropt->dropSchema)
{
ctx->tarFH = fopen(AH->fSpec, PG_BINARY_W);
if (ctx->tarFH == NULL)
- fatal("could not open TOC file \"%s\" for output: %m",
- AH->fSpec);
+ pg_fatal("could not open TOC file \"%s\" for output: %m",
+ AH->fSpec);
}
else
{
ctx->tarFH = stdout;
if (ctx->tarFH == NULL)
- fatal("could not open TOC file for output: %m");
+ pg_fatal("could not open TOC file for output: %m");
}
ctx->tarFHpos = 0;
* positioning.
*/
if (AH->compression != 0)
- fatal("compression is not supported by tar archive format");
+ pg_fatal("compression is not supported by tar archive format");
}
else
{ /* Read Mode */
{
ctx->tarFH = fopen(AH->fSpec, PG_BINARY_R);
if (ctx->tarFH == NULL)
- fatal("could not open TOC file \"%s\" for input: %m",
- AH->fSpec);
+ pg_fatal("could not open TOC file \"%s\" for input: %m",
+ AH->fSpec);
}
else
{
ctx->tarFH = stdin;
if (ctx->tarFH == NULL)
- fatal("could not open TOC file for input: %m");
+ pg_fatal("could not open TOC file for input: %m");
}
/*
* Couldn't find the requested file. Future: do SEEK(0) and
* retry.
*/
- fatal("could not find file \"%s\" in archive", filename);
+ pg_fatal("could not find file \"%s\" in archive", filename);
}
else
{
if (AH->compression == 0)
tm->nFH = ctx->tarFH;
else
- fatal("compression is not supported by tar archive format");
+ pg_fatal("compression is not supported by tar archive format");
}
else
{
#endif
if (tm->tmpFH == NULL)
- fatal("could not generate temporary file name: %m");
+ pg_fatal("could not generate temporary file name: %m");
umask(old_umask);
if (AH->compression == 0)
tm->nFH = tm->tmpFH;
else
- fatal("compression is not supported by tar archive format");
+ pg_fatal("compression is not supported by tar archive format");
tm->AH = AH;
tm->targetFile = pg_strdup(filename);
tarClose(ArchiveHandle *AH, TAR_MEMBER *th)
{
if (AH->compression != 0)
- fatal("compression is not supported by tar archive format");
+ pg_fatal("compression is not supported by tar archive format");
if (th->mode == 'w')
_tarAddFile(AH, th); /* This will close the temp file */
pos1 = (int) strlen(te->copyStmt) - 13;
if (pos1 < 6 || strncmp(te->copyStmt, "COPY ", 5) != 0 ||
strcmp(te->copyStmt + pos1, " FROM stdin;\n") != 0)
- fatal("unexpected COPY statement syntax: \"%s\"",
- te->copyStmt);
+ pg_fatal("unexpected COPY statement syntax: \"%s\"",
+ te->copyStmt);
/* Emit all but the FROM part ... */
ahwrite(te->copyStmt, 1, pos1, AH);
res = tarRead(&c, 1, ctx->FH);
if (res != 1)
/* We already would have exited for errors on reads, must be EOF */
- fatal("could not read from input file: end of file");
+ pg_fatal("could not read from input file: end of file");
ctx->filePos += 1;
return c;
}
if (tarRead(buf, len, ctx->FH) != len)
/* We already would have exited for errors on reads, must be EOF */
- fatal("could not read from input file: end of file");
+ pg_fatal("could not read from input file: end of file");
ctx->filePos += len;
}
char fname[255];
if (oid == 0)
- fatal("invalid OID for large object (%u)", oid);
+ pg_fatal("invalid OID for large object (%u)", oid);
if (AH->compression != 0)
- fatal("compression is not supported by tar archive format");
+ pg_fatal("compression is not supported by tar archive format");
sprintf(fname, "blob_%u.dat", oid);
* Find file len & go back to start.
*/
if (fseeko(tmp, 0, SEEK_END) != 0)
- fatal("error during file seek: %m");
+ pg_fatal("error during file seek: %m");
th->fileLen = ftello(tmp);
if (th->fileLen < 0)
- fatal("could not determine seek position in archive file: %m");
+ pg_fatal("could not determine seek position in archive file: %m");
if (fseeko(tmp, 0, SEEK_SET) != 0)
- fatal("error during file seek: %m");
+ pg_fatal("error during file seek: %m");
_tarWriteHeader(th);
READ_ERROR_EXIT(tmp);
if (fclose(tmp) != 0) /* This *should* delete it... */
- fatal("could not close temporary file: %m");
+ pg_fatal("could not close temporary file: %m");
if (len != th->fileLen)
- fatal("actual file length (%lld) does not match expected (%lld)",
- (long long) len, (long long) th->fileLen);
+ pg_fatal("actual file length (%lld) does not match expected (%lld)",
+ (long long) len, (long long) th->fileLen);
pad = tarPaddingBytesRequired(len);
for (i = 0; i < pad; i++)
if (!_tarGetHeader(AH, th))
{
if (filename)
- fatal("could not find header for file \"%s\" in tar archive", filename);
+ pg_fatal("could not find header for file \"%s\" in tar archive", filename);
else
{
/*
id = atoi(th->targetFile);
if ((TocIDRequired(AH, id) & REQ_DATA) != 0)
- fatal("restoring data out of order is not supported in this archive format: "
- "\"%s\" is required, but comes before \"%s\" in the archive file.",
- th->targetFile, filename);
+ pg_fatal("restoring data out of order is not supported in this archive format: "
+ "\"%s\" is required, but comes before \"%s\" in the archive file.",
+ th->targetFile, filename);
/* Header doesn't match, so read to next header */
len = th->fileLen;
_tarReadRaw(AH, &header[0], TAR_BLOCK_SIZE, NULL, ctx->tarFH);
if (!_tarGetHeader(AH, th))
- fatal("could not find header for file \"%s\" in tar archive", filename);
+ pg_fatal("could not find header for file \"%s\" in tar archive", filename);
}
ctx->tarNextMember = ctx->tarFHpos + th->fileLen
return 0;
if (len != TAR_BLOCK_SIZE)
- fatal(ngettext("incomplete tar header found (%lu byte)",
- "incomplete tar header found (%lu bytes)",
- len),
- (unsigned long) len);
+ pg_fatal(ngettext("incomplete tar header found (%lu byte)",
+ "incomplete tar header found (%lu bytes)",
+ len),
+ (unsigned long) len);
/* Calc checksum */
chk = tarChecksum(h);
tag, (unsigned long long) hPos, (unsigned long long) len, sum);
if (chk != sum)
- fatal("corrupt tar header found in %s (expected %d, computed %d) file position %llu",
- tag, sum, chk, (unsigned long long) ftello(ctx->tarFH));
+ pg_fatal("corrupt tar header found in %s (expected %d, computed %d) file position %llu",
+ tag, sum, chk, (unsigned long long) ftello(ctx->tarFH));
th->targetFile = pg_strdup(tag);
th->fileLen = len;
else
{
pg_log_error("unrecognized section name: \"%s\"", arg);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
}
on_exit_nicely(on_exit_nicely_callback function, void *arg)
{
if (on_exit_nicely_index >= MAX_ON_EXIT_NICELY)
- {
- pg_log_fatal("out of on_exit_nicely slots");
- exit_nicely(1);
- }
+ pg_fatal("out of on_exit_nicely slots");
on_exit_nicely_list[on_exit_nicely_index].function = function;
on_exit_nicely_list[on_exit_nicely_index].arg = arg;
on_exit_nicely_index++;
extern void on_exit_nicely(on_exit_nicely_callback function, void *arg);
extern void exit_nicely(int code) pg_attribute_noreturn();
-#define fatal(...) do { pg_log_error(__VA_ARGS__); exit_nicely(1); } while(0)
+/* In pg_dump, we modify pg_fatal to call exit_nicely instead of exit */
+#undef pg_fatal
+#define pg_fatal(...) do { \
+ if (likely(__pg_log_level <= PG_LOG_ERROR)) \
+ pg_log_generic(PG_LOG_ERROR, PG_LOG_PRIMARY, __VA_ARGS__); \
+ exit_nicely(1); \
+ } while(0)
#endif /* PG_BACKUP_UTILS_H */
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
}
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
dopt.sequence_data = 1;
if (dopt.dataOnly && dopt.schemaOnly)
- {
- pg_log_error("options -s/--schema-only and -a/--data-only cannot be used together");
- exit_nicely(1);
- }
+ pg_fatal("options -s/--schema-only and -a/--data-only cannot be used together");
if (dopt.schemaOnly && foreign_servers_include_patterns.head != NULL)
- fatal("options -s/--schema-only and --include-foreign-data cannot be used together");
+ pg_fatal("options -s/--schema-only and --include-foreign-data cannot be used together");
if (numWorkers > 1 && foreign_servers_include_patterns.head != NULL)
- fatal("option --include-foreign-data is not supported with parallel backup");
+ pg_fatal("option --include-foreign-data is not supported with parallel backup");
if (dopt.dataOnly && dopt.outputClean)
- {
- pg_log_error("options -c/--clean and -a/--data-only cannot be used together");
- exit_nicely(1);
- }
+ pg_fatal("options -c/--clean and -a/--data-only cannot be used together");
if (dopt.if_exists && !dopt.outputClean)
- fatal("option --if-exists requires option -c/--clean");
+ pg_fatal("option --if-exists requires option -c/--clean");
/*
* --inserts are already implied above if --column-inserts or
* --rows-per-insert were specified.
*/
if (dopt.do_nothing && dopt.dump_inserts == 0)
- fatal("option --on-conflict-do-nothing requires option --inserts, --rows-per-insert, or --column-inserts");
+ pg_fatal("option --on-conflict-do-nothing requires option --inserts, --rows-per-insert, or --column-inserts");
/* Identify archive format to emit */
archiveFormat = parseArchiveFormat(format, &archiveMode);
/* Parallel backup only in the directory archive format so far */
if (archiveFormat != archDirectory && numWorkers > 1)
- fatal("parallel backup only supported by the directory format");
+ pg_fatal("parallel backup only supported by the directory format");
/* Open the output file */
fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
&schema_include_oids,
strict_names);
if (schema_include_oids.head == NULL)
- fatal("no matching schemas were found");
+ pg_fatal("no matching schemas were found");
}
expand_schema_name_patterns(fout, &schema_exclude_patterns,
&schema_exclude_oids,
&table_include_oids,
strict_names);
if (table_include_oids.head == NULL)
- fatal("no matching tables were found");
+ pg_fatal("no matching tables were found");
}
expand_table_name_patterns(fout, &table_exclude_patterns,
&table_exclude_oids,
&extension_include_oids,
strict_names);
if (extension_include_oids.head == NULL)
- fatal("no matching extensions were found");
+ pg_fatal("no matching extensions were found");
}
/*
if (dumpencoding)
{
if (PQsetClientEncoding(conn, dumpencoding) < 0)
- fatal("invalid client encoding \"%s\" specified",
- dumpencoding);
+ pg_fatal("invalid client encoding \"%s\" specified",
+ dumpencoding);
}
/*
else if (AH->numWorkers > 1)
{
if (AH->isStandby && AH->remoteVersion < 100000)
- fatal("parallel dumps from standby servers are not supported by this server version");
+ pg_fatal("parallel dumps from standby servers are not supported by this server version");
AH->sync_snapshot_id = get_synchronized_snapshot(AH);
}
}
else if (pg_strcasecmp(format, "tar") == 0)
archiveFormat = archTar;
else
- fatal("invalid output format \"%s\" specified", format);
+ pg_fatal("invalid output format \"%s\" specified", format);
return archiveFormat;
}
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
if (strict_names && PQntuples(res) == 0)
- fatal("no matching schemas were found for pattern \"%s\"", cell->val);
+ pg_fatal("no matching schemas were found for pattern \"%s\"", cell->val);
for (i = 0; i < PQntuples(res); i++)
{
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
if (strict_names && PQntuples(res) == 0)
- fatal("no matching extensions were found for pattern \"%s\"", cell->val);
+ pg_fatal("no matching extensions were found for pattern \"%s\"", cell->val);
for (i = 0; i < PQntuples(res); i++)
{
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
if (PQntuples(res) == 0)
- fatal("no matching foreign servers were found for pattern \"%s\"", cell->val);
+ pg_fatal("no matching foreign servers were found for pattern \"%s\"", cell->val);
for (i = 0; i < PQntuples(res); i++)
simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
PQclear(ExecuteSqlQueryForSingleRow(fout,
ALWAYS_SECURE_SEARCH_PATH_SQL));
if (strict_names && PQntuples(res) == 0)
- fatal("no matching tables were found for pattern \"%s\"", cell->val);
+ pg_fatal("no matching tables were found for pattern \"%s\"", cell->val);
for (i = 0; i < PQntuples(res); i++)
{
{
/* copy data transfer failed */
pg_log_error("Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.", classname);
- pg_log_error("Error message from server: %s", PQerrorMessage(conn));
- pg_log_error("The command was: %s", q->data);
+ pg_log_error_detail("Error message from server: %s", PQerrorMessage(conn));
+ pg_log_error_detail("Command was: %s", q->data);
exit_nicely(1);
}
if (PQresultStatus(res) != PGRES_COMMAND_OK)
{
pg_log_error("Dumping the contents of table \"%s\" failed: PQgetResult() failed.", classname);
- pg_log_error("Error message from server: %s", PQerrorMessage(conn));
- pg_log_error("The command was: %s", q->data);
+ pg_log_error_detail("Error message from server: %s", PQerrorMessage(conn));
+ pg_log_error_detail("Command was: %s", q->data);
exit_nicely(1);
}
PQclear(res);
/* cross-check field count, allowing for dummy NULL if any */
if (nfields != PQnfields(res) &&
!(nfields == 0 && PQnfields(res) == 1))
- fatal("wrong number of fields retrieved from table \"%s\"",
- tbinfo->dobj.name);
+ pg_fatal("wrong number of fields retrieved from table \"%s\"",
+ tbinfo->dobj.name);
/*
* First time through, we build as much of the INSERT statement as
else if (datlocprovider[0] == 'i')
appendPQExpBufferStr(creaQry, "icu");
else
- fatal("unrecognized locale provider: %s",
- datlocprovider);
+ pg_fatal("unrecognized locale provider: %s",
+ datlocprovider);
if (strlen(collate) > 0 && strcmp(collate, ctype) == 0)
{
"SELECT pg_catalog.current_schemas(false)");
if (!parsePGArray(PQgetvalue(res, 0, 0), &schemanames, &nschemanames))
- fatal("could not parse result of current_schemas()");
+ pg_fatal("could not parse result of current_schemas()");
/*
* We use set_config(), not a simple "SET search_path" command, because
/* Open the BLOB */
loFd = lo_open(conn, blobOid, INV_READ);
if (loFd == -1)
- fatal("could not open large object %u: %s",
- blobOid, PQerrorMessage(conn));
+ pg_fatal("could not open large object %u: %s",
+ blobOid, PQerrorMessage(conn));
StartBlob(fout, blobOid);
{
cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
if (cnt < 0)
- fatal("error reading large object %u: %s",
- blobOid, PQerrorMessage(conn));
+ pg_fatal("error reading large object %u: %s",
+ blobOid, PQerrorMessage(conn));
WriteData(fout, buf, cnt);
} while (cnt > 0);
else if (polinfo->polcmd == 'd')
cmd = " FOR DELETE";
else
- {
- pg_log_error("unexpected policy command type: %c",
- polinfo->polcmd);
- exit_nicely(1);
- }
+ pg_fatal("unexpected policy command type: %c",
+ polinfo->polcmd);
query = createPQExpBuffer();
delqry = createPQExpBuffer();
if (!parsePGArray(PQgetvalue(res, i, i_prattrs),
&attnames, &nattnames))
- fatal("could not parse %s array", "prattrs");
+ pg_fatal("could not parse %s array", "prattrs");
attribs = createPQExpBuffer();
for (int k = 0; k < nattnames; k++)
{
/* Build list of quoted publications and append them to query. */
if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
- fatal("could not parse %s array", "subpublications");
+ pg_fatal("could not parse %s array", "subpublications");
publications = createPQExpBuffer();
for (i = 0; i < npubnames; i++)
extobj = NULL;
}
if (extobj == NULL)
- fatal("could not find parent extension for %s %s",
- objtype, objname);
+ pg_fatal("could not find parent extension for %s %s",
+ objtype, objname);
appendPQExpBufferStr(upgrade_buffer,
"\n-- For binary upgrade, handle extension membership the hard way\n");
nsinfo = findNamespaceByOid(nsoid);
if (nsinfo == NULL)
- fatal("schema with OID %u does not exist", nsoid);
+ pg_fatal("schema with OID %u does not exist", nsoid);
return nsinfo;
}
owning_tab = findTableByOid(seqinfo->owning_tab);
if (owning_tab == NULL)
- fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
- seqinfo->owning_tab, seqinfo->dobj.catId.oid);
+ pg_fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
+ seqinfo->owning_tab, seqinfo->dobj.catId.oid);
/*
* Only dump identity sequences if we're going to dump the table that
break;
}
if (curtblindx >= numTables)
- fatal("unrecognized table OID %u", indrelid);
+ pg_fatal("unrecognized table OID %u", indrelid);
/* cross-check that we only got requested tables */
if (!tbinfo->hasindex ||
!tbinfo->interesting)
- fatal("unexpected index data for table \"%s\"",
- tbinfo->dobj.name);
+ pg_fatal("unexpected index data for table \"%s\"",
+ tbinfo->dobj.name);
/* Save data for this table */
tbinfo->indexes = indxinfo + j;
break;
}
if (curtblindx >= numTables)
- fatal("unrecognized table OID %u", conrelid);
+ pg_fatal("unrecognized table OID %u", conrelid);
}
constrinfo[j].dobj.objType = DO_FK_CONSTRAINT;
ruletableoid = atooid(PQgetvalue(res, i, i_ruletable));
ruleinfo[i].ruletable = findTableByOid(ruletableoid);
if (ruleinfo[i].ruletable == NULL)
- fatal("failed sanity check, parent table with OID %u of pg_rewrite entry with OID %u not found",
- ruletableoid, ruleinfo[i].dobj.catId.oid);
+ pg_fatal("failed sanity check, parent table with OID %u of pg_rewrite entry with OID %u not found",
+ ruletableoid, ruleinfo[i].dobj.catId.oid);
ruleinfo[i].dobj.namespace = ruleinfo[i].ruletable->dobj.namespace;
ruleinfo[i].dobj.dump = ruleinfo[i].ruletable->dobj.dump;
ruleinfo[i].ev_type = *(PQgetvalue(res, i, i_ev_type));
break;
}
if (curtblindx >= numTables)
- fatal("unrecognized table OID %u", tgrelid);
+ pg_fatal("unrecognized table OID %u", tgrelid);
/* Save data for this table */
tbinfo->triggers = tginfo + j;
if (OidIsValid(tginfo[j].tgconstrrelid))
{
if (PQgetisnull(res, j, i_tgconstrrelname))
- fatal("query produced null referenced table name for foreign key trigger \"%s\" on table \"%s\" (OID of table: %u)",
- tginfo[j].dobj.name,
- tbinfo->dobj.name,
- tginfo[j].tgconstrrelid);
+ pg_fatal("query produced null referenced table name for foreign key trigger \"%s\" on table \"%s\" (OID of table: %u)",
+ tginfo[j].dobj.name,
+ tbinfo->dobj.name,
+ tginfo[j].tgconstrrelid);
tginfo[j].tgconstrrelname = pg_strdup(PQgetvalue(res, j, i_tgconstrrelname));
}
else
break;
}
if (curtblindx >= numTables)
- fatal("unrecognized table OID %u", attrelid);
+ pg_fatal("unrecognized table OID %u", attrelid);
/* cross-check that we only got requested tables */
if (tbinfo->relkind == RELKIND_SEQUENCE ||
!tbinfo->interesting)
- fatal("unexpected column data for table \"%s\"",
- tbinfo->dobj.name);
+ pg_fatal("unexpected column data for table \"%s\"",
+ tbinfo->dobj.name);
/* Save data for this table */
tbinfo->numatts = numatts;
for (int j = 0; j < numatts; j++, r++)
{
if (j + 1 != atoi(PQgetvalue(res, r, i_attnum)))
- fatal("invalid column numbering in table \"%s\"",
- tbinfo->dobj.name);
+ pg_fatal("invalid column numbering in table \"%s\"",
+ tbinfo->dobj.name);
tbinfo->attnames[j] = pg_strdup(PQgetvalue(res, r, i_attname));
tbinfo->atttypnames[j] = pg_strdup(PQgetvalue(res, r, i_atttypname));
tbinfo->atttypmod[j] = atoi(PQgetvalue(res, r, i_atttypmod));
break;
}
if (curtblindx >= numTables)
- fatal("unrecognized table OID %u", adrelid);
+ pg_fatal("unrecognized table OID %u", adrelid);
}
if (adnum <= 0 || adnum > tbinfo->numatts)
- fatal("invalid adnum value %d for table \"%s\"",
- adnum, tbinfo->dobj.name);
+ pg_fatal("invalid adnum value %d for table \"%s\"",
+ adnum, tbinfo->dobj.name);
/*
* dropped columns shouldn't have defaults, but just in case,
break;
}
if (curtblindx >= numTables)
- fatal("unrecognized table OID %u", conrelid);
+ pg_fatal("unrecognized table OID %u", conrelid);
if (numcons != tbinfo->ncheck)
{
"expected %d check constraints on table \"%s\" but found %d",
tbinfo->ncheck),
tbinfo->ncheck, tbinfo->dobj.name, numcons);
- pg_log_error("(The system catalogs might be corrupted.)");
+ pg_log_error_hint("The system catalogs might be corrupted.");
exit_nicely(1);
}
}
}
- fatal("role with OID %u does not exist", roleoid);
+ pg_fatal("role with OID %u does not exist", roleoid);
return NULL; /* keep compiler quiet */
}
if (*proconfig)
{
if (!parsePGArray(proconfig, &configitems, &nconfigitems))
- fatal("could not parse %s array", "proconfig");
+ pg_fatal("could not parse %s array", "proconfig");
}
else
{
else if (provolatile[0] == PROVOLATILE_STABLE)
appendPQExpBufferStr(q, " STABLE");
else if (provolatile[0] != PROVOLATILE_VOLATILE)
- fatal("unrecognized provolatile value for function \"%s\"",
- finfo->dobj.name);
+ pg_fatal("unrecognized provolatile value for function \"%s\"",
+ finfo->dobj.name);
}
if (proisstrict[0] == 't')
else if (proparallel[0] == PROPARALLEL_RESTRICTED)
appendPQExpBufferStr(q, " PARALLEL RESTRICTED");
else if (proparallel[0] != PROPARALLEL_UNSAFE)
- fatal("unrecognized proparallel value for function \"%s\"",
- finfo->dobj.name);
+ pg_fatal("unrecognized proparallel value for function \"%s\"",
+ finfo->dobj.name);
}
for (i = 0; i < nconfigitems; i++)
{
funcInfo = findFuncByOid(cast->castfunc);
if (funcInfo == NULL)
- fatal("could not find function definition for function with OID %u",
- cast->castfunc);
+ pg_fatal("could not find function definition for function with OID %u",
+ cast->castfunc);
}
defqry = createPQExpBuffer();
{
fromsqlFuncInfo = findFuncByOid(transform->trffromsql);
if (fromsqlFuncInfo == NULL)
- fatal("could not find function definition for function with OID %u",
- transform->trffromsql);
+ pg_fatal("could not find function definition for function with OID %u",
+ transform->trffromsql);
}
if (OidIsValid(transform->trftosql))
{
tosqlFuncInfo = findFuncByOid(transform->trftosql);
if (tosqlFuncInfo == NULL)
- fatal("could not find function definition for function with OID %u",
- transform->trftosql);
+ pg_fatal("could not find function definition for function with OID %u",
+ transform->trftosql);
}
defqry = createPQExpBuffer();
/* to allow dumping pg_catalog; not accepted on input */
appendPQExpBufferStr(q, "default");
else
- fatal("unrecognized collation provider: %s",
- collprovider);
+ pg_fatal("unrecognized collation provider: %s",
+ collprovider);
if (strcmp(PQgetvalue(res, 0, i_collisdeterministic), "f") == 0)
appendPQExpBufferStr(q, ", deterministic = false");
appendPQExpBufferStr(details, ",\n FINALFUNC_MODIFY = READ_WRITE");
break;
default:
- fatal("unrecognized aggfinalmodify value for aggregate \"%s\"",
- agginfo->aggfn.dobj.name);
+ pg_fatal("unrecognized aggfinalmodify value for aggregate \"%s\"",
+ agginfo->aggfn.dobj.name);
break;
}
}
appendPQExpBufferStr(details, ",\n MFINALFUNC_MODIFY = READ_WRITE");
break;
default:
- fatal("unrecognized aggmfinalmodify value for aggregate \"%s\"",
- agginfo->aggfn.dobj.name);
+ pg_fatal("unrecognized aggmfinalmodify value for aggregate \"%s\"",
+ agginfo->aggfn.dobj.name);
break;
}
}
else if (proparallel[0] == PROPARALLEL_RESTRICTED)
appendPQExpBufferStr(details, ",\n PARALLEL = restricted");
else if (proparallel[0] != PROPARALLEL_UNSAFE)
- fatal("unrecognized proparallel value for function \"%s\"",
- agginfo->aggfn.dobj.name);
+ pg_fatal("unrecognized proparallel value for function \"%s\"",
+ agginfo->aggfn.dobj.name);
}
appendPQExpBuffer(delq, "DROP AGGREGATE %s.%s;\n",
break;
default:
/* shouldn't get here */
- fatal("unrecognized object type in default privileges: %d",
- (int) daclinfo->defaclobjtype);
+ pg_fatal("unrecognized object type in default privileges: %d",
+ (int) daclinfo->defaclobjtype);
type = ""; /* keep compiler quiet */
}
daclinfo->defaclrole,
fout->remoteVersion,
q))
- fatal("could not parse default ACL list (%s)",
- daclinfo->dacl.acl);
+ pg_fatal("could not parse default ACL list (%s)",
+ daclinfo->dacl.acl);
if (daclinfo->dobj.dump & DUMP_COMPONENT_ACL)
ArchiveEntry(fout, daclinfo->dobj.catId, daclinfo->dobj.dumpId,
if (!buildACLCommands(name, subname, nspname, type,
initprivs, acldefault, owner,
"", fout->remoteVersion, sql))
- fatal("could not parse initial ACL list (%s) or default (%s) for object \"%s\" (%s)",
- initprivs, acldefault, name, type);
+ pg_fatal("could not parse initial ACL list (%s) or default (%s) for object \"%s\" (%s)",
+ initprivs, acldefault, name, type);
appendPQExpBufferStr(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\n");
}
if (!buildACLCommands(name, subname, nspname, type,
acls, baseacls, owner,
"", fout->remoteVersion, sql))
- fatal("could not parse ACL list (%s) or default (%s) for object \"%s\" (%s)",
- acls, baseacls, name, type);
+ pg_fatal("could not parse ACL list (%s) or default (%s) for object \"%s\" (%s)",
+ acls, baseacls, name, type);
if (sql->len > 0)
{
if (PQntuples(res) != 1)
{
if (PQntuples(res) < 1)
- fatal("query to obtain definition of view \"%s\" returned no data",
- tbinfo->dobj.name);
+ pg_fatal("query to obtain definition of view \"%s\" returned no data",
+ tbinfo->dobj.name);
else
- fatal("query to obtain definition of view \"%s\" returned more than one definition",
- tbinfo->dobj.name);
+ pg_fatal("query to obtain definition of view \"%s\" returned more than one definition",
+ tbinfo->dobj.name);
}
len = PQgetlength(res, 0, 0);
if (len == 0)
- fatal("definition of view \"%s\" appears to be empty (length zero)",
- tbinfo->dobj.name);
+ pg_fatal("definition of view \"%s\" appears to be empty (length zero)",
+ tbinfo->dobj.name);
/* Strip off the trailing semicolon so that other things may follow. */
Assert(PQgetvalue(res, 0, 0)[len - 1] == ';');
case TableOidAttributeNumber:
return "tableoid";
}
- fatal("invalid column number %d for table \"%s\"",
- attrnum, tblInfo->dobj.name);
+ pg_fatal("invalid column number %d for table \"%s\"",
+ attrnum, tblInfo->dobj.name);
return NULL; /* keep compiler quiet */
}
int j;
if (!parsePGArray(indstatcols, &indstatcolsarray, &nstatcols))
- fatal("could not parse index statistic columns");
+ pg_fatal("could not parse index statistic columns");
if (!parsePGArray(indstatvals, &indstatvalsarray, &nstatvals))
- fatal("could not parse index statistic values");
+ pg_fatal("could not parse index statistic values");
if (nstatcols != nstatvals)
- fatal("mismatched number of columns and values for index statistics");
+ pg_fatal("mismatched number of columns and values for index statistics");
for (j = 0; j < nstatcols; j++)
{
indxinfo = (IndxInfo *) findObjectByDumpId(coninfo->conindex);
if (indxinfo == NULL)
- fatal("missing index for constraint \"%s\"",
- coninfo->dobj.name);
+ pg_fatal("missing index for constraint \"%s\"",
+ coninfo->dobj.name);
if (dopt->binary_upgrade)
binary_upgrade_set_pg_class_oids(fout, q,
}
else
{
- fatal("unrecognized constraint type: %c",
- coninfo->contype);
+ pg_fatal("unrecognized constraint type: %c",
+ coninfo->contype);
}
/* Dump Constraint Comments --- only works for table constraints */
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
if (PQntuples(res) != 1)
- {
- pg_log_error(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
- "query to get data of sequence \"%s\" returned %d rows (expected 1)",
- PQntuples(res)),
- tbinfo->dobj.name, PQntuples(res));
- exit_nicely(1);
- }
+ pg_fatal(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
+ "query to get data of sequence \"%s\" returned %d rows (expected 1)",
+ PQntuples(res)),
+ tbinfo->dobj.name, PQntuples(res));
seqtype = PQgetvalue(res, 0, 0);
startv = PQgetvalue(res, 0, 1);
}
else
{
- fatal("unrecognized sequence type: %s", seqtype);
+ pg_fatal("unrecognized sequence type: %s", seqtype);
default_minv = default_maxv = 0; /* keep compiler quiet */
}
TableInfo *owning_tab = findTableByOid(tbinfo->owning_tab);
if (owning_tab == NULL)
- fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
- tbinfo->owning_tab, tbinfo->dobj.catId.oid);
+ pg_fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
+ tbinfo->owning_tab, tbinfo->dobj.catId.oid);
if (owning_tab->dobj.dump & DUMP_COMPONENT_DEFINITION)
{
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
if (PQntuples(res) != 1)
- {
- pg_log_error(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
- "query to get data of sequence \"%s\" returned %d rows (expected 1)",
- PQntuples(res)),
- tbinfo->dobj.name, PQntuples(res));
- exit_nicely(1);
- }
+ pg_fatal(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
+ "query to get data of sequence \"%s\" returned %d rows (expected 1)",
+ PQntuples(res)),
+ tbinfo->dobj.name, PQntuples(res));
last = PQgetvalue(res, 0, 0);
called = (strcmp(PQgetvalue(res, 0, 1), "t") == 0);
else if (TRIGGER_FOR_INSTEAD(tginfo->tgtype))
appendPQExpBufferStr(query, "INSTEAD OF");
else
- {
- pg_log_error("unexpected tgtype value: %d", tginfo->tgtype);
- exit_nicely(1);
- }
+ pg_fatal("unexpected tgtype value: %d", tginfo->tgtype);
findx = 0;
if (TRIGGER_FOR_INSERT(tginfo->tgtype))
if (p + tlen >= tgargs + lentgargs)
{
/* hm, not found before end of bytea value... */
- pg_log_error("invalid argument string (%s) for trigger \"%s\" on table \"%s\"",
- tginfo->tgargs,
- tginfo->dobj.name,
- tbinfo->dobj.name);
- exit_nicely(1);
+ pg_fatal("invalid argument string (%s) for trigger \"%s\" on table \"%s\"",
+ tginfo->tgargs,
+ tginfo->dobj.name,
+ tbinfo->dobj.name);
}
if (findx > 0)
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
if (PQntuples(res) != 1)
- {
- pg_log_error("query to get rule \"%s\" for table \"%s\" failed: wrong number of rows returned",
- rinfo->dobj.name, tbinfo->dobj.name);
- exit_nicely(1);
- }
+ pg_fatal("query to get rule \"%s\" for table \"%s\" failed: wrong number of rows returned",
+ rinfo->dobj.name, tbinfo->dobj.name);
printfPQExpBuffer(cmd, "%s\n", PQgetvalue(res, 0, 0));
int j;
if (!parsePGArray(extconfig, &extconfigarray, &nconfigitems))
- fatal("could not parse %s array", "extconfig");
+ pg_fatal("could not parse %s array", "extconfig");
if (!parsePGArray(extcondition, &extconditionarray, &nconditionitems))
- fatal("could not parse %s array", "extcondition");
+ pg_fatal("could not parse %s array", "extcondition");
if (nconfigitems != nconditionitems)
- fatal("mismatched number of configurations and conditions for extension");
+ pg_fatal("mismatched number of configurations and conditions for extension");
for (j = 0; j < nconfigitems; j++)
{
obj = objs[i];
j = obj->dumpId;
if (j <= 0 || j > maxDumpId)
- fatal("invalid dumpId %d", j);
+ pg_fatal("invalid dumpId %d", j);
idMap[j] = i;
for (j = 0; j < obj->nDeps; j++)
{
k = obj->dependencies[j];
if (k <= 0 || k > maxDumpId)
- fatal("invalid dependency %d", k);
+ pg_fatal("invalid dependency %d", k);
beforeConstraints[k]++;
}
}
/* We'd better have fixed at least one loop */
if (!fixedloop)
- fatal("could not identify dependency loop");
+ pg_fatal("could not identify dependency loop");
free(workspace);
free(searchFailed);
"there are circular foreign-key constraints among these tables:",
nLoop));
for (i = 0; i < nLoop; i++)
- pg_log_generic(PG_LOG_INFO, " %s", loop[i]->name);
- pg_log_generic(PG_LOG_INFO, "You might not be able to restore the dump without using --disable-triggers or temporarily dropping the constraints.");
- pg_log_generic(PG_LOG_INFO, "Consider using a full dump instead of a --data-only dump to avoid this problem.");
+ pg_log_info(" %s", loop[i]->name);
+ pg_log_info("You might not be able to restore the dump without using --disable-triggers or temporarily dropping the constraints.");
+ pg_log_info("Consider using a full dump instead of a --data-only dump to avoid this problem.");
if (nLoop > 1)
removeObjectDependency(loop[0], loop[1]->dumpId);
else /* must be a self-dependency */
char buf[1024];
describeDumpableObject(loop[i], buf, sizeof(buf));
- pg_log_generic(PG_LOG_INFO, " %s", buf);
+ pg_log_info(" %s", buf);
}
if (nLoop > 1)
strlcpy(full_path, progname, sizeof(full_path));
if (ret == -1)
- pg_log_error("The program \"%s\" is needed by %s but was not found in the\n"
- "same directory as \"%s\".\n"
- "Check your installation.",
- "pg_dump", progname, full_path);
+ pg_fatal("program \"%s\" is needed by %s but was not found in the same directory as \"%s\"",
+ "pg_dump", progname, full_path);
else
- pg_log_error("The program \"%s\" was found by \"%s\"\n"
- "but was not the same version as %s.\n"
- "Check your installation.",
- "pg_dump", full_path, progname);
- exit_nicely(1);
+ pg_fatal("program \"%s\" was found by \"%s\" but was not the same version as %s",
+ "pg_dump", full_path, progname);
}
pgdumpopts = createPQExpBuffer();
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
}
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
(globals_only || roles_only || tablespaces_only))
{
pg_log_error("option --exclude-database cannot be used together with -g/--globals-only, -r/--roles-only, or -t/--tablespaces-only");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
if (globals_only && roles_only)
{
pg_log_error("options -g/--globals-only and -r/--roles-only cannot be used together");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
if (globals_only && tablespaces_only)
{
pg_log_error("options -g/--globals-only and -t/--tablespaces-only cannot be used together");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
if (if_exists && !output_clean)
- {
- pg_log_error("option --if-exists requires option -c/--clean");
- exit_nicely(1);
- }
+ pg_fatal("option --if-exists requires option -c/--clean");
if (roles_only && tablespaces_only)
{
pg_log_error("options -r/--roles-only and -t/--tablespaces-only cannot be used together");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
prompt_password, false);
if (!conn)
- {
- pg_log_error("could not connect to database \"%s\"", pgdb);
- exit_nicely(1);
- }
+ pg_fatal("could not connect to database \"%s\"", pgdb);
}
else
{
{
pg_log_error("could not connect to databases \"postgres\" or \"template1\"\n"
"Please specify an alternative database.");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
}
{
OPF = fopen(filename, PG_BINARY_W);
if (!OPF)
- {
- pg_log_error("could not open output file \"%s\": %m",
- filename);
- exit_nicely(1);
- }
+ pg_fatal("could not open output file \"%s\": %m",
+ filename);
}
else
OPF = stdout;
if (dumpencoding)
{
if (PQsetClientEncoding(conn, dumpencoding) < 0)
- {
- pg_log_error("invalid client encoding \"%s\" specified",
- dumpencoding);
- exit_nicely(1);
- }
+ pg_fatal("invalid client encoding \"%s\" specified",
+ dumpencoding);
}
/*
ret = runPgDump(dbname, create_opts);
if (ret != 0)
- {
- pg_log_error("pg_dump failed on database \"%s\", exiting", dbname);
- exit_nicely(1);
- }
+ pg_fatal("pg_dump failed on database \"%s\", exiting", dbname);
if (filename)
{
OPF = fopen(filename, PG_BINARY_A);
if (!OPF)
- {
- pg_log_error("could not re-open the output file \"%s\": %m",
- filename);
- exit_nicely(1);
- }
+ pg_fatal("could not re-open the output file \"%s\": %m",
+ filename);
}
}
{
conn_opts = PQconninfoParse(connection_string, &err_msg);
if (conn_opts == NULL)
- {
- pg_log_error("%s", err_msg);
- exit_nicely(1);
- }
+ pg_fatal("%s", err_msg);
for (conn_opt = conn_opts; conn_opt->keyword != NULL; conn_opt++)
{
conn = PQconnectdbParams(keywords, values, true);
if (!conn)
- {
- pg_log_error("could not connect to database \"%s\"", dbname);
- exit_nicely(1);
- }
+ pg_fatal("could not connect to database \"%s\"", dbname);
if (PQstatus(conn) == CONNECTION_BAD &&
PQconnectionNeedsPassword(conn) &&
if (PQstatus(conn) == CONNECTION_BAD)
{
if (fail_on_error)
- {
- pg_log_error("%s", PQerrorMessage(conn));
- exit_nicely(1);
- }
+ pg_fatal("%s", PQerrorMessage(conn));
else
{
PQfinish(conn);
/* Check version */
remoteversion_str = PQparameterStatus(conn, "server_version");
if (!remoteversion_str)
- {
- pg_log_error("could not get server version");
- exit_nicely(1);
- }
+ pg_fatal("could not get server version");
server_version = PQserverVersion(conn);
if (server_version == 0)
- {
- pg_log_error("could not parse server version \"%s\"",
- remoteversion_str);
- exit_nicely(1);
- }
+ pg_fatal("could not parse server version \"%s\"",
+ remoteversion_str);
my_version = PG_VERSION_NUM;
&& (server_version < 90200 ||
(server_version / 100) > (my_version / 100)))
{
- pg_log_error("server version: %s; %s version: %s",
- remoteversion_str, progname, PG_VERSION);
pg_log_error("aborting because of server version mismatch");
+ pg_log_error_detail("server version: %s; %s version: %s",
+ remoteversion_str, progname, PG_VERSION);
exit_nicely(1);
}
PQresultStatus(res) != PGRES_TUPLES_OK)
{
pg_log_error("query failed: %s", PQerrorMessage(conn));
- pg_log_error("query was: %s", query);
+ pg_log_error_detail("Query was: %s", query);
PQfinish(conn);
exit_nicely(1);
}
PQresultStatus(res) != PGRES_COMMAND_OK)
{
pg_log_error("query failed: %s", PQerrorMessage(conn));
- pg_log_error("query was: %s", query);
+ pg_log_error_detail("Query was: %s", query);
PQfinish(conn);
exit_nicely(1);
}
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
}
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
/* Complain if neither -f nor -d was specified (except if dumping TOC) */
if (!opts->cparams.dbname && !opts->filename && !opts->tocSummary)
- {
- pg_log_error("one of -d/--dbname and -f/--file must be specified");
- exit_nicely(1);
- }
+ pg_fatal("one of -d/--dbname and -f/--file must be specified");
/* Should get at most one of -d and -f, else user is confused */
if (opts->cparams.dbname)
if (opts->filename)
{
pg_log_error("options -d/--dbname and -f/--file cannot be used together");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
opts->useDB = 1;
}
if (opts->dataOnly && opts->schemaOnly)
- {
- pg_log_error("options -s/--schema-only and -a/--data-only cannot be used together");
- exit_nicely(1);
- }
+ pg_fatal("options -s/--schema-only and -a/--data-only cannot be used together");
if (opts->dataOnly && opts->dropSchema)
- {
- pg_log_error("options -c/--clean and -a/--data-only cannot be used together");
- exit_nicely(1);
- }
+ pg_fatal("options -c/--clean and -a/--data-only cannot be used together");
/*
* -C is not compatible with -1, because we can't create a database inside
* a transaction block.
*/
if (opts->createDB && opts->single_txn)
- {
- pg_log_error("options -C/--create and -1/--single-transaction cannot be used together");
- exit_nicely(1);
- }
+ pg_fatal("options -C/--create and -1/--single-transaction cannot be used together");
/* Can't do single-txn mode with multiple connections */
if (opts->single_txn && numWorkers > 1)
- {
- pg_log_error("cannot specify both --single-transaction and multiple jobs");
- exit_nicely(1);
- }
+ pg_fatal("cannot specify both --single-transaction and multiple jobs");
opts->disable_triggers = disable_triggers;
opts->enable_row_security = enable_row_security;
opts->no_subscriptions = no_subscriptions;
if (if_exists && !opts->dropSchema)
- {
- pg_log_error("option --if-exists requires option -c/--clean");
- exit_nicely(1);
- }
+ pg_fatal("option --if-exists requires option -c/--clean");
opts->if_exists = if_exists;
opts->strict_names = strict_names;
break;
default:
- pg_log_error("unrecognized archive format \"%s\"; please specify \"c\", \"d\", or \"t\"",
- opts->formatName);
- exit_nicely(1);
+ pg_fatal("unrecognized archive format \"%s\"; please specify \"c\", \"d\", or \"t\"",
+ opts->formatName);
}
}
command_fails_like(
[ "pg_dump", '-p', $port, '--include-foreign-data=s0', 'postgres' ],
- qr/foreign-data wrapper \"dummy\" has no handler\r?\npg_dump: error: query was:.*t0/,
+ qr/foreign-data wrapper \"dummy\" has no handler\r?\ndetail: Query was: .*t0/,
"correctly fails to dump a foreign table from a dummy FDW");
command_ok(
/*------
translator: the second %s is a command line argument (-e, etc) */
pg_log_error("invalid argument for option %s", "-e");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (set_xid_epoch == -1)
- {
- pg_log_error("transaction ID epoch (-e) must not be -1");
- exit(1);
- }
+ pg_fatal("transaction ID epoch (-e) must not be -1");
break;
case 'u':
if (endptr == optarg || *endptr != '\0' || errno != 0)
{
pg_log_error("invalid argument for option %s", "-u");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (!TransactionIdIsNormal(set_oldest_xid))
- {
- pg_log_error("oldest transaction ID (-u) must be greater than or equal to %u", FirstNormalTransactionId);
- exit(1);
- }
+ pg_fatal("oldest transaction ID (-u) must be greater than or equal to %u", FirstNormalTransactionId);
break;
case 'x':
if (endptr == optarg || *endptr != '\0' || errno != 0)
{
pg_log_error("invalid argument for option %s", "-x");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (!TransactionIdIsNormal(set_xid))
- {
- pg_log_error("transaction ID (-x) must be greater than or equal to %u", FirstNormalTransactionId);
- exit(1);
- }
+ pg_fatal("transaction ID (-x) must be greater than or equal to %u", FirstNormalTransactionId);
break;
case 'c':
if (endptr == optarg || *endptr != ',' || errno != 0)
{
pg_log_error("invalid argument for option %s", "-c");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
set_newest_commit_ts_xid = strtoul(endptr + 1, &endptr2, 0);
if (endptr2 == endptr + 1 || *endptr2 != '\0' || errno != 0)
{
pg_log_error("invalid argument for option %s", "-c");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (set_oldest_commit_ts_xid < 2 &&
set_oldest_commit_ts_xid != 0)
- {
- pg_log_error("transaction ID (-c) must be either 0 or greater than or equal to 2");
- exit(1);
- }
+ pg_fatal("transaction ID (-c) must be either 0 or greater than or equal to 2");
if (set_newest_commit_ts_xid < 2 &&
set_newest_commit_ts_xid != 0)
- {
- pg_log_error("transaction ID (-c) must be either 0 or greater than or equal to 2");
- exit(1);
- }
+ pg_fatal("transaction ID (-c) must be either 0 or greater than or equal to 2");
break;
case 'o':
if (endptr == optarg || *endptr != '\0' || errno != 0)
{
pg_log_error("invalid argument for option %s", "-o");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (set_oid == 0)
- {
- pg_log_error("OID (-o) must not be 0");
- exit(1);
- }
+ pg_fatal("OID (-o) must not be 0");
break;
case 'm':
if (endptr == optarg || *endptr != ',' || errno != 0)
{
pg_log_error("invalid argument for option %s", "-m");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (endptr2 == endptr + 1 || *endptr2 != '\0' || errno != 0)
{
pg_log_error("invalid argument for option %s", "-m");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (set_mxid == 0)
- {
- pg_log_error("multitransaction ID (-m) must not be 0");
- exit(1);
- }
+ pg_fatal("multitransaction ID (-m) must not be 0");
/*
* XXX It'd be nice to have more sanity checks here, e.g. so
* that oldest is not wrapped around w.r.t. nextMulti.
*/
if (set_oldestmxid == 0)
- {
- pg_log_error("oldest multitransaction ID (-m) must not be 0");
- exit(1);
- }
+ pg_fatal("oldest multitransaction ID (-m) must not be 0");
break;
case 'O':
if (endptr == optarg || *endptr != '\0' || errno != 0)
{
pg_log_error("invalid argument for option %s", "-O");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (set_mxoff == -1)
- {
- pg_log_error("multitransaction offset (-O) must not be -1");
- exit(1);
- }
+ pg_fatal("multitransaction offset (-O) must not be -1");
break;
case 'l':
if (strspn(optarg, "01234567890ABCDEFabcdef") != XLOG_FNAME_LEN)
{
pg_log_error("invalid argument for option %s", "-l");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
errno = 0;
set_wal_segsize = strtol(optarg, &endptr, 10) * 1024 * 1024;
if (endptr == optarg || *endptr != '\0' || errno != 0)
- {
- pg_log_error("argument of --wal-segsize must be a number");
- exit(1);
- }
+ pg_fatal("argument of --wal-segsize must be a number");
if (!IsValidWalSegSize(set_wal_segsize))
- {
- pg_log_error("argument of --wal-segsize must be a power of 2 between 1 and 1024");
- exit(1);
- }
+ pg_fatal("argument of --wal-segsize must be a power of 2 between 1 and 1024");
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (DataDir == NULL)
{
pg_log_error("no data directory specified");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (geteuid() == 0)
{
pg_log_error("cannot be executed by \"root\"");
- pg_log_info("You must run %s as the PostgreSQL superuser.",
- progname);
+ pg_log_error_hint("You must run %s as the PostgreSQL superuser.",
+ progname);
exit(1);
}
#endif
/* Set mask based on PGDATA permissions */
if (!GetDataDirectoryCreatePerm(DataDir))
- {
- pg_log_error("could not read permissions of directory \"%s\": %m",
- DataDir);
- exit(1);
- }
+ pg_fatal("could not read permissions of directory \"%s\": %m",
+ DataDir);
umask(pg_mode_mask);
if (chdir(DataDir) < 0)
- {
- pg_log_error("could not change directory to \"%s\": %m",
- DataDir);
- exit(1);
- }
+ pg_fatal("could not change directory to \"%s\": %m",
+ DataDir);
/* Check that data directory matches our server version */
CheckDataVersion();
if ((fd = open("postmaster.pid", O_RDONLY, 0)) < 0)
{
if (errno != ENOENT)
- {
- pg_log_error("could not open file \"%s\" for reading: %m",
- "postmaster.pid");
- exit(1);
- }
+ pg_fatal("could not open file \"%s\" for reading: %m",
+ "postmaster.pid");
}
else
{
pg_log_error("lock file \"%s\" exists", "postmaster.pid");
- pg_log_info("Is a server running? If not, delete the lock file and try again.");
+ pg_log_error_hint("Is a server running? If not, delete the lock file and try again.");
exit(1);
}
char rawline[64];
if ((ver_fd = fopen(ver_file, "r")) == NULL)
- {
- pg_log_error("could not open file \"%s\" for reading: %m",
- ver_file);
- exit(1);
- }
+ pg_fatal("could not open file \"%s\" for reading: %m",
+ ver_file);
/* version number has to be the first line read */
if (!fgets(rawline, sizeof(rawline), ver_fd))
{
if (!ferror(ver_fd))
- pg_log_error("unexpected empty file \"%s\"", ver_file);
+ pg_fatal("unexpected empty file \"%s\"", ver_file);
else
- pg_log_error("could not read file \"%s\": %m", ver_file);
- exit(1);
+ pg_fatal("could not read file \"%s\": %m", ver_file);
}
/* strip trailing newline and carriage return */
if (strcmp(rawline, PG_MAJORVERSION) != 0)
{
pg_log_error("data directory is of wrong version");
- pg_log_info("File \"%s\" contains \"%s\", which is not compatible with this program's version \"%s\".",
- ver_file, rawline, PG_MAJORVERSION);
+ pg_log_error_detail("File \"%s\" contains \"%s\", which is not compatible with this program's version \"%s\".",
+ ver_file, rawline, PG_MAJORVERSION);
exit(1);
}
pg_log_error("could not open file \"%s\" for reading: %m",
XLOG_CONTROL_FILE);
if (errno == ENOENT)
- pg_log_info("If you are sure the data directory path is correct, execute\n"
- " touch %s\n"
- "and try again.",
- XLOG_CONTROL_FILE);
+ pg_log_error_hint("If you are sure the data directory path is correct, execute\n"
+ " touch %s\n"
+ "and try again.",
+ XLOG_CONTROL_FILE);
exit(1);
}
len = read(fd, buffer, PG_CONTROL_FILE_SIZE);
if (len < 0)
- {
- pg_log_error("could not read file \"%s\": %m", XLOG_CONTROL_FILE);
- exit(1);
- }
+ pg_fatal("could not read file \"%s\": %m", XLOG_CONTROL_FILE);
close(fd);
if (len >= sizeof(ControlFileData) &&
*/
xldir = opendir(XLOGDIR);
if (xldir == NULL)
- {
- pg_log_error("could not open directory \"%s\": %m", XLOGDIR);
- exit(1);
- }
+ pg_fatal("could not open directory \"%s\": %m", XLOGDIR);
while (errno = 0, (xlde = readdir(xldir)) != NULL)
{
}
if (errno)
- {
- pg_log_error("could not read directory \"%s\": %m", XLOGDIR);
- exit(1);
- }
+ pg_fatal("could not read directory \"%s\": %m", XLOGDIR);
if (closedir(xldir))
- {
- pg_log_error("could not close directory \"%s\": %m", XLOGDIR);
- exit(1);
- }
+ pg_fatal("could not close directory \"%s\": %m", XLOGDIR);
/*
* Finally, convert to new xlog seg size, and advance by one to ensure we
xldir = opendir(XLOGDIR);
if (xldir == NULL)
- {
- pg_log_error("could not open directory \"%s\": %m", XLOGDIR);
- exit(1);
- }
+ pg_fatal("could not open directory \"%s\": %m", XLOGDIR);
while (errno = 0, (xlde = readdir(xldir)) != NULL)
{
{
snprintf(path, sizeof(path), "%s/%s", XLOGDIR, xlde->d_name);
if (unlink(path) < 0)
- {
- pg_log_error("could not delete file \"%s\": %m", path);
- exit(1);
- }
+ pg_fatal("could not delete file \"%s\": %m", path);
}
}
if (errno)
- {
- pg_log_error("could not read directory \"%s\": %m", XLOGDIR);
- exit(1);
- }
+ pg_fatal("could not read directory \"%s\": %m", XLOGDIR);
if (closedir(xldir))
- {
- pg_log_error("could not close directory \"%s\": %m", XLOGDIR);
- exit(1);
- }
+ pg_fatal("could not close directory \"%s\": %m", XLOGDIR);
}
xldir = opendir(ARCHSTATDIR);
if (xldir == NULL)
- {
- pg_log_error("could not open directory \"%s\": %m", ARCHSTATDIR);
- exit(1);
- }
+ pg_fatal("could not open directory \"%s\": %m", ARCHSTATDIR);
while (errno = 0, (xlde = readdir(xldir)) != NULL)
{
{
snprintf(path, sizeof(path), "%s/%s", ARCHSTATDIR, xlde->d_name);
if (unlink(path) < 0)
- {
- pg_log_error("could not delete file \"%s\": %m", path);
- exit(1);
- }
+ pg_fatal("could not delete file \"%s\": %m", path);
}
}
if (errno)
- {
- pg_log_error("could not read directory \"%s\": %m", ARCHSTATDIR);
- exit(1);
- }
+ pg_fatal("could not read directory \"%s\": %m", ARCHSTATDIR);
if (closedir(xldir))
- {
- pg_log_error("could not close directory \"%s\": %m", ARCHSTATDIR);
- exit(1);
- }
+ pg_fatal("could not close directory \"%s\": %m", ARCHSTATDIR);
}
fd = open(path, O_RDWR | O_CREAT | O_EXCL | PG_BINARY,
pg_file_create_mode);
if (fd < 0)
- {
- pg_log_error("could not open file \"%s\": %m", path);
- exit(1);
- }
+ pg_fatal("could not open file \"%s\": %m", path);
errno = 0;
if (write(fd, buffer.data, XLOG_BLCKSZ) != XLOG_BLCKSZ)
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- pg_log_error("could not write file \"%s\": %m", path);
- exit(1);
+ pg_fatal("could not write file \"%s\": %m", path);
}
/* Fill the rest of the file with zeroes */
{
if (errno == 0)
errno = ENOSPC;
- pg_log_error("could not write file \"%s\": %m", path);
- exit(1);
+ pg_fatal("could not write file \"%s\": %m", path);
}
}
if (fsync(fd) != 0)
- {
- pg_log_error("fsync error: %m");
- exit(1);
- }
+ pg_fatal("fsync error: %m");
close(fd);
}
CATALOG_NAME = pg_rewind
AVAIL_LANGUAGES = cs de es fr it ja ko pl pt_BR ru sv tr uk zh_CN
GETTEXT_FILES = $(FRONTEND_COMMON_GETTEXT_FILES) datapagemap.c file_ops.c filemap.c libpq_source.c local_source.c parsexlog.c pg_rewind.c timeline.c xlogreader.c ../../common/fe_memutils.c ../../common/restricted_token.c ../../fe_utils/archive.c ../../fe_utils/recovery_gen.c
-GETTEXT_TRIGGERS = $(FRONTEND_COMMON_GETTEXT_TRIGGERS) pg_fatal report_invalid_record:2
+GETTEXT_TRIGGERS = $(FRONTEND_COMMON_GETTEXT_TRIGGERS) report_invalid_record:2
GETTEXT_FLAGS = $(FRONTEND_COMMON_GETTEXT_FLAGS) \
- pg_fatal:1:c-format \
report_invalid_record:2:c-format
{
switch (c)
{
- case '?':
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
- exit(1);
-
case 'c':
restore_wal = true;
break;
case 5:
config_file = pg_strdup(optarg);
break;
+
+ default:
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
+ exit(1);
}
}
if (datadir_source == NULL && connstr_source == NULL)
{
pg_log_error("no source specified (--source-pgdata or --source-server)");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (datadir_source != NULL && connstr_source != NULL)
{
pg_log_error("only one of --source-pgdata or --source-server can be specified");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (datadir_target == NULL)
{
pg_log_error("no target data directory specified (--target-pgdata)");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (writerecoveryconf && connstr_source == NULL)
{
pg_log_error("no source server information (--source-server) specified for --write-recovery-conf");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (geteuid() == 0)
{
pg_log_error("cannot be executed by \"root\"");
- fprintf(stderr, _("You must run %s as the PostgreSQL superuser.\n"),
- progname);
+ pg_log_error_hint("You must run %s as the PostgreSQL superuser.",
+ progname);
exit(1);
}
#endif
/* Set mask based on PGDATA permissions */
if (!GetDataDirectoryCreatePerm(datadir_target))
- {
- pg_log_error("could not read permissions of directory \"%s\": %m",
- datadir_target);
- exit(1);
- }
+ pg_fatal("could not read permissions of directory \"%s\": %m",
+ datadir_target);
umask(pg_mode_mask);
strlcpy(full_path, progname, sizeof(full_path));
if (rc == -1)
- pg_log_error("The program \"%s\" is needed by %s but was not found in the\n"
- "same directory as \"%s\".\n"
- "Check your installation.",
- "postgres", progname, full_path);
+ pg_fatal("program \"%s\" is needed by %s but was not found in the same directory as \"%s\"",
+ "postgres", progname, full_path);
else
- pg_log_error("The program \"%s\" was found by \"%s\"\n"
- "but was not the same version as %s.\n"
- "Check your installation.",
- "postgres", full_path, progname);
- exit(1);
+ pg_fatal("program \"%s\" was found by \"%s\" but was not the same version as %s",
+ "postgres", full_path, progname);
}
/*
strlcpy(full_path, progname, sizeof(full_path));
if (ret == -1)
- pg_fatal("The program \"%s\" is needed by %s but was not found in the\n"
- "same directory as \"%s\".\n"
- "Check your installation.",
+ pg_fatal("program \"%s\" is needed by %s but was not found in the same directory as \"%s\"",
"postgres", progname, full_path);
else
- pg_fatal("The program \"%s\" was found by \"%s\"\n"
- "but was not the same version as %s.\n"
- "Check your installation.",
+ pg_fatal("program \"%s\" was found by \"%s\" but was not the same version as %s",
"postgres", full_path, progname);
}
if (system(postgres_cmd->data) != 0)
{
pg_log_error("postgres single-user mode in target cluster failed");
- pg_fatal("Command was: %s", postgres_cmd->data);
+ pg_log_error_detail("Command was: %s", postgres_cmd->data);
+ exit(1);
}
destroyPQExpBuffer(postgres_cmd);
extern uint64 fetch_size;
extern uint64 fetch_done;
-/* logging support */
-#define pg_fatal(...) do { pg_log_fatal(__VA_ARGS__); exit(1); } while(0)
-
/* in parsexlog.c */
extern void extractPageMap(const char *datadir, XLogRecPtr startpoint,
int tliIndex, XLogRecPtr endpoint,
open my $f, '<', "$standby_pgdata/tst_both_dir/file1";
$last = $_ while (<$f>);
close $f;
-like($last, qr/fatal: size of source file/, "Check error message");
+like($last, qr/error: size of source file/, "Check error message");
done_testing();
{
/* expect a numeric timeline ID as first field of line */
pg_log_error("syntax error in history file: %s", fline);
- pg_log_error("Expected a numeric timeline ID.");
+ pg_log_error_detail("Expected a numeric timeline ID.");
exit(1);
}
if (nfields != 3)
{
pg_log_error("syntax error in history file: %s", fline);
- pg_log_error("Expected a write-ahead log switchpoint location.");
+ pg_log_error_detail("Expected a write-ahead log switchpoint location.");
exit(1);
}
if (entries && tli <= lasttli)
{
pg_log_error("invalid data in history file: %s", fline);
- pg_log_error("Timeline IDs must be in increasing sequence.");
+ pg_log_error_detail("Timeline IDs must be in increasing sequence.");
exit(1);
}
if (entries && targetTLI <= lasttli)
{
pg_log_error("invalid data in history file");
- pg_log_error("Timeline IDs must be less than child timeline's ID.");
+ pg_log_error_detail("Timeline IDs must be less than child timeline's ID.");
exit(1);
}
alarm_triggered = false; \
if (CreateThread(NULL, 0, process_alarm, NULL, 0, NULL) == \
INVALID_HANDLE_VALUE) \
- { \
- pg_log_error("could not create thread for alarm"); \
- exit(1); \
- } \
+ pg_fatal("could not create thread for alarm"); \
gettimeofday(&start_t, NULL); \
} while (0)
#endif
#endif
static void print_elapse(struct timeval start_t, struct timeval stop_t, int ops);
-#define die(msg) do { pg_log_error("%s: %m", _(msg)); exit(1); } while(0)
+#define die(msg) pg_fatal("%s: %m", _(msg))
int
errno != 0 || optval != (unsigned int) optval)
{
pg_log_error("invalid argument for option %s", "--secs-per-test");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
secs_per_test = (unsigned int) optval;
if (secs_per_test == 0)
- {
- pg_log_error("%s must be in range %u..%u",
- "--secs-per-test", 1, UINT_MAX);
- exit(1);
- }
+ pg_fatal("%s must be in range %u..%u",
+ "--secs-per-test", 1, UINT_MAX);
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
- break;
}
}
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
#include "libpq-fe.h"
+/* For now, pg_upgrade does not use common/logging.c; use our own pg_fatal */
+#undef pg_fatal
+
/* Use port in the private/dynamic port number range */
#define DEF_PGUPORT 50432
canonicalize_path(wal_directory);
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
/* Get backup directory name */
if (optind >= argc)
{
- pg_log_fatal("no backup directory specified");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error("no backup directory specified");
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
context.backup_directory = pstrdup(argv[optind++]);
/* Complain if any arguments remain */
if (optind < argc)
{
- pg_log_fatal("too many command-line arguments (first is \"%s\")",
+ pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (find_my_exec(argv[0], full_path) < 0)
strlcpy(full_path, progname, sizeof(full_path));
+
if (ret == -1)
- pg_log_fatal("The program \"%s\" is needed by %s but was not found in the\n"
- "same directory as \"%s\".\n"
- "Check your installation.",
- "pg_waldump", "pg_verifybackup", full_path);
+ pg_fatal("program \"%s\" is needed by %s but was not found in the same directory as \"%s\"",
+ "pg_waldump", "pg_verifybackup", full_path);
else
- pg_log_fatal("The program \"%s\" was found by \"%s\"\n"
- "but was not the same version as %s.\n"
- "Check your installation.",
- "pg_waldump", full_path, "pg_verifybackup");
- exit(1);
+ pg_fatal("program \"%s\" was found by \"%s\" but was not the same version as %s",
+ "pg_waldump", full_path, "pg_verifybackup");
}
}
va_list ap;
va_start(ap, fmt);
- pg_log_generic_v(PG_LOG_FATAL, gettext(fmt), ap);
+ pg_log_generic_v(PG_LOG_ERROR, PG_LOG_PRIMARY, gettext(fmt), ap);
va_end(ap);
exit(1);
va_list ap;
va_start(ap, fmt);
- pg_log_generic_v(PG_LOG_ERROR, gettext(fmt), ap);
+ pg_log_generic_v(PG_LOG_ERROR, PG_LOG_PRIMARY, gettext(fmt), ap);
va_end(ap);
context->saw_any_error = true;
va_list ap;
va_start(ap, fmt);
- pg_log_generic_v(PG_LOG_FATAL, gettext(fmt), ap);
+ pg_log_generic_v(PG_LOG_ERROR, PG_LOG_PRIMARY, gettext(fmt), ap);
va_end(ap);
exit(1);
my ($test_name, $manifest_contents) = @_;
- test_bad_manifest($test_name, qr/fatal: $test_name/, $manifest_contents);
+ test_bad_manifest($test_name, qr/error: $test_name/, $manifest_contents);
return;
}
CATALOG_NAME = pg_waldump
AVAIL_LANGUAGES = cs de el es fr ja ko ru sv tr uk vi zh_CN
GETTEXT_FILES = $(FRONTEND_COMMON_GETTEXT_FILES) pg_waldump.c
-GETTEXT_TRIGGERS = $(FRONTEND_COMMON_GETTEXT_TRIGGERS) fatal_error
-GETTEXT_FLAGS = $(FRONTEND_COMMON_GETTEXT_FLAGS) fatal_error:1:c-format
+GETTEXT_TRIGGERS = $(FRONTEND_COMMON_GETTEXT_TRIGGERS)
+GETTEXT_FLAGS = $(FRONTEND_COMMON_GETTEXT_FLAGS)
bool filter_by_fpw;
} XLogDumpConfig;
-#define fatal_error(...) do { pg_log_fatal(__VA_ARGS__); exit(EXIT_FAILURE); } while(0)
/*
* When sigint is called, just tell the system to exit at the next possible
fd = open(fpath, O_RDONLY | PG_BINARY, 0);
if (fd < 0 && errno != ENOENT)
- fatal_error("could not open file \"%s\": %m", fname);
+ pg_fatal("could not open file \"%s\": %m", fname);
return fd;
}
WalSegSz = longhdr->xlp_seg_size;
if (!IsValidWalSegSize(WalSegSz))
- fatal_error(ngettext("WAL segment size must be a power of two between 1 MB and 1 GB, but the WAL file \"%s\" header specifies %d byte",
- "WAL segment size must be a power of two between 1 MB and 1 GB, but the WAL file \"%s\" header specifies %d bytes",
- WalSegSz),
- fname, WalSegSz);
+ pg_fatal(ngettext("WAL segment size must be a power of two between 1 MB and 1 GB, but the WAL file \"%s\" header specifies %d byte",
+ "WAL segment size must be a power of two between 1 MB and 1 GB, but the WAL file \"%s\" header specifies %d bytes",
+ WalSegSz),
+ fname, WalSegSz);
}
else if (r < 0)
- fatal_error("could not read file \"%s\": %m",
- fname);
+ pg_fatal("could not read file \"%s\": %m",
+ fname);
else
- fatal_error("could not read file \"%s\": read %d of %d",
- fname, r, XLOG_BLCKSZ);
+ pg_fatal("could not read file \"%s\": read %d of %d",
+ fname, r, XLOG_BLCKSZ);
close(fd);
return true;
}
/* could not locate WAL file */
if (fname)
- fatal_error("could not locate WAL file \"%s\"", fname);
+ pg_fatal("could not locate WAL file \"%s\"", fname);
else
- fatal_error("could not find any WAL file");
+ pg_fatal("could not find any WAL file");
return NULL; /* not reached */
}
break;
}
- fatal_error("could not find file \"%s\": %m", fname);
+ pg_fatal("could not find file \"%s\": %m", fname);
}
/*
if (errinfo.wre_errno != 0)
{
errno = errinfo.wre_errno;
- fatal_error("could not read from file %s, offset %d: %m",
- fname, errinfo.wre_off);
+ pg_fatal("could not read from file %s, offset %d: %m",
+ fname, errinfo.wre_off);
}
else
- fatal_error("could not read from file %s, offset %d: read %d of %d",
- fname, errinfo.wre_off, errinfo.wre_read,
- errinfo.wre_req);
+ pg_fatal("could not read from file %s, offset %d: read %d of %d",
+ fname, errinfo.wre_off, errinfo.wre_read,
+ errinfo.wre_req);
}
return count;
waldir = directory;
if (!verify_directory(waldir))
- fatal_error("could not open directory \"%s\": %m", waldir);
+ pg_fatal("could not open directory \"%s\": %m", waldir);
}
waldir = identify_target_directory(waldir, fname);
fd = open_file_in_directory(waldir, fname);
if (fd < 0)
- fatal_error("could not open file \"%s\"", fname);
+ pg_fatal("could not open file \"%s\"", fname);
close(fd);
/* parse position from file */
fd = open_file_in_directory(waldir, fname);
if (fd < 0)
- fatal_error("could not open file \"%s\"", fname);
+ pg_fatal("could not open file \"%s\"", fname);
close(fd);
/* parse position from file */
XLogFromFileName(fname, &private.timeline, &endsegno, WalSegSz);
if (endsegno < segno)
- fatal_error("ENDSEG %s is before STARTSEG %s",
- argv[optind + 1], argv[optind]);
+ pg_fatal("ENDSEG %s is before STARTSEG %s",
+ argv[optind + 1], argv[optind]);
if (XLogRecPtrIsInvalid(private.endptr))
XLogSegNoOffsetToRecPtr(endsegno + 1, 0, WalSegSz,
.segment_close = WALDumpCloseSegment),
&private);
if (!xlogreader_state)
- fatal_error("out of memory while allocating a WAL reading processor");
+ pg_fatal("out of memory while allocating a WAL reading processor");
/* first find a valid recptr to start from */
first_record = XLogFindNextRecord(xlogreader_state, private.startptr);
if (first_record == InvalidXLogRecPtr)
- fatal_error("could not find a valid record after %X/%X",
- LSN_FORMAT_ARGS(private.startptr));
+ pg_fatal("could not find a valid record after %X/%X",
+ LSN_FORMAT_ARGS(private.startptr));
/*
* Display a message that we're skipping data if `from` wasn't a pointer
exit(0);
if (errormsg)
- fatal_error("error in WAL record at %X/%X: %s",
- LSN_FORMAT_ARGS(xlogreader_state->ReadRecPtr),
- errormsg);
+ pg_fatal("error in WAL record at %X/%X: %s",
+ LSN_FORMAT_ARGS(xlogreader_state->ReadRecPtr),
+ errormsg);
XLogReaderFree(xlogreader_state);
return EXIT_SUCCESS;
bad_argument:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
return EXIT_FAILURE;
}
break;
default:
/* internal error which should never occur */
- pg_log_fatal("unexpected error status: %d", estatus);
- exit(1);
+ pg_fatal("unexpected error status: %d", estatus);
}
}
res = PQexec(con, sql);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
{
- pg_log_fatal("query failed: %s", PQerrorMessage(con));
- pg_log_info("query was: %s", sql);
+ pg_log_error("query failed: %s", PQerrorMessage(con));
+ pg_log_error_detail("Query was: %s", sql);
exit(1);
}
PQclear(res);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
{
pg_log_error("%s", PQerrorMessage(con));
- pg_log_info("(ignoring this error and continuing anyway)");
+ pg_log_error_detail("(ignoring this error and continuing anyway)");
}
PQclear(res);
}
default:
/* internal error which should never occur */
- pg_log_fatal("unexpected enode type in evaluation: %d", expr->etype);
- exit(1);
+ pg_fatal("unexpected enode type in evaluation: %d", expr->etype);
}
}
return "deadlock";
default:
/* internal error which should never occur */
- pg_log_fatal("unexpected error status: %d", estatus);
- exit(1);
+ pg_fatal("unexpected error status: %d", estatus);
}
}
else
res = PQexec(con, copy_statement);
if (PQresultStatus(res) != PGRES_COPY_IN)
- {
- pg_log_fatal("unexpected copy in result: %s", PQerrorMessage(con));
- exit(1);
- }
+ pg_fatal("unexpected copy in result: %s", PQerrorMessage(con));
PQclear(res);
start = pg_time_now();
INT64_FORMAT "\t" INT64_FORMAT "\t%d\t\n",
j, k / naccounts + 1, 0);
if (PQputline(con, sql.data))
- {
- pg_log_fatal("PQputline failed");
- exit(1);
- }
+ pg_fatal("PQputline failed");
if (CancelRequested)
break;
fputc('\n', stderr); /* Need to move to next line */
if (PQputline(con, "\\.\n"))
- {
- pg_log_fatal("very last PQputline failed");
- exit(1);
- }
+ pg_fatal("very last PQputline failed");
if (PQendcopy(con))
- {
- pg_log_fatal("PQendcopy failed");
- exit(1);
- }
+ pg_fatal("PQendcopy failed");
termPQExpBuffer(&sql);
checkInitSteps(const char *initialize_steps)
{
if (initialize_steps[0] == '\0')
- {
- pg_log_fatal("no initialization steps specified");
- exit(1);
- }
+ pg_fatal("no initialization steps specified");
for (const char *step = initialize_steps; *step != '\0'; step++)
{
if (strchr(ALL_INIT_STEPS " ", *step) == NULL)
{
- pg_log_fatal("unrecognized initialization step \"%c\"", *step);
- pg_log_info("Allowed step characters are: \"" ALL_INIT_STEPS "\".");
+ pg_log_error("unrecognized initialization step \"%c\"", *step);
+ pg_log_error_detail("Allowed step characters are: \"" ALL_INIT_STEPS "\".");
exit(1);
}
}
initPQExpBuffer(&stats);
if ((con = doConnect()) == NULL)
- {
- pg_log_fatal("could not create connection for initialization");
- exit(1);
- }
+ pg_fatal("could not create connection for initialization");
setup_cancel_handler(NULL);
SetCancelConn(con);
case ' ':
break; /* ignore */
default:
- pg_log_fatal("unrecognized initialization step \"%c\"", *step);
+ pg_log_error("unrecognized initialization step \"%c\"", *step);
PQfinish(con);
exit(1);
}
{
char *sqlState = PQresultErrorField(res, PG_DIAG_SQLSTATE);
- pg_log_fatal("could not count number of branches: %s", PQerrorMessage(con));
+ pg_log_error("could not count number of branches: %s", PQerrorMessage(con));
if (sqlState && strcmp(sqlState, ERRCODE_UNDEFINED_TABLE) == 0)
- pg_log_info("Perhaps you need to do initialization (\"pgbench -i\") in database \"%s\"",
- PQdb(con));
+ pg_log_error_hint("Perhaps you need to do initialization (\"pgbench -i\") in database \"%s\".",
+ PQdb(con));
exit(1);
}
scale = atoi(PQgetvalue(res, 0, 0));
if (scale < 0)
- {
- pg_log_fatal("invalid count(*) from pgbench_branches: \"%s\"",
- PQgetvalue(res, 0, 0));
- exit(1);
- }
+ pg_fatal("invalid count(*) from pgbench_branches: \"%s\"",
+ PQgetvalue(res, 0, 0));
PQclear(res);
/* warn if we override user-given -s switch */
* This case is unlikely as pgbench already found "pgbench_branches"
* above to compute the scale.
*/
- pg_log_fatal("no pgbench_accounts table found in search_path");
- pg_log_info("Perhaps you need to do initialization (\"pgbench -i\") in database \"%s\".", PQdb(con));
+ pg_log_error("no pgbench_accounts table found in search_path");
+ pg_log_error_hint("Perhaps you need to do initialization (\"pgbench -i\") in database \"%s\".", PQdb(con));
exit(1);
}
else /* PQntupes(res) == 1 */
else
{
/* possibly a newer version with new partition method */
- pg_log_fatal("unexpected partition method: \"%s\"", ps);
- exit(1);
+ pg_fatal("unexpected partition method: \"%s\"", ps);
}
}
if (command != NULL)
appendPQExpBuffer(&buf, " in command \"%s\"", command);
- pg_log_fatal("%s", buf.data);
+ pg_log_error("%s", buf.data);
termPQExpBuffer(&buf);
static void
ConditionError(const char *desc, int cmdn, const char *msg)
{
- pg_log_fatal("condition error in script \"%s\" command %d: %s",
- desc, cmdn, msg);
- exit(1);
+ pg_fatal("condition error in script \"%s\" command %d: %s",
+ desc, cmdn, msg);
}
/*
if (strcmp(filename, "-") == 0)
fd = stdin;
else if ((fd = fopen(filename, "r")) == NULL)
- {
- pg_log_fatal("could not open file \"%s\": %m", filename);
- exit(1);
- }
+ pg_fatal("could not open file \"%s\": %m", filename);
buf = read_file_contents(fd);
if (ferror(fd))
- {
- pg_log_fatal("could not read file \"%s\": %m", filename);
- exit(1);
- }
+ pg_fatal("could not read file \"%s\": %m", filename);
if (fd != stdin)
fclose(fd);
/* error cases */
if (found == 0)
- pg_log_fatal("no builtin script found for name \"%s\"", name);
+ pg_log_error("no builtin script found for name \"%s\"", name);
else /* found > 1 */
- pg_log_fatal("ambiguous builtin name: %d builtin scripts found for prefix \"%s\"", found, name);
+ pg_log_error("ambiguous builtin name: %d builtin scripts found for prefix \"%s\"", found, name);
listAvailableScripts();
exit(1);
errno = 0;
wtmp = strtol(sep + 1, &badp, 10);
if (errno != 0 || badp == sep + 1 || *badp != '\0')
- {
- pg_log_fatal("invalid weight specification: %s", sep);
- exit(1);
- }
+ pg_fatal("invalid weight specification: %s", sep);
if (wtmp > INT_MAX || wtmp < 0)
- {
- pg_log_fatal("weight specification out of range (0 .. %d): %lld",
- INT_MAX, (long long) wtmp);
- exit(1);
- }
+ pg_fatal("weight specification out of range (0 .. %d): %lld",
+ INT_MAX, (long long) wtmp);
weight = wtmp;
}
else
addScript(const ParsedScript *script)
{
if (script->commands == NULL || script->commands[0] == NULL)
- {
- pg_log_fatal("empty command list for script \"%s\"", script->desc);
- exit(1);
- }
+ pg_fatal("empty command list for script \"%s\"", script->desc);
if (num_scripts >= MAX_SCRIPTS)
- {
- pg_log_fatal("at most %d SQL scripts are allowed", MAX_SCRIPTS);
- exit(1);
- }
+ pg_fatal("at most %d SQL scripts are allowed", MAX_SCRIPTS);
CheckConditional(script);
if (sscanf(seed, "%lu%c", &ulseed, &garbage) != 1)
{
pg_log_error("unrecognized random seed option \"%s\"", seed);
- pg_log_info("Expecting an unsigned integer, \"time\" or \"rand\"");
+ pg_log_error_detail("Expecting an unsigned integer, \"time\" or \"rand\".");
return false;
}
iseed = (uint64) ulseed;
/* set random seed early, because it may be used while parsing scripts. */
if (!set_random_seed(getenv("PGBENCH_RANDOM_SEED")))
- {
- pg_log_fatal("error while setting random seed from PGBENCH_RANDOM_SEED environment variable");
- exit(1);
- }
+ pg_fatal("error while setting random seed from PGBENCH_RANDOM_SEED environment variable");
while ((c = getopt_long(argc, argv, "iI:h:nvp:dqb:SNc:j:Crs:t:T:U:lf:D:F:M:P:R:L:", long_options, &optindex)) != -1)
{
#else /* but BSD doesn't ... */
if (getrlimit(RLIMIT_OFILE, &rlim) == -1)
#endif /* RLIMIT_NOFILE */
- {
- pg_log_fatal("getrlimit failed: %m");
- exit(1);
- }
+ pg_fatal("getrlimit failed: %m");
if (rlim.rlim_cur < nclients + 3)
{
- pg_log_fatal("need at least %d open files, but system limit is %ld",
+ pg_log_error("need at least %d open files, but system limit is %ld",
nclients + 3, (long) rlim.rlim_cur);
- pg_log_info("Reduce number of clients, or use limit/ulimit to increase the system limit.");
+ pg_log_error_hint("Reduce number of clients, or use limit/ulimit to increase the system limit.");
exit(1);
}
#endif /* HAVE_GETRLIMIT */
}
#ifndef ENABLE_THREAD_SAFETY
if (nthreads != 1)
- {
- pg_log_fatal("threads are not supported on this platform; use -j1");
- exit(1);
- }
+ pg_fatal("threads are not supported on this platform; use -j1");
#endif /* !ENABLE_THREAD_SAFETY */
break;
case 'C':
benchmarking_option_set = true;
if ((p = strchr(optarg, '=')) == NULL || p == optarg || *(p + 1) == '\0')
- {
- pg_log_fatal("invalid variable definition: \"%s\"", optarg);
- exit(1);
- }
+ pg_fatal("invalid variable definition: \"%s\"", optarg);
*p++ = '\0';
if (!putVariable(&state[0].variables, "option", optarg, p))
if (strcmp(optarg, QUERYMODE[querymode]) == 0)
break;
if (querymode >= NUM_QUERYMODE)
- {
- pg_log_fatal("invalid query mode (-M): \"%s\"", optarg);
- exit(1);
- }
+ pg_fatal("invalid query mode (-M): \"%s\"", optarg);
break;
case 'P':
benchmarking_option_set = true;
benchmarking_option_set = true;
if (throttle_value <= 0.0)
- {
- pg_log_fatal("invalid rate limit: \"%s\"", optarg);
- exit(1);
- }
+ pg_fatal("invalid rate limit: \"%s\"", optarg);
/* Invert rate limit into per-transaction delay in usec */
throttle_delay = 1000000.0 / throttle_value;
}
double limit_ms = atof(optarg);
if (limit_ms <= 0.0)
- {
- pg_log_fatal("invalid latency limit: \"%s\"", optarg);
- exit(1);
- }
+ pg_fatal("invalid latency limit: \"%s\"", optarg);
benchmarking_option_set = true;
latency_limit = (int64) (limit_ms * 1000);
}
benchmarking_option_set = true;
sample_rate = atof(optarg);
if (sample_rate <= 0.0 || sample_rate > 1.0)
- {
- pg_log_fatal("invalid sampling rate: \"%s\"", optarg);
- exit(1);
- }
+ pg_fatal("invalid sampling rate: \"%s\"", optarg);
break;
case 5: /* aggregate-interval */
benchmarking_option_set = true;
case 9: /* random-seed */
benchmarking_option_set = true;
if (!set_random_seed(optarg))
- {
- pg_log_fatal("error while setting random seed from --random-seed option");
- exit(1);
- }
+ pg_fatal("error while setting random seed from --random-seed option");
break;
case 10: /* list */
{
else if (pg_strcasecmp(optarg, "hash") == 0)
partition_method = PART_HASH;
else
- {
- pg_log_fatal("invalid partition method, expecting \"range\" or \"hash\", got: \"%s\"",
- optarg);
- exit(1);
- }
+ pg_fatal("invalid partition method, expecting \"range\" or \"hash\", got: \"%s\"",
+ optarg);
break;
case 13: /* failures-detailed */
benchmarking_option_set = true;
int32 max_tries_arg = atoi(optarg);
if (max_tries_arg < 0)
- {
- pg_log_fatal("invalid number of maximum tries: \"%s\"", optarg);
- exit(1);
- }
+ pg_fatal("invalid number of maximum tries: \"%s\"", optarg);
benchmarking_option_set = true;
max_tries = (uint32) max_tries_arg;
verbose_errors = true;
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
- break;
}
}
}
if (total_weight == 0 && !is_init_mode)
- {
- pg_log_fatal("total script weight must not be zero");
- exit(1);
- }
+ pg_fatal("total script weight must not be zero");
/* show per script stats if several scripts are used */
if (num_scripts > 1)
if (optind < argc)
{
- pg_log_fatal("too many command-line arguments (first is \"%s\")",
+ pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (is_init_mode)
{
if (benchmarking_option_set)
- {
- pg_log_fatal("some of the specified options cannot be used in initialization (-i) mode");
- exit(1);
- }
+ pg_fatal("some of the specified options cannot be used in initialization (-i) mode");
if (partitions == 0 && partition_method != PART_NONE)
- {
- pg_log_fatal("--partition-method requires greater than zero --partitions");
- exit(1);
- }
+ pg_fatal("--partition-method requires greater than zero --partitions");
/* set default method */
if (partitions > 0 && partition_method == PART_NONE)
else
{
if (initialization_option_set)
- {
- pg_log_fatal("some of the specified options cannot be used in benchmarking mode");
- exit(1);
- }
+ pg_fatal("some of the specified options cannot be used in benchmarking mode");
}
if (nxacts > 0 && duration > 0)
- {
- pg_log_fatal("specify either a number of transactions (-t) or a duration (-T), not both");
- exit(1);
- }
+ pg_fatal("specify either a number of transactions (-t) or a duration (-T), not both");
/* Use DEFAULT_NXACTS if neither nxacts nor duration is specified. */
if (nxacts <= 0 && duration <= 0)
/* --sampling-rate may be used only with -l */
if (sample_rate > 0.0 && !use_log)
- {
- pg_log_fatal("log sampling (--sampling-rate) is allowed only when logging transactions (-l)");
- exit(1);
- }
+ pg_fatal("log sampling (--sampling-rate) is allowed only when logging transactions (-l)");
/* --sampling-rate may not be used with --aggregate-interval */
if (sample_rate > 0.0 && agg_interval > 0)
- {
- pg_log_fatal("log sampling (--sampling-rate) and aggregation (--aggregate-interval) cannot be used at the same time");
- exit(1);
- }
+ pg_fatal("log sampling (--sampling-rate) and aggregation (--aggregate-interval) cannot be used at the same time");
if (agg_interval > 0 && !use_log)
- {
- pg_log_fatal("log aggregation is allowed only when actually logging transactions");
- exit(1);
- }
+ pg_fatal("log aggregation is allowed only when actually logging transactions");
if (!use_log && logfile_prefix)
- {
- pg_log_fatal("log file prefix (--log-prefix) is allowed only when logging transactions (-l)");
- exit(1);
- }
+ pg_fatal("log file prefix (--log-prefix) is allowed only when logging transactions (-l)");
if (duration > 0 && agg_interval > duration)
- {
- pg_log_fatal("number of seconds for aggregation (%d) must not be higher than test duration (%d)", agg_interval, duration);
- exit(1);
- }
+ pg_fatal("number of seconds for aggregation (%d) must not be higher than test duration (%d)", agg_interval, duration);
if (duration > 0 && agg_interval > 0 && duration % agg_interval != 0)
- {
- pg_log_fatal("duration (%d) must be a multiple of aggregation interval (%d)", duration, agg_interval);
- exit(1);
- }
+ pg_fatal("duration (%d) must be a multiple of aggregation interval (%d)", duration, agg_interval);
if (progress_timestamp && progress == 0)
- {
- pg_log_fatal("--progress-timestamp is allowed only under --progress");
- exit(1);
- }
+ pg_fatal("--progress-timestamp is allowed only under --progress");
if (!max_tries)
{
if (!latency_limit && duration <= 0)
- {
- pg_log_fatal("an unlimited number of transaction tries can only be used with --latency-limit or a duration (-T)");
- exit(1);
- }
+ pg_fatal("an unlimited number of transaction tries can only be used with --latency-limit or a duration (-T)");
}
/*
/* opening connection... */
con = doConnect();
if (con == NULL)
- {
- pg_log_fatal("could not create connection for setup");
- exit(1);
- }
+ pg_fatal("could not create connection for setup");
/* report pgbench and server versions */
printVersion(con);
errno = THREAD_BARRIER_INIT(&barrier, nthreads);
if (errno != 0)
- {
- pg_log_fatal("could not initialize barrier: %m");
- exit(1);
- }
+ pg_fatal("could not initialize barrier: %m");
#ifdef ENABLE_THREAD_SAFETY
/* start all threads but thread 0 which is executed directly later */
errno = THREAD_CREATE(&thread->thread, threadRun, thread);
if (errno != 0)
- {
- pg_log_fatal("could not create thread: %m");
- exit(1);
- }
+ pg_fatal("could not create thread: %m");
}
#else
Assert(nthreads == 1);
THREAD_BARRIER_DESTROY(&barrier);
if (exit_code != 0)
- pg_log_fatal("Run was aborted; the above results are incomplete.");
+ pg_log_error("Run was aborted; the above results are incomplete.");
return exit_code;
}
thread->logfile = fopen(logpath, "w");
if (thread->logfile == NULL)
- {
- pg_log_fatal("could not open logfile \"%s\": %m", logpath);
- exit(1);
- }
+ pg_fatal("could not open logfile \"%s\": %m", logpath);
}
/* explicitly initialize the state machines */
if ((state[i].con = doConnect()) == NULL)
{
/* coldly abort on initial connection failure */
- pg_log_fatal("could not create connection for client %d",
- state[i].id);
- exit(1);
+ pg_fatal("could not create connection for client %d",
+ state[i].id);
}
}
}
!CreateTimerQueueTimer(&timer, queue,
win32_timer_callback, NULL, seconds * 1000, 0,
WT_EXECUTEINTIMERTHREAD | WT_EXECUTEONLYONCE))
- {
- pg_log_fatal("failed to set timer");
- exit(1);
- }
+ pg_fatal("failed to set timer");
}
#endif /* WIN32 */
* Doing a hard exit here is a bit grotty, but it doesn't seem worth
* complicating the API to make it less grotty.
*/
- pg_log_fatal("too many client connections for select()");
- exit(1);
+ pg_fatal("too many client connections for select()");
}
FD_SET(fd, &sa->fds);
if (fd > sa->maxfd)
{
pg_log_error("invalid command \\%s", cmd);
if (pset.cur_cmd_interactive)
- pg_log_info("Try \\? for help.");
+ pg_log_error_hint("Try \\? for help.");
status = PSQL_CMD_ERROR;
}
{
if (!pset.cur_cmd_interactive)
{
- pg_log_fatal("connection to server was lost");
+ pg_log_error("connection to server was lost");
exit(EXIT_BADCONN);
}
{
user = get_user_name(&errstr);
if (!user)
- {
- pg_log_fatal("%s", errstr);
- exit(EXIT_FAILURE);
- }
+ pg_fatal("%s", errstr);
}
/*
if (PQExpBufferBroken(query_buf) ||
PQExpBufferBroken(previous_buf) ||
PQExpBufferBroken(history_buf))
- {
- pg_log_error("out of memory");
- exit(EXIT_FAILURE);
- }
+ pg_fatal("out of memory");
/* main loop to get queries and execute them */
while (successResult == EXIT_SUCCESS)
prompt_status = prompt_tmp;
if (PQExpBufferBroken(query_buf))
- {
- pg_log_error("out of memory");
- exit(EXIT_FAILURE);
- }
+ pg_fatal("out of memory");
/*
* Increase statement line number counter for each linebreak added
/* Bail out if -1 was specified but will be ignored. */
if (options.single_txn && options.actions.head == NULL)
- {
- pg_log_fatal("-1 can only be used in non-interactive mode");
- exit(EXIT_FAILURE);
- }
+ pg_fatal("-1 can only be used in non-interactive mode");
if (!pset.popt.topt.fieldSep.separator &&
!pset.popt.topt.fieldSep.separator_zero)
{
pset.logfile = fopen(options.logfilename, "a");
if (!pset.logfile)
- {
- pg_log_fatal("could not open log file \"%s\": %m",
- options.logfilename);
- exit(EXIT_FAILURE);
- }
+ pg_fatal("could not open log file \"%s\": %m",
+ options.logfilename);
}
if (!options.no_psqlrc)
}
if (!result)
- {
- pg_log_fatal("could not set printing parameter \"%s\"", value);
- exit(EXIT_FAILURE);
- }
+ pg_fatal("could not set printing parameter \"%s\"", value);
free(value);
break;
break;
default:
unknown_option:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- pset.progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.",
+ pset.progname);
exit(EXIT_FAILURE);
- break;
}
}
char *envrc = getenv("PSQLRC");
if (find_my_exec(argv0, my_exec_path) < 0)
- {
- pg_log_fatal("could not find own program executable");
- exit(EXIT_FAILURE);
- }
+ pg_fatal("could not find own program executable");
get_etc_path(my_exec_path, etc_path);
psql:<stdin>:2: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
-psql:<stdin>:2: fatal: connection to server was lost',
+psql:<stdin>:2: error: connection to server was lost',
'server crash: error message');
# test \errverbose
maintenance_db = pg_strdup(optarg);
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (alldb)
{
if (dbname)
- {
- pg_log_error("cannot cluster all databases and a specific one at the same time");
- exit(1);
- }
+ pg_fatal("cannot cluster all databases and a specific one at the same time");
if (tables.head != NULL)
- {
- pg_log_error("cannot cluster specific table(s) in all databases");
- exit(1);
- }
+ pg_fatal("cannot cluster specific table(s) in all databases");
cparams.dbname = maintenance_db;
icu_locale = pg_strdup(optarg);
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
default:
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind + 2]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (locale)
{
if (lc_ctype)
- {
- pg_log_error("only one of --locale and --lc-ctype can be specified");
- exit(1);
- }
+ pg_fatal("only one of --locale and --lc-ctype can be specified");
if (lc_collate)
- {
- pg_log_error("only one of --locale and --lc-collate can be specified");
- exit(1);
- }
+ pg_fatal("only one of --locale and --lc-collate can be specified");
lc_ctype = locale;
lc_collate = locale;
}
if (encoding)
{
if (pg_char_to_encoding(encoding) < 0)
- {
- pg_log_error("\"%s\" is not a valid encoding name", encoding);
- exit(1);
- }
+ pg_fatal("\"%s\" is not a valid encoding name", encoding);
}
if (dbname == NULL)
interactive = true;
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
default:
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind + 1]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
newuser,
NULL);
if (!encrypted_password)
- {
- pg_log_error("password encryption failed: %s",
- PQerrorMessage(conn));
- exit(1);
- }
+ pg_fatal("password encryption failed: %s",
+ PQerrorMessage(conn));
appendStringLiteralConn(&sql, encrypted_password, conn);
PQfreemem(encrypted_password);
}
maintenance_db = pg_strdup(optarg);
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
{
case 0:
pg_log_error("missing required argument database name");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
case 1:
dbname = argv[optind];
default:
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind + 1]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
/* this covers the long options */
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
default:
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind + 1]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
else
{
pg_log_error("missing required argument role name");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
pguser = pg_strdup(optarg);
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
/*
* We need to make sure we don't return 1 here because someone
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
/*
* We need to make sure we don't return 1 here because someone
tablespace = pg_strdup(optarg);
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (alldb)
{
if (dbname)
- {
- pg_log_error("cannot reindex all databases and a specific one at the same time");
- exit(1);
- }
+ pg_fatal("cannot reindex all databases and a specific one at the same time");
if (syscatalog)
- {
- pg_log_error("cannot reindex all databases and system catalogs at the same time");
- exit(1);
- }
+ pg_fatal("cannot reindex all databases and system catalogs at the same time");
if (schemas.head != NULL)
- {
- pg_log_error("cannot reindex specific schema(s) in all databases");
- exit(1);
- }
+ pg_fatal("cannot reindex specific schema(s) in all databases");
if (tables.head != NULL)
- {
- pg_log_error("cannot reindex specific table(s) in all databases");
- exit(1);
- }
+ pg_fatal("cannot reindex specific table(s) in all databases");
if (indexes.head != NULL)
- {
- pg_log_error("cannot reindex specific index(es) in all databases");
- exit(1);
- }
+ pg_fatal("cannot reindex specific index(es) in all databases");
cparams.dbname = maintenance_db;
else if (syscatalog)
{
if (schemas.head != NULL)
- {
- pg_log_error("cannot reindex specific schema(s) and system catalogs at the same time");
- exit(1);
- }
+ pg_fatal("cannot reindex specific schema(s) and system catalogs at the same time");
if (tables.head != NULL)
- {
- pg_log_error("cannot reindex specific table(s) and system catalogs at the same time");
- exit(1);
- }
+ pg_fatal("cannot reindex specific table(s) and system catalogs at the same time");
if (indexes.head != NULL)
- {
- pg_log_error("cannot reindex specific index(es) and system catalogs at the same time");
- exit(1);
- }
+ pg_fatal("cannot reindex specific index(es) and system catalogs at the same time");
if (concurrentCons > 1)
- {
- pg_log_error("cannot use multiple jobs to reindex system catalogs");
- exit(1);
- }
+ pg_fatal("cannot use multiple jobs to reindex system catalogs");
if (dbname == NULL)
{
* depending on the same relation.
*/
if (concurrentCons > 1 && indexes.head != NULL)
- {
- pg_log_error("cannot use multiple jobs to reindex indexes");
- exit(1);
- }
+ pg_fatal("cannot use multiple jobs to reindex indexes");
if (dbname == NULL)
{
if (concurrently && PQserverVersion(conn) < 120000)
{
PQfinish(conn);
- pg_log_error("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
- "concurrently", "12");
- exit(1);
+ pg_fatal("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
+ "concurrently", "12");
}
if (tablespace && PQserverVersion(conn) < 140000)
{
PQfinish(conn);
- pg_log_error("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
- "tablespace", "14");
- exit(1);
+ pg_fatal("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
+ "tablespace", "14");
}
if (!parallel)
vacopts.process_toast = false;
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (vacopts.analyze_only)
{
if (vacopts.full)
- {
- pg_log_error("cannot use the \"%s\" option when performing only analyze",
- "full");
- exit(1);
- }
+ pg_fatal("cannot use the \"%s\" option when performing only analyze",
+ "full");
if (vacopts.freeze)
- {
- pg_log_error("cannot use the \"%s\" option when performing only analyze",
- "freeze");
- exit(1);
- }
+ pg_fatal("cannot use the \"%s\" option when performing only analyze",
+ "freeze");
if (vacopts.disable_page_skipping)
- {
- pg_log_error("cannot use the \"%s\" option when performing only analyze",
- "disable-page-skipping");
- exit(1);
- }
+ pg_fatal("cannot use the \"%s\" option when performing only analyze",
+ "disable-page-skipping");
if (vacopts.no_index_cleanup)
- {
- pg_log_error("cannot use the \"%s\" option when performing only analyze",
- "no-index-cleanup");
- exit(1);
- }
+ pg_fatal("cannot use the \"%s\" option when performing only analyze",
+ "no-index-cleanup");
if (vacopts.force_index_cleanup)
- {
- pg_log_error("cannot use the \"%s\" option when performing only analyze",
- "force-index-cleanup");
- exit(1);
- }
+ pg_fatal("cannot use the \"%s\" option when performing only analyze",
+ "force-index-cleanup");
if (!vacopts.do_truncate)
- {
- pg_log_error("cannot use the \"%s\" option when performing only analyze",
- "no-truncate");
- exit(1);
- }
+ pg_fatal("cannot use the \"%s\" option when performing only analyze",
+ "no-truncate");
if (!vacopts.process_toast)
- {
- pg_log_error("cannot use the \"%s\" option when performing only analyze",
- "no-process-toast");
- exit(1);
- }
+ pg_fatal("cannot use the \"%s\" option when performing only analyze",
+ "no-process-toast");
/* allow 'and_analyze' with 'analyze_only' */
}
if (vacopts.parallel_workers >= 0)
{
if (vacopts.analyze_only)
- {
- pg_log_error("cannot use the \"%s\" option when performing only analyze",
- "parallel");
- exit(1);
- }
+ pg_fatal("cannot use the \"%s\" option when performing only analyze",
+ "parallel");
if (vacopts.full)
- {
- pg_log_error("cannot use the \"%s\" option when performing full vacuum",
- "parallel");
- exit(1);
- }
+ pg_fatal("cannot use the \"%s\" option when performing full vacuum",
+ "parallel");
}
/* Prohibit --no-index-cleanup and --force-index-cleanup together */
if (vacopts.no_index_cleanup && vacopts.force_index_cleanup)
- {
- pg_log_error("cannot use the \"%s\" option with the \"%s\" option",
- "no-index-cleanup", "force-index-cleanup");
- exit(1);
- }
+ pg_fatal("cannot use the \"%s\" option with the \"%s\" option",
+ "no-index-cleanup", "force-index-cleanup");
/* fill cparams except for dbname, which is set below */
cparams.pghost = host;
if (alldb)
{
if (dbname)
- {
- pg_log_error("cannot vacuum all databases and a specific one at the same time");
- exit(1);
- }
+ pg_fatal("cannot vacuum all databases and a specific one at the same time");
if (tables.head != NULL)
- {
- pg_log_error("cannot vacuum specific table(s) in all databases");
- exit(1);
- }
+ pg_fatal("cannot vacuum specific table(s) in all databases");
cparams.dbname = maintenance_db;
if (vacopts->disable_page_skipping && PQserverVersion(conn) < 90600)
{
PQfinish(conn);
- pg_log_error("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
- "disable-page-skipping", "9.6");
- exit(1);
+ pg_fatal("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
+ "disable-page-skipping", "9.6");
}
if (vacopts->no_index_cleanup && PQserverVersion(conn) < 120000)
{
PQfinish(conn);
- pg_log_error("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
- "no-index-cleanup", "12");
- exit(1);
+ pg_fatal("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
+ "no-index-cleanup", "12");
}
if (vacopts->force_index_cleanup && PQserverVersion(conn) < 120000)
{
PQfinish(conn);
- pg_log_error("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
- "force-index-cleanup", "12");
- exit(1);
+ pg_fatal("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
+ "force-index-cleanup", "12");
}
if (!vacopts->do_truncate && PQserverVersion(conn) < 120000)
{
PQfinish(conn);
- pg_log_error("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
- "no-truncate", "12");
- exit(1);
+ pg_fatal("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
+ "no-truncate", "12");
}
if (!vacopts->process_toast && PQserverVersion(conn) < 140000)
{
PQfinish(conn);
- pg_log_error("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
- "no-process-toast", "14");
- exit(1);
+ pg_fatal("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
+ "no-process-toast", "14");
}
if (vacopts->skip_locked && PQserverVersion(conn) < 120000)
{
PQfinish(conn);
- pg_log_error("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
- "skip-locked", "12");
- exit(1);
+ pg_fatal("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
+ "skip-locked", "12");
}
if (vacopts->min_xid_age != 0 && PQserverVersion(conn) < 90600)
- {
- pg_log_error("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
- "--min-xid-age", "9.6");
- exit(1);
- }
+ pg_fatal("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
+ "--min-xid-age", "9.6");
if (vacopts->min_mxid_age != 0 && PQserverVersion(conn) < 90600)
- {
- pg_log_error("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
- "--min-mxid-age", "9.6");
- exit(1);
- }
+ pg_fatal("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
+ "--min-mxid-age", "9.6");
if (vacopts->parallel_workers >= 0 && PQserverVersion(conn) < 130000)
- {
- pg_log_error("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
- "--parallel", "13");
- exit(1);
- }
+ pg_fatal("cannot use the \"%s\" option on server versions older than PostgreSQL %s",
+ "--parallel", "13");
if (!quiet)
{
ControlFilePath)));
#else
if ((fd = open(ControlFilePath, O_RDONLY | PG_BINARY, 0)) == -1)
- {
- pg_log_fatal("could not open file \"%s\" for reading: %m",
- ControlFilePath);
- exit(EXIT_FAILURE);
- }
+ pg_fatal("could not open file \"%s\" for reading: %m",
+ ControlFilePath);
#endif
r = read(fd, ControlFile, sizeof(ControlFileData));
(errcode_for_file_access(),
errmsg("could not read file \"%s\": %m", ControlFilePath)));
#else
- {
- pg_log_fatal("could not read file \"%s\": %m", ControlFilePath);
- exit(EXIT_FAILURE);
- }
+ pg_fatal("could not read file \"%s\": %m", ControlFilePath);
#endif
else
#ifndef FRONTEND
errmsg("could not read file \"%s\": read %d of %zu",
ControlFilePath, r, sizeof(ControlFileData))));
#else
- {
- pg_log_fatal("could not read file \"%s\": read %d of %zu",
- ControlFilePath, r, sizeof(ControlFileData));
- exit(EXIT_FAILURE);
- }
+ pg_fatal("could not read file \"%s\": read %d of %zu",
+ ControlFilePath, r, sizeof(ControlFileData));
#endif
}
ControlFilePath)));
#else
if (close(fd) != 0)
- {
- pg_log_fatal("could not close file \"%s\": %m", ControlFilePath);
- exit(EXIT_FAILURE);
- }
+ pg_fatal("could not close file \"%s\": %m", ControlFilePath);
#endif
/* Check the CRC. */
#else
if ((fd = open(ControlFilePath, O_WRONLY | PG_BINARY,
pg_file_create_mode)) == -1)
- {
- pg_log_fatal("could not open file \"%s\": %m", ControlFilePath);
- exit(EXIT_FAILURE);
- }
+ pg_fatal("could not open file \"%s\": %m", ControlFilePath);
#endif
errno = 0;
errmsg("could not write file \"%s\": %m",
ControlFilePath)));
#else
- pg_log_fatal("could not write file \"%s\": %m", ControlFilePath);
- exit(EXIT_FAILURE);
+ pg_fatal("could not write file \"%s\": %m", ControlFilePath);
#endif
}
#ifndef FRONTEND
pgstat_report_wait_end();
#else
if (fsync(fd) != 0)
- {
- pg_log_fatal("could not fsync file \"%s\": %m", ControlFilePath);
- exit(EXIT_FAILURE);
- }
+ pg_fatal("could not fsync file \"%s\": %m", ControlFilePath);
#endif
}
errmsg("could not close file \"%s\": %m",
ControlFilePath)));
#else
- pg_log_fatal("could not close file \"%s\": %m", ControlFilePath);
- exit(EXIT_FAILURE);
+ pg_fatal("could not close file \"%s\": %m", ControlFilePath);
#endif
}
}
*/
if (returncode != 0 && !(isdir && (errno == EBADF || errno == EINVAL)))
{
- pg_log_fatal("could not fsync file \"%s\": %m", fname);
+ pg_log_error("could not fsync file \"%s\": %m", fname);
(void) close(fd);
exit(EXIT_FAILURE);
}
{
if (fsync(fd) != 0)
{
- pg_log_fatal("could not fsync file \"%s\": %m", newfile);
+ pg_log_error("could not fsync file \"%s\": %m", newfile);
close(fd);
exit(EXIT_FAILURE);
}
{
result = PGFILETYPE_ERROR;
#ifdef FRONTEND
- pg_log_generic(elevel, "could not stat file \"%s\": %m", path);
+ pg_log_generic(elevel, PG_LOG_PRIMARY, "could not stat file \"%s\": %m", path);
#else
ereport(elevel,
(errcode_for_file_access(),
}
}
+/*
+ * Change the logging flags.
+ */
void
pg_logging_config(int new_flags)
{
}
void
-pg_log_generic(enum pg_log_level level, const char *pg_restrict fmt,...)
+pg_log_generic(enum pg_log_level level, enum pg_log_part part,
+ const char *pg_restrict fmt,...)
{
va_list ap;
va_start(ap, fmt);
- pg_log_generic_v(level, fmt, ap);
+ pg_log_generic_v(level, part, fmt, ap);
va_end(ap);
}
void
-pg_log_generic_v(enum pg_log_level level, const char *pg_restrict fmt, va_list ap)
+pg_log_generic_v(enum pg_log_level level, enum pg_log_part part,
+ const char *pg_restrict fmt, va_list ap)
{
int save_errno = errno;
const char *filename = NULL;
fmt = _(fmt);
- if (!(log_flags & PG_LOG_FLAG_TERSE) || filename)
+ if (part == PG_LOG_PRIMARY &&
+ (!(log_flags & PG_LOG_FLAG_TERSE) || filename))
{
if (sgr_locus)
fprintf(stderr, ANSI_ESCAPE_FMT, sgr_locus);
if (!(log_flags & PG_LOG_FLAG_TERSE))
{
- switch (level)
+ switch (part)
{
- case PG_LOG_FATAL:
- if (sgr_error)
- fprintf(stderr, ANSI_ESCAPE_FMT, sgr_error);
- fprintf(stderr, _("fatal: "));
- if (sgr_error)
- fprintf(stderr, ANSI_ESCAPE_RESET);
- break;
- case PG_LOG_ERROR:
- if (sgr_error)
- fprintf(stderr, ANSI_ESCAPE_FMT, sgr_error);
- fprintf(stderr, _("error: "));
- if (sgr_error)
- fprintf(stderr, ANSI_ESCAPE_RESET);
+ case PG_LOG_PRIMARY:
+ switch (level)
+ {
+ case PG_LOG_ERROR:
+ if (sgr_error)
+ fprintf(stderr, ANSI_ESCAPE_FMT, sgr_error);
+ fprintf(stderr, _("error: "));
+ if (sgr_error)
+ fprintf(stderr, ANSI_ESCAPE_RESET);
+ break;
+ case PG_LOG_WARNING:
+ if (sgr_warning)
+ fprintf(stderr, ANSI_ESCAPE_FMT, sgr_warning);
+ fprintf(stderr, _("warning: "));
+ if (sgr_warning)
+ fprintf(stderr, ANSI_ESCAPE_RESET);
+ break;
+ default:
+ break;
+ }
break;
- case PG_LOG_WARNING:
- if (sgr_warning)
- fprintf(stderr, ANSI_ESCAPE_FMT, sgr_warning);
- fprintf(stderr, _("warning: "));
- if (sgr_warning)
- fprintf(stderr, ANSI_ESCAPE_RESET);
+ case PG_LOG_DETAIL:
+ fprintf(stderr, _("detail: "));
break;
- default:
+ case PG_LOG_HINT:
+ fprintf(stderr, _("hint: "));
break;
}
}
WaitForSingleObject(pi.hProcess, INFINITE);
if (!GetExitCodeProcess(pi.hProcess, &x))
- {
- pg_log_error("could not get exit code from subprocess: error code %lu", GetLastError());
- exit(1);
- }
+ pg_fatal("could not get exit code from subprocess: error code %lu", GetLastError());
exit(x);
}
pg_free(cmdline);
xlogRestoreCmd = BuildRestoreCommand(restoreCommand, xlogpath,
xlogfname, NULL);
if (xlogRestoreCmd == NULL)
- {
- pg_log_fatal("cannot use restore_command with %%r placeholder");
- exit(1);
- }
+ pg_fatal("cannot use restore_command with %%r placeholder");
/*
* Execute restore_command, which should copy the missing file from
if (stat(xlogpath, &stat_buf) == 0)
{
if (expectedSize > 0 && stat_buf.st_size != expectedSize)
- {
- pg_log_fatal("unexpected file size for \"%s\": %lld instead of %lld",
- xlogfname, (long long int) stat_buf.st_size,
- (long long int) expectedSize);
- exit(1);
- }
+ pg_fatal("unexpected file size for \"%s\": %lld instead of %lld",
+ xlogfname, (long long int) stat_buf.st_size,
+ (long long int) expectedSize);
else
{
int xlogfd = open(xlogpath, O_RDONLY | PG_BINARY, 0);
if (xlogfd < 0)
- {
- pg_log_fatal("could not open file \"%s\" restored from archive: %m",
- xlogpath);
- exit(1);
- }
+ pg_fatal("could not open file \"%s\" restored from archive: %m",
+ xlogpath);
else
return xlogfd;
}
else
{
if (errno != ENOENT)
- {
- pg_log_fatal("could not stat file \"%s\": %m",
- xlogpath);
- exit(1);
- }
+ pg_fatal("could not stat file \"%s\": %m",
+ xlogpath);
}
}
* fatal too.
*/
if (wait_result_is_any_signal(rc, true))
- {
- pg_log_fatal("restore_command failed: %s",
- wait_result_to_str(rc));
- exit(1);
- }
+ pg_fatal("restore_command failed: %s",
+ wait_result_to_str(rc));
/*
* The file is not available, so just let the caller decide what to do
conn = PQconnectdbParams(keywords, values, true);
if (!conn)
- {
- pg_log_error("could not connect to database %s: out of memory",
- cparams->dbname);
- exit(1);
- }
+ pg_fatal("could not connect to database %s: out of memory",
+ cparams->dbname);
/*
* No luck? Trying asking (again) for a password.
PQfinish(conn);
return NULL;
}
- pg_log_error("%s", PQerrorMessage(conn));
- exit(1);
+ pg_fatal("%s", PQerrorMessage(conn));
}
/* Start strict; callers may override this. */
sa->cparams->override_dbname = old_override;
if (PQsocket(slot->connection) >= FD_SETSIZE)
- {
- pg_log_fatal("too many jobs for this platform");
- exit(1);
- }
+ pg_fatal("too many jobs for this platform");
/* Setup the connection using the supplied command, if any. */
if (sa->initcmd)
PQresultStatus(res) != PGRES_TUPLES_OK)
{
pg_log_error("query failed: %s", PQerrorMessage(conn));
- pg_log_info("query was: %s", query);
+ pg_log_error_detail("Query was: %s", query);
PQfinish(conn);
exit(1);
}
PQresultStatus(res) != PGRES_COMMAND_OK)
{
pg_log_error("query failed: %s", PQerrorMessage(conn));
- pg_log_info("query was: %s", query);
+ pg_log_error_detail("Query was: %s", query);
PQfinish(conn);
exit(1);
}
contents = createPQExpBuffer();
if (!contents)
- {
- pg_log_error("out of memory");
- exit(1);
- }
+ pg_fatal("out of memory");
/*
* In PostgreSQL 12 and newer versions, standby_mode is gone, replaced by
connOptions = PQconninfo(pgconn);
if (connOptions == NULL)
- {
- pg_log_error("out of memory");
- exit(1);
- }
+ pg_fatal("out of memory");
initPQExpBuffer(&conninfo_buf);
for (PQconninfoOption *opt = connOptions; opt && opt->keyword; opt++)
appendConnStrVal(&conninfo_buf, opt->val);
}
if (PQExpBufferDataBroken(conninfo_buf))
- {
- pg_log_error("out of memory");
- exit(1);
- }
+ pg_fatal("out of memory");
/*
* Escape the connection string, so that it can be put in the config file.
}
if (PQExpBufferBroken(contents))
- {
- pg_log_error("out of memory");
- exit(1);
- }
+ pg_fatal("out of memory");
PQconninfoFree(connOptions);
cf = fopen(filename, use_recovery_conf ? "w" : "a");
if (cf == NULL)
- {
- pg_log_error("could not open file \"%s\": %m", filename);
- exit(1);
- }
+ pg_fatal("could not open file \"%s\": %m", filename);
if (fwrite(contents->data, contents->len, 1, cf) != 1)
- {
- pg_log_error("could not write to file \"%s\": %m", filename);
- exit(1);
- }
+ pg_fatal("could not write to file \"%s\": %m", filename);
fclose(cf);
snprintf(filename, MAXPGPATH, "%s/%s", target_dir, "standby.signal");
cf = fopen(filename, "w");
if (cf == NULL)
- {
- pg_log_error("could not create file \"%s\": %m", filename);
- exit(1);
- }
+ pg_fatal("could not create file \"%s\": %m", filename);
fclose(cf);
}
char *result = escape_single_quotes_ascii(src);
if (!result)
- {
- pg_log_error("out of memory");
- exit(1);
- }
+ pg_fatal("out of memory");
return result;
}
enum pg_log_level
{
/*
- * Not initialized yet
+ * Not initialized yet (not to be used as an actual message log level).
*/
PG_LOG_NOTSET = 0,
PG_LOG_ERROR,
/*
- * Severe errors that cause program termination. (One-shot programs may
- * chose to label even fatal errors as merely "errors". The distinction
- * is up to the program.)
+ * Turn all logging off (not to be used as an actual message log level).
*/
- PG_LOG_FATAL,
+ PG_LOG_OFF,
+};
+
+/*
+ * __pg_log_level is the minimum log level that will actually be shown.
+ * NOTE: keep PGDLLIMPORT here — the declaration being replaced (the removed
+ * "extern PGDLLIMPORT enum pg_log_level __pg_log_level;" line below) carried
+ * it, and every pg_log_* macro references this variable from other modules,
+ * so dropping the qualifier would break Windows DLL linkage.
+ */
+extern PGDLLIMPORT enum pg_log_level __pg_log_level;
+/*
+ * A log message can have several parts. The primary message is required,
+ * others are optional. When emitting multiple parts, do so in the order of
+ * this enum, for consistency.
+ */
+enum pg_log_part
+{
/*
- * Turn all logging off.
+ * The primary message. Try to keep it to one line; follow the backend's
+ * style guideline for primary messages.
*/
- PG_LOG_OFF,
-};
+ PG_LOG_PRIMARY,
-extern PGDLLIMPORT enum pg_log_level __pg_log_level;
+ /*
+ * Additional detail. Follow the backend's style guideline for detail
+ * messages.
+ */
+ PG_LOG_DETAIL,
+
+ /*
+ * Hint (not guaranteed correct) about how to fix the problem. Follow the
+ * backend's style guideline for hint messages.
+ */
+ PG_LOG_HINT,
+};
/*
* Kind of a hack to be able to produce the psql output exactly as required by
void pg_logging_set_pre_callback(void (*cb) (void));
void pg_logging_set_locus_callback(void (*cb) (const char **filename, uint64 *lineno));
-void pg_log_generic(enum pg_log_level level, const char *pg_restrict fmt,...) pg_attribute_printf(2, 3);
-void pg_log_generic_v(enum pg_log_level level, const char *pg_restrict fmt, va_list ap) pg_attribute_printf(2, 0);
+void pg_log_generic(enum pg_log_level level, enum pg_log_part part,
+ const char *pg_restrict fmt,...)
+ pg_attribute_printf(3, 4);
+void pg_log_generic_v(enum pg_log_level level, enum pg_log_part part,
+ const char *pg_restrict fmt, va_list ap)
+ pg_attribute_printf(3, 0);
+
+/*
+ * Preferred style is to use these macros to perform logging; don't call
+ * pg_log_generic[_v] directly, except perhaps in error interface code.
+ */
+#define pg_log_error(...) do { \
+ if (likely(__pg_log_level <= PG_LOG_ERROR)) \
+ pg_log_generic(PG_LOG_ERROR, PG_LOG_PRIMARY, __VA_ARGS__); \
+ } while(0)
-#define pg_log_fatal(...) do { \
- if (likely(__pg_log_level <= PG_LOG_FATAL)) pg_log_generic(PG_LOG_FATAL, __VA_ARGS__); \
+#define pg_log_error_detail(...) do { \
+ if (likely(__pg_log_level <= PG_LOG_ERROR)) \
+ pg_log_generic(PG_LOG_ERROR, PG_LOG_DETAIL, __VA_ARGS__); \
} while(0)
-#define pg_log_error(...) do { \
- if (likely(__pg_log_level <= PG_LOG_ERROR)) pg_log_generic(PG_LOG_ERROR, __VA_ARGS__); \
+#define pg_log_error_hint(...) do { \
+ if (likely(__pg_log_level <= PG_LOG_ERROR)) \
+ pg_log_generic(PG_LOG_ERROR, PG_LOG_HINT, __VA_ARGS__); \
} while(0)
#define pg_log_warning(...) do { \
- if (likely(__pg_log_level <= PG_LOG_WARNING)) pg_log_generic(PG_LOG_WARNING, __VA_ARGS__); \
+ if (likely(__pg_log_level <= PG_LOG_WARNING)) \
+ pg_log_generic(PG_LOG_WARNING, PG_LOG_PRIMARY, __VA_ARGS__); \
+ } while(0)
+
+#define pg_log_warning_detail(...) do { \
+ if (likely(__pg_log_level <= PG_LOG_WARNING)) \
+ pg_log_generic(PG_LOG_WARNING, PG_LOG_DETAIL, __VA_ARGS__); \
+ } while(0)
+
+#define pg_log_warning_hint(...) do { \
+ if (likely(__pg_log_level <= PG_LOG_WARNING)) \
+ pg_log_generic(PG_LOG_WARNING, PG_LOG_HINT, __VA_ARGS__); \
} while(0)
#define pg_log_info(...) do { \
- if (likely(__pg_log_level <= PG_LOG_INFO)) pg_log_generic(PG_LOG_INFO, __VA_ARGS__); \
+ if (likely(__pg_log_level <= PG_LOG_INFO)) \
+ pg_log_generic(PG_LOG_INFO, PG_LOG_PRIMARY, __VA_ARGS__); \
+ } while(0)
+
+#define pg_log_info_detail(...) do { \
+ if (likely(__pg_log_level <= PG_LOG_INFO)) \
+ pg_log_generic(PG_LOG_INFO, PG_LOG_DETAIL, __VA_ARGS__); \
+ } while(0)
+
+#define pg_log_info_hint(...) do { \
+ if (likely(__pg_log_level <= PG_LOG_INFO)) \
+ pg_log_generic(PG_LOG_INFO, PG_LOG_HINT, __VA_ARGS__); \
} while(0)
#define pg_log_debug(...) do { \
- if (unlikely(__pg_log_level <= PG_LOG_DEBUG)) pg_log_generic(PG_LOG_DEBUG, __VA_ARGS__); \
+ if (unlikely(__pg_log_level <= PG_LOG_DEBUG)) \
+ pg_log_generic(PG_LOG_DEBUG, PG_LOG_PRIMARY, __VA_ARGS__); \
+ } while(0)
+
+#define pg_log_debug_detail(...) do { \
+ if (unlikely(__pg_log_level <= PG_LOG_DEBUG)) \
+ pg_log_generic(PG_LOG_DEBUG, PG_LOG_DETAIL, __VA_ARGS__); \
+ } while(0)
+
+#define pg_log_debug_hint(...) do { \
+ if (unlikely(__pg_log_level <= PG_LOG_DEBUG)) \
+ pg_log_generic(PG_LOG_DEBUG, PG_LOG_HINT, __VA_ARGS__); \
+ } while(0)
+
+/*
+ * A common shortcut: pg_log_error() and immediately exit(1).
+ */
+#define pg_fatal(...) do { \
+ if (likely(__pg_log_level <= PG_LOG_ERROR)) \
+ pg_log_generic(PG_LOG_ERROR, PG_LOG_PRIMARY, __VA_ARGS__); \
+ exit(1); \
} while(0)
#endif /* COMMON_LOGGING_H */
#define SIMPLEHASH_H
#ifdef FRONTEND
-#define sh_error(...) \
- do { pg_log_fatal(__VA_ARGS__); exit(1); } while(0)
+#define sh_error(...) pg_fatal(__VA_ARGS__)
#define sh_log(...) pg_log_info(__VA_ARGS__)
#else
#define sh_error(...) elog(ERROR, __VA_ARGS__)
FRONTEND_COMMON_GETTEXT_FILES = $(top_srcdir)/src/common/logging.c
FRONTEND_COMMON_GETTEXT_TRIGGERS = \
- pg_log_fatal pg_log_error pg_log_warning pg_log_info pg_log_generic:2 pg_log_generic_v:2
+ pg_log_error pg_log_error_detail pg_log_error_hint \
+ pg_log_warning pg_log_warning_detail pg_log_warning_hint \
+ pg_log_info pg_log_info_detail pg_log_info_hint \
+ pg_fatal pg_log_generic:3 pg_log_generic_v:3
FRONTEND_COMMON_GETTEXT_FLAGS = \
- pg_log_fatal:1:c-format pg_log_error:1:c-format pg_log_warning:1:c-format pg_log_info:1:c-format pg_log_generic:2:c-format pg_log_generic_v:2:c-format
+ pg_log_error:1:c-format pg_log_error_detail:1:c-format pg_log_error_hint:1:c-format \
+ pg_log_warning:1:c-format pg_log_warning_detail:1:c-format pg_log_warning_hint:1:c-format \
+ pg_log_info:1:c-format pg_log_info_detail:1:c-format pg_log_info_hint:1:c-format \
+ pg_fatal:1:c-format pg_log_generic:3:c-format pg_log_generic_v:3:c-format
all-po: $(MO_FILES)