From 5cd672d4826e765362874dd938f14e843fd5ef32 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Tue, 7 Jul 2020 16:33:07 +0300 Subject: [PATCH 001/525] [Issue #224] New flag '--color' for coloring the show plain backup info and console ERROR and WARNING messages --- doc/pgprobackup.xml | 14 +++++- src/backup.c | 2 +- src/catalog.c | 19 +++++++-- src/delete.c | 2 +- src/help.c | 9 +++- src/pg_probackup.c | 6 +++ src/pg_probackup.h | 21 ++++++++- src/show.c | 15 ++++--- src/util.c | 23 ++++++++++ src/utils/logger.c | 101 +++++++++++++++++++++++++++++++++++++++++++- src/utils/logger.h | 1 + tests/show.py | 36 ++++++++++++++++ 12 files changed, 233 insertions(+), 16 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 26211b30e..b1469946e 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -3566,7 +3566,7 @@ pg_probackup show-config -B backup_dir --instance show pg_probackup show -B backup_dir -[--help] [--instance instance_name [-i backup_id | --archive]] [--format=plain|json] +[--help] [--instance instance_name [-i backup_id | --archive]] [--format=plain|json] [--color] Shows the contents of the backup catalog. If @@ -3581,6 +3581,8 @@ pg_probackup show -B backup_dir plain text. You can specify the --format=json option to get the result in the JSON format. + If --color flag is used with plain text format, + then output is colored. For details on usage, see the sections @@ -4603,6 +4605,16 @@ pg_probackup archive-get -B backup_dir --instance + + + + + + Color the console log messages of warning and error levels. + + + + diff --git a/src/backup.c b/src/backup.c index e2293fe4c..2d8fbb467 100644 --- a/src/backup.c +++ b/src/backup.c @@ -843,7 +843,7 @@ do_backup(time_t start_time, pgSetBackupParams *set_backup_params, elog(INFO, "Backup start, pg_probackup version: %s, instance: %s, backup ID: %s, backup mode: %s, " "wal mode: %s, remote: %s, compress-algorithm: %s, compress-level: %i", - PROGRAM_VERSION, instance_name, base36enc(start_time), pgBackupGetBackupMode(¤t), + PROGRAM_VERSION, instance_name, base36enc(start_time), pgBackupGetBackupMode(¤t, false), current.stream ? "STREAM" : "ARCHIVE", IsSshProtocol() ? "true" : "false", deparse_compress_alg(current.compress_alg), current.compress_level); diff --git a/src/catalog.c b/src/catalog.c index e47f0367b..939d46749 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -331,9 +331,22 @@ lock_backup(pgBackup *backup, bool strict) * Get backup_mode in string representation. */ const char * -pgBackupGetBackupMode(pgBackup *backup) +pgBackupGetBackupMode(pgBackup *backup, bool show_color) { - return backupModes[backup->backup_mode]; + if (show_color) + { + /* color the Backup mode */ + char *mode = pgut_malloc(24); /* leaking memory here */ + + if (backup->backup_mode == BACKUP_MODE_FULL) + snprintf(mode, 24, "%s%s%s", TC_GREEN_BOLD, backupModes[backup->backup_mode], TC_RESET); + else + snprintf(mode, 24, "%s%s%s", TC_BLUE_BOLD, backupModes[backup->backup_mode], TC_RESET); + + return mode; + } + else + return backupModes[backup->backup_mode]; } static bool @@ -1684,7 +1697,7 @@ pgBackupWriteControl(FILE *out, pgBackup *backup) char timestamp[100]; fio_fprintf(out, "#Configuration\n"); - fio_fprintf(out, "backup-mode = %s\n", pgBackupGetBackupMode(backup)); + fio_fprintf(out, "backup-mode = %s\n", pgBackupGetBackupMode(backup, false)); fio_fprintf(out, "stream = %s\n", backup->stream ? 
"true" : "false"); fio_fprintf(out, "compress-alg = %s\n", deparse_compress_alg(backup->compress_alg)); diff --git a/src/delete.c b/src/delete.c index b3c50a4b9..542352aac 100644 --- a/src/delete.c +++ b/src/delete.c @@ -402,7 +402,7 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg /* TODO: add ancestor(chain full backup) ID */ elog(INFO, "Backup %s, mode: %s, status: %s. Redundancy: %i/%i, Time Window: %ud/%ud. %s", base36enc(backup->start_time), - pgBackupGetBackupMode(backup), + pgBackupGetBackupMode(backup, false), status2str(backup->status), cur_full_backup_num, instance_config.retention_redundancy, diff --git a/src/help.c b/src/help.c index 2b5bcd06e..f4f7dce58 100644 --- a/src/help.c +++ b/src/help.c @@ -310,6 +310,7 @@ help_backup(void) printf(_(" (example: --note='backup before app update to v13.1')\n")); printf(_("\n Logging options:\n")); + printf(_(" --color color the error and warning console messages\n")); printf(_(" --log-level-console=log-level-console\n")); printf(_(" level for console logging (default: info)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); @@ -470,6 +471,7 @@ help_restore(void) printf(_(" -S, --primary-slot-name=slotname replication slot to be used for WAL streaming from the primary server\n")); printf(_("\n Logging options:\n")); + printf(_(" --color color the error and warning console messages\n")); printf(_(" --log-level-console=log-level-console\n")); printf(_(" level for console logging (default: info)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); @@ -536,6 +538,7 @@ help_validate(void) printf(_(" --skip-block-validation set to validate only file-level checksum\n")); printf(_("\n Logging options:\n")); + printf(_(" --color color the error and warning console messages\n")); printf(_(" --log-level-console=log-level-console\n")); printf(_(" level for console logging (default: info)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); @@ -580,6 +583,7 @@ help_checkdb(void) printf(_(" can be used only with '--amcheck' option\n")); printf(_("\n Logging options:\n")); + printf(_(" --color color the error and warning console messages\n")); printf(_(" --log-level-console=log-level-console\n")); printf(_(" level for console logging (default: info)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); @@ -620,7 +624,8 @@ help_show(void) printf(_(" --instance=instance_name show info about specific instance\n")); printf(_(" -i, --backup-id=backup-id show info about specific backups\n")); printf(_(" --archive show WAL archive information\n")); - printf(_(" --format=format show format=PLAIN|JSON\n\n")); + printf(_(" --format=format show format=PLAIN|JSON\n")); + printf(_(" --color color the info for plain format\n\n")); } static void @@ -655,6 +660,7 @@ help_delete(void) printf(_(" --status=backup_status delete all backups with specified status\n")); printf(_("\n Logging options:\n")); + printf(_(" --color color the error and warning console messages\n")); printf(_(" --log-level-console=log-level-console\n")); printf(_(" level for console logging (default: info)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); @@ -697,6 +703,7 @@ help_merge(void) printf(_(" --progress show progress\n")); printf(_("\n Logging options:\n")); + printf(_(" --color color the error and warning console messages\n")); printf(_(" 
--log-level-console=log-level-console\n")); printf(_(" level for console logging (default: info)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); diff --git a/src/pg_probackup.c b/src/pg_probackup.c index f2aca75fd..aa2b09f19 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -67,6 +67,7 @@ char *externaldir = NULL; static char *backup_id_string = NULL; int num_threads = 1; bool stream_wal = false; +bool show_color = false; bool is_archive_cmd = false; pid_t my_pid = 0; __thread int my_thread_num = 1; @@ -178,6 +179,7 @@ static ConfigOption cmd_options[] = { 'b', 132, "progress", &progress, SOURCE_CMD_STRICT }, { 's', 'i', "backup-id", &backup_id_string, SOURCE_CMD_STRICT }, { 'b', 133, "no-sync", &no_sync, SOURCE_CMD_STRICT }, + { 'b', 134, "color", &show_color, SOURCE_CMD_STRICT }, /* backup options */ { 'b', 180, "backup-pg-log", &backup_logs, SOURCE_CMD_STRICT }, { 'f', 'b', "backup-mode", opt_backup_mode, SOURCE_CMD_STRICT }, @@ -440,6 +442,10 @@ main(int argc, char *argv[]) pgut_init(); + /* Check terminal presense and initialize ANSI escape codes for Windows */ + if (show_color) + init_console(); + if (help_opt) help_command(command_name); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index b995be062..6cad098fe 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -111,6 +111,22 @@ extern const char *PROGRAM_EMAIL; #define XRecOffIsNull(xlrp) \ ((xlrp) % XLOG_BLCKSZ == 0) +/* Text Coloring macro */ +#define TC_LEN 11 +#define TC_RED "\033[0;31m" +#define TC_RED_BOLD "\033[1;31m" +#define TC_BLUE "\033[0;34m" +#define TC_BLUE_BOLD "\033[1;34m" +#define TC_GREEN "\033[0;32m" +#define TC_GREEN_BOLD "\033[1;32m" +#define TC_YELLOW "\033[0;33m" +#define TC_YELLOW_BOLD "\033[1;33m" +#define TC_MAGENTA "\033[0;35m" +#define TC_MAGENTA_BOLD "\033[1;35m" +#define TC_CYAN "\033[0;36m" +#define TC_CYAN_BOLD "\033[1;36m" +#define TC_RESET "\033[0m" + typedef struct RedoParams { TimeLineID tli; @@ -715,6 +731,7 @@ extern pid_t my_pid; extern __thread int my_thread_num; extern int num_threads; extern bool stream_wal; +extern bool show_color; extern bool progress; extern bool is_archive_cmd; /* true for archive-{get,push} */ #if PG_VERSION_NUM >= 100000 @@ -866,7 +883,8 @@ extern void write_backup_status(pgBackup *backup, BackupStatus status, extern void write_backup_data_bytes(pgBackup *backup); extern bool lock_backup(pgBackup *backup, bool strict); -extern const char *pgBackupGetBackupMode(pgBackup *backup); +extern const char *pgBackupGetBackupMode(pgBackup *backup, bool show_color); +extern void pgBackupGetBackupModeColor(pgBackup *backup, char *mode); extern parray *catalog_get_instance_list(void); extern parray *catalog_get_backup_list(const char *instance_name, time_t requested_backup_id); @@ -1076,6 +1094,7 @@ extern void copy_pgcontrol_file(const char *from_fullpath, fio_location from_loc extern void time2iso(char *buf, size_t len, time_t time); extern const char *status2str(BackupStatus status); +const char *status2str_color(BackupStatus status); extern BackupStatus str2status(const char *status); extern const char *base36enc(long unsigned int value); extern char *base36enc_dup(long unsigned int value); diff --git a/src/show.c b/src/show.c index 81a16ad64..d8c0cba2e 100644 --- a/src/show.c +++ b/src/show.c @@ -321,7 +321,7 @@ print_backup_json_object(PQExpBuffer buf, pgBackup *backup) json_add_value(buf, "parent-backup-id", base36enc(backup->parent_backup), json_level, true); - json_add_value(buf, "backup-mode", 
pgBackupGetBackupMode(backup), + json_add_value(buf, "backup-mode", pgBackupGetBackupMode(backup, false), json_level, true); json_add_value(buf, "wal", backup->stream ? "STREAM": "ARCHIVE", @@ -554,8 +554,8 @@ show_instance_plain(const char *instance_name, parray *backup_list, bool show_na cur++; /* Mode */ - row->mode = pgBackupGetBackupMode(backup); - widths[cur] = Max(widths[cur], strlen(row->mode)); + row->mode = pgBackupGetBackupMode(backup, show_color); + widths[cur] = Max(widths[cur], strlen(row->mode) - (show_color ? TC_LEN : 0)); cur++; /* WAL mode*/ @@ -628,8 +628,9 @@ show_instance_plain(const char *instance_name, parray *backup_list, bool show_na cur++; /* Status */ - row->status = status2str(backup->status); - widths[cur] = Max(widths[cur], strlen(row->status)); + row->status = show_color ? status2str_color(backup->status) : status2str(backup->status); + widths[cur] = Max(widths[cur], strlen(row->status) - (show_color ? TC_LEN : 0)); + } for (i = 0; i < SHOW_FIELDS_COUNT; i++) @@ -679,7 +680,7 @@ show_instance_plain(const char *instance_name, parray *backup_list, bool show_na row->recovery_time); cur++; - appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur], + appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur] + (show_color ? TC_LEN : 0), row->mode); cur++; @@ -715,7 +716,7 @@ show_instance_plain(const char *instance_name, parray *backup_list, bool show_na row->stop_lsn); cur++; - appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur], + appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur] + (show_color ? TC_LEN : 0), row->status); cur++; diff --git a/src/util.c b/src/util.c index 5ad751df2..0d6974bfb 100644 --- a/src/util.c +++ b/src/util.c @@ -516,6 +516,29 @@ status2str(BackupStatus status) return statusName[status]; } +const char * +status2str_color(BackupStatus status) +{ + char *status_str = pgut_malloc(20); + + /* UNKNOWN */ + if (status == BACKUP_STATUS_INVALID) + snprintf(status_str, 20, "%s%s%s", TC_YELLOW_BOLD, "UNKNOWN", TC_RESET); + /* CORRUPT, ERROR and ORPHAN */ + else if (status == BACKUP_STATUS_CORRUPT || status == BACKUP_STATUS_ERROR || + status == BACKUP_STATUS_ORPHAN) + snprintf(status_str, 20, "%s%s%s", TC_RED_BOLD, statusName[status], TC_RESET); + /* MERGING, MERGED, DELETING and DELETED */ + else if (status == BACKUP_STATUS_MERGING || status == BACKUP_STATUS_MERGED || + status == BACKUP_STATUS_DELETING || status == BACKUP_STATUS_DELETED) + snprintf(status_str, 20, "%s%s%s", TC_YELLOW_BOLD, statusName[status], TC_RESET); + /* OK and DONE */ + else + snprintf(status_str, 20, "%s%s%s", TC_GREEN_BOLD, statusName[status], TC_RESET); + + return status_str; +} + BackupStatus str2status(const char *status) { diff --git a/src/utils/logger.c b/src/utils/logger.c index 5aee41b46..aad8303dc 100644 --- a/src/utils/logger.c +++ b/src/utils/logger.c @@ -39,6 +39,10 @@ typedef enum PG_FATAL } eLogType; +#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING +#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004 +#endif + void pg_log(eLogType type, const char *fmt,...) 
pg_attribute_printf(2, 3); static void elog_internal(int elevel, bool file_only, const char *message); @@ -115,6 +119,85 @@ init_logger(const char *root_path, LoggerConfig *config) #endif } +/* + * Check that we are connected to terminal and + * enable ANSI escape codes for Windows if possible + */ +void +init_console(void) +{ + + /* no point in tex coloring if we do not connected to terminal */ + if (!isatty(fileno(stderr)) || + !isatty(fileno(stdout))) + { + show_color = false; + elog(WARNING, "No terminal detected, ignoring '--color' flag"); + return; + } + +#ifdef WIN32 + HANDLE hOut = INVALID_HANDLE_VALUE; + HANDLE hErr = INVALID_HANDLE_VALUE; + DWORD dwMode_out = 0; + DWORD dwMode_err = 0; + + hOut = GetStdHandle(STD_OUTPUT_HANDLE); + if (hOut == INVALID_HANDLE_VALUE || !hOut) + { + show_color = false; + _dosmaperr(GetLastError()); + elog(WARNING, "Failed to get terminal stdout handle: %s", strerror(errno)); + return; + } + + hErr = GetStdHandle(STD_ERROR_HANDLE); + if (hErr == INVALID_HANDLE_VALUE || !hErr) + { + show_color = false; + _dosmaperr(GetLastError()); + elog(WARNING, "Failed to get terminal stderror handle: %s", strerror(errno)); + return; + } + + if (!GetConsoleMode(hOut, &dwMode_out)) + { + show_color = false; + _dosmaperr(GetLastError()); + elog(WARNING, "Failed to get console mode for stdout: %s", strerror(errno)); + return; + } + + if (!GetConsoleMode(hErr, &dwMode_err)) + { + show_color = false; + _dosmaperr(GetLastError()); + elog(WARNING, "Failed to get console mode for stderr: %s", strerror(errno)); + return; + } + + /* Add ANSI codes support */ + dwMode_out |= ENABLE_VIRTUAL_TERMINAL_PROCESSING; + dwMode_err |= ENABLE_VIRTUAL_TERMINAL_PROCESSING; + + if (!SetConsoleMode(hOut, dwMode_out)) + { + show_color = false; + _dosmaperr(GetLastError()); + elog(WARNING, "Cannot set console mode for stdout: %s", strerror(errno)); + return; + } + + if (!SetConsoleMode(hErr, dwMode_err)) + { + show_color = false; + _dosmaperr(GetLastError()); + elog(WARNING, "Cannot set console mode for stderr: %s", strerror(errno)); + return; + } +#endif +} + static void write_elevel(FILE *stream, int elevel) { @@ -272,10 +355,26 @@ elog_internal(int elevel, bool file_only, const char *message) fprintf(stderr, "%s ", str_pid); fprintf(stderr, "%s ", str_thread); } + else if (show_color) + { + /* color WARNING and ERROR messages */ + if (elevel == WARNING) + fprintf(stderr, "%s", TC_YELLOW_BOLD); + else if (elevel == ERROR) + fprintf(stderr, "%s", TC_RED_BOLD); + } write_elevel(stderr, elevel); - fprintf(stderr, "%s\n", message); + /* main payload */ + fprintf(stderr, "%s", message); + + /* reset color to default */ + if (show_color && (elevel == WARNING || elevel == ERROR)) + fprintf(stderr, "%s", TC_RESET); + + fprintf(stderr, "\n"); + fflush(stderr); } diff --git a/src/utils/logger.h b/src/utils/logger.h index 37b6ff095..6a7407e41 100644 --- a/src/utils/logger.h +++ b/src/utils/logger.h @@ -51,6 +51,7 @@ extern void elog(int elevel, const char *fmt, ...) pg_attribute_printf(2, 3); extern void elog_file(int elevel, const char *fmt, ...) 
pg_attribute_printf(2, 3); extern void init_logger(const char *root_path, LoggerConfig *config); +extern void init_console(void); extern int parse_log_level(const char *level); extern const char *deparse_log_level(int level); diff --git a/tests/show.py b/tests/show.py index 0da95dcbb..1018ed63d 100644 --- a/tests/show.py +++ b/tests/show.py @@ -538,3 +538,39 @@ def test_corrupt_correctness_2(self): # Clean after yourself self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_color_with_no_terminal(self): + """backup.control contains invalid option""" + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums'], + pg_options={'autovacuum': 'off'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=1) + + # FULL + try: + self.backup_node( + backup_dir, 'node', node, options=['--color', '--archive-timeout=1s']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because archiving is disabled\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertNotIn( + '[0m', e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # Clean after yourself + self.del_test_dir(module_name, fname) From 26578edda1391396e63609928329b98f024ea396 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Tue, 7 Jul 2020 16:36:09 +0300 Subject: [PATCH 002/525] bump version to 2.5.0 --- src/pg_probackup.h | 4 ++-- tests/expected/option_version.out | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 6cad098fe..f25fdb66d 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -292,8 +292,8 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.4.2" -#define AGENT_PROTOCOL_VERSION 20402 +#define PROGRAM_VERSION "2.5.0" +#define AGENT_PROTOCOL_VERSION 20500 typedef struct ConnectionOptions diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index 47c19fef5..342842b29 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.4.2 \ No newline at end of file +pg_probackup 2.5.0 \ No newline at end of file From 383650988f4068b486c3afbaeb4ccca478f194a0 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Tue, 7 Jul 2020 23:58:28 +0300 Subject: [PATCH 003/525] [Issue #224] enable the stdout and stderr coloring by default --- doc/pgprobackup.xml | 10 +++++----- src/help.c | 26 +++++++++++++------------- src/pg_probackup.c | 13 ++++++++----- src/utils/logger.c | 1 - tests/archive.py | 5 +++++ tests/show.py | 2 +- 6 files changed, 32 insertions(+), 25 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index b1469946e..844cf5b44 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -3566,7 +3566,7 @@ pg_probackup show-config -B backup_dir --instance show pg_probackup show -B backup_dir -[--help] [--instance instance_name [-i backup_id | --archive]] [--format=plain|json] [--color] +[--help] [--instance 
instance_name [-i backup_id | --archive]] [--format=plain|json] [--no-color] Shows the contents of the backup catalog. If @@ -3581,8 +3581,8 @@ pg_probackup show -B backup_dir plain text. You can specify the --format=json option to get the result in the JSON format. - If --color flag is used with plain text format, - then output is colored. + If --no-color flag is used, + then the output is not colored. For details on usage, see the sections @@ -4607,10 +4607,10 @@ pg_probackup archive-get -B backup_dir --instance - + - Color the console log messages of warning and error levels. + Disable the coloring for console log messages of warning and error levels. diff --git a/src/help.c b/src/help.c index f4f7dce58..c82c8cf63 100644 --- a/src/help.c +++ b/src/help.c @@ -127,7 +127,7 @@ help_pg_probackup(void) printf(_(" [--error-log-filename=error-log-filename]\n")); printf(_(" [--log-directory=log-directory]\n")); printf(_(" [--log-rotation-size=log-rotation-size]\n")); - printf(_(" [--log-rotation-age=log-rotation-age]\n")); + printf(_(" [--log-rotation-age=log-rotation-age] [--no-color]\n")); printf(_(" [--delete-expired] [--delete-wal] [--merge-expired]\n")); printf(_(" [--retention-redundancy=retention-redundancy]\n")); printf(_(" [--retention-window=retention-window]\n")); @@ -188,7 +188,7 @@ help_pg_probackup(void) printf(_("\n %s show -B backup-path\n"), PROGRAM_NAME); printf(_(" [--instance=instance_name [-i backup-id]]\n")); printf(_(" [--format=format] [--archive]\n")); - printf(_(" [--help]\n")); + printf(_(" [--no-color] [--help]\n")); printf(_("\n %s delete -B backup-path --instance=instance_name\n"), PROGRAM_NAME); printf(_(" [-j num-threads] [--progress]\n")); @@ -273,7 +273,7 @@ help_backup(void) printf(_(" [--error-log-filename=error-log-filename]\n")); printf(_(" [--log-directory=log-directory]\n")); printf(_(" [--log-rotation-size=log-rotation-size]\n")); - printf(_(" [--log-rotation-age=log-rotation-age]\n")); + printf(_(" [--log-rotation-age=log-rotation-age] [--no-color]\n")); printf(_(" [--delete-expired] [--delete-wal] [--merge-expired]\n")); printf(_(" [--retention-redundancy=retention-redundancy]\n")); printf(_(" [--retention-window=retention-window]\n")); @@ -310,7 +310,6 @@ help_backup(void) printf(_(" (example: --note='backup before app update to v13.1')\n")); printf(_("\n Logging options:\n")); - printf(_(" --color color the error and warning console messages\n")); printf(_(" --log-level-console=log-level-console\n")); printf(_(" level for console logging (default: info)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); @@ -330,6 +329,7 @@ help_backup(void) printf(_(" --log-rotation-age=log-rotation-age\n")); printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n")); + printf(_(" --no-color disable the coloring of error and warning console messages\n")); printf(_("\n Retention options:\n")); printf(_(" --delete-expired delete backups expired according to current\n")); @@ -471,7 +471,6 @@ help_restore(void) printf(_(" -S, --primary-slot-name=slotname replication slot to be used for WAL streaming from the primary server\n")); printf(_("\n Logging options:\n")); - printf(_(" --color color the error and warning console messages\n")); printf(_(" --log-level-console=log-level-console\n")); printf(_(" level for console logging (default: info)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 
'verbose'\n")); @@ -491,6 +490,7 @@ help_restore(void) printf(_(" --log-rotation-age=log-rotation-age\n")); printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n")); + printf(_(" --no-color disable the coloring of error and warning console messages\n")); printf(_("\n Remote options:\n")); printf(_(" --remote-proto=protocol remote protocol to use\n")); @@ -538,7 +538,6 @@ help_validate(void) printf(_(" --skip-block-validation set to validate only file-level checksum\n")); printf(_("\n Logging options:\n")); - printf(_(" --color color the error and warning console messages\n")); printf(_(" --log-level-console=log-level-console\n")); printf(_(" level for console logging (default: info)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); @@ -557,7 +556,8 @@ help_validate(void) printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); printf(_(" --log-rotation-age=log-rotation-age\n")); printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); - printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n\n")); + printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n")); + printf(_(" --no-color disable the coloring of error and warning console messages\n\n")); } static void @@ -583,7 +583,6 @@ help_checkdb(void) printf(_(" can be used only with '--amcheck' option\n")); printf(_("\n Logging options:\n")); - printf(_(" --color color the error and warning console messages\n")); printf(_(" --log-level-console=log-level-console\n")); printf(_(" level for console logging (default: info)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); @@ -603,6 +602,7 @@ help_checkdb(void) printf(_(" --log-rotation-age=log-rotation-age\n")); printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n")); + printf(_(" --no-color disable the coloring of error and warning console messages\n")); printf(_("\n Connection options:\n")); printf(_(" -U, --pguser=USERNAME user name to connect as (default: current local user)\n")); @@ -625,7 +625,7 @@ help_show(void) printf(_(" -i, --backup-id=backup-id show info about specific backups\n")); printf(_(" --archive show WAL archive information\n")); printf(_(" --format=format show format=PLAIN|JSON\n")); - printf(_(" --color color the info for plain format\n\n")); + printf(_(" --no-color disable the coloring for plain format\n\n")); } static void @@ -660,7 +660,6 @@ help_delete(void) printf(_(" --status=backup_status delete all backups with specified status\n")); printf(_("\n Logging options:\n")); - printf(_(" --color color the error and warning console messages\n")); printf(_(" --log-level-console=log-level-console\n")); printf(_(" level for console logging (default: info)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); @@ -679,7 +678,8 @@ help_delete(void) printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); printf(_(" --log-rotation-age=log-rotation-age\n")); printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); - printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n\n")); + printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n")); + printf(_(" --no-color disable the coloring of error and 
warning console messages\n\n")); } static void @@ -703,7 +703,6 @@ help_merge(void) printf(_(" --progress show progress\n")); printf(_("\n Logging options:\n")); - printf(_(" --color color the error and warning console messages\n")); printf(_(" --log-level-console=log-level-console\n")); printf(_(" level for console logging (default: info)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); @@ -722,7 +721,8 @@ help_merge(void) printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); printf(_(" --log-rotation-age=log-rotation-age\n")); printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); - printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n\n")); + printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n")); + printf(_(" --no-color disable the coloring of error and warning console messages\n\n")); } static void diff --git a/src/pg_probackup.c b/src/pg_probackup.c index aa2b09f19..bbc2a5bf0 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -67,7 +67,8 @@ char *externaldir = NULL; static char *backup_id_string = NULL; int num_threads = 1; bool stream_wal = false; -bool show_color = false; +bool no_color = false; +bool show_color = true; bool is_archive_cmd = false; pid_t my_pid = 0; __thread int my_thread_num = 1; @@ -179,7 +180,7 @@ static ConfigOption cmd_options[] = { 'b', 132, "progress", &progress, SOURCE_CMD_STRICT }, { 's', 'i', "backup-id", &backup_id_string, SOURCE_CMD_STRICT }, { 'b', 133, "no-sync", &no_sync, SOURCE_CMD_STRICT }, - { 'b', 134, "color", &show_color, SOURCE_CMD_STRICT }, + { 'b', 134, "no-color", &no_color, SOURCE_CMD_STRICT }, /* backup options */ { 'b', 180, "backup-pg-log", &backup_logs, SOURCE_CMD_STRICT }, { 'f', 'b', "backup-mode", opt_backup_mode, SOURCE_CMD_STRICT }, @@ -287,6 +288,9 @@ main(int argc, char *argv[]) PROGRAM_NAME_FULL = argv[0]; + /* Check terminal presense and initialize ANSI escape codes for Windows */ + init_console(); + /* Initialize current backup */ pgBackupInit(¤t); @@ -442,9 +446,8 @@ main(int argc, char *argv[]) pgut_init(); - /* Check terminal presense and initialize ANSI escape codes for Windows */ - if (show_color) - init_console(); + if (no_color) + show_color = false; if (help_opt) help_command(command_name); diff --git a/src/utils/logger.c b/src/utils/logger.c index aad8303dc..149f4c62e 100644 --- a/src/utils/logger.c +++ b/src/utils/logger.c @@ -132,7 +132,6 @@ init_console(void) !isatty(fileno(stdout))) { show_color = false; - elog(WARNING, "No terminal detected, ignoring '--color' flag"); return; } diff --git a/tests/archive.py b/tests/archive.py index 01ff5c062..576a08ab4 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -432,6 +432,11 @@ def test_archive_push_file_exists(self): 'pg_probackup archive-push completed successfully', log_content) + # btw check that console coloring codes are not slipped into log file + self.assertNotIn('[0m', log_content) + + print(log_content) + # Clean after yourself self.del_test_dir(module_name, fname) diff --git a/tests/show.py b/tests/show.py index 1018ed63d..92ef392da 100644 --- a/tests/show.py +++ b/tests/show.py @@ -559,7 +559,7 @@ def test_color_with_no_terminal(self): # FULL try: self.backup_node( - backup_dir, 'node', node, options=['--color', '--archive-timeout=1s']) + backup_dir, 'node', node, options=['--archive-timeout=1s']) # we should die here because exception is what we expect to happen self.assertEqual( 1, 0, From 
3f0984591192e909121609ea0d5d5a6ff000624b Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Tue, 29 Dec 2020 08:47:20 +0300 Subject: [PATCH 004/525] remove backup_subcmd global variable and useless command_name variable --- src/help.c | 99 ++++++++++-------- src/pg_probackup.c | 206 +++++++++++++------------------------- src/pg_probackup.h | 4 +- src/utils/configuration.c | 56 +++++++++++ src/utils/configuration.h | 27 +++++ src/utils/file.c | 18 ++++ 6 files changed, 231 insertions(+), 179 deletions(-) diff --git a/src/help.c b/src/help.c index c82c8cf63..2af045cde 100644 --- a/src/help.c +++ b/src/help.c @@ -7,8 +7,10 @@ *------------------------------------------------------------------------- */ +#include #include "pg_probackup.h" +static void help_nocmd(void); static void help_init(void); static void help_backup(void); static void help_restore(void); @@ -24,50 +26,52 @@ static void help_del_instance(void); static void help_archive_push(void); static void help_archive_get(void); static void help_checkdb(void); +static void help_help(void); void -help_command(char *command) +help_print_version(void) { - if (strcmp(command, "init") == 0) - help_init(); - else if (strcmp(command, "backup") == 0) - help_backup(); - else if (strcmp(command, "restore") == 0) - help_restore(); - else if (strcmp(command, "validate") == 0) - help_validate(); - else if (strcmp(command, "show") == 0) - help_show(); - else if (strcmp(command, "delete") == 0) - help_delete(); - else if (strcmp(command, "merge") == 0) - help_merge(); - else if (strcmp(command, "set-backup") == 0) - help_set_backup(); - else if (strcmp(command, "set-config") == 0) - help_set_config(); - else if (strcmp(command, "show-config") == 0) - help_show_config(); - else if (strcmp(command, "add-instance") == 0) - help_add_instance(); - else if (strcmp(command, "del-instance") == 0) - help_del_instance(); - else if (strcmp(command, "archive-push") == 0) - help_archive_push(); - else if (strcmp(command, "archive-get") == 0) - help_archive_get(); - else if (strcmp(command, "checkdb") == 0) - help_checkdb(); - else if (strcmp(command, "--help") == 0 - || strcmp(command, "help") == 0 - || strcmp(command, "-?") == 0 - || strcmp(command, "--version") == 0 - || strcmp(command, "version") == 0 - || strcmp(command, "-V") == 0) - printf(_("No help page for \"%s\" command. Try pg_probackup help\n"), command); - else - printf(_("Unknown command \"%s\". 
Try pg_probackup help\n"), command); - exit(0); +#ifdef PGPRO_VERSION + fprintf(stdout, "%s %s (Postgres Pro %s %s)\n", + PROGRAM_NAME, PROGRAM_VERSION, + PGPRO_VERSION, PGPRO_EDITION); +#else + fprintf(stdout, "%s %s (PostgreSQL %s)\n", + PROGRAM_NAME, PROGRAM_VERSION, PG_VERSION); +#endif +} + +void +help_command(ProbackupSubcmd const subcmd) +{ + typedef void (* help_function_ptr)(void); + /* Order is important, keep it in sync with utils/configuration.h:enum ProbackupSubcmd declaration */ + static help_function_ptr const help_functions[] = + { + &help_nocmd, + &help_init, + &help_add_instance, + &help_del_instance, + &help_archive_push, + &help_archive_get, + &help_backup, + &help_restore, + &help_validate, + &help_delete, + &help_merge, + &help_show, + &help_set_config, + &help_set_backup, + &help_show_config, + &help_checkdb, + &help_nocmd, // SSH_CMD + &help_nocmd, // AGENT_CMD + &help_help, + &help_help, // VERSION_CMD + }; + + Assert((int)subcmd < sizeof(help_functions) / sizeof(help_functions[0])); + help_functions[(int)subcmd](); } void @@ -247,7 +251,12 @@ help_pg_probackup(void) if (PROGRAM_EMAIL) printf("Report bugs to <%s>.\n", PROGRAM_EMAIL); } - exit(0); +} + +static void +help_nocmd(void) +{ + printf(_("Unknown command. Try pg_probackup help\n")); } static void @@ -971,3 +980,9 @@ help_archive_get(void) printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n\n")); } + +static void +help_help(void) +{ + printf(_("No help page required for \"help\" and \"version\" commands. Just try it!\n")); +} diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 6fdf8bafb..855d24d92 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -27,27 +27,6 @@ const char *PROGRAM_FULL_PATH = NULL; const char *PROGRAM_URL = "https://github.com/postgrespro/pg_probackup"; const char *PROGRAM_EMAIL = "https://github.com/postgrespro/pg_probackup/issues"; -typedef enum ProbackupSubcmd -{ - NO_CMD = 0, - INIT_CMD, - ADD_INSTANCE_CMD, - DELETE_INSTANCE_CMD, - ARCHIVE_PUSH_CMD, - ARCHIVE_GET_CMD, - BACKUP_CMD, - RESTORE_CMD, - VALIDATE_CMD, - DELETE_CMD, - MERGE_CMD, - SHOW_CMD, - SET_CONFIG_CMD, - SET_BACKUP_CMD, - SHOW_CONFIG_CMD, - CHECKDB_CMD -} ProbackupSubcmd; - - /* directory options */ char *backup_path = NULL; /* @@ -152,7 +131,6 @@ static pgSetBackupParams *set_backup_params = NULL; /* current settings */ pgBackup current; -static ProbackupSubcmd backup_subcmd = NO_CMD; static bool help_opt = false; @@ -160,7 +138,7 @@ static void opt_incr_restore_mode(ConfigOption *opt, const char *arg); static void opt_backup_mode(ConfigOption *opt, const char *arg); static void opt_show_format(ConfigOption *opt, const char *arg); -static void compress_init(void); +static void compress_init(ProbackupSubcmd const subcmd); static void opt_datname_exclude_list(ConfigOption *opt, const char *arg); static void opt_datname_include_list(ConfigOption *opt, const char *arg); @@ -259,32 +237,14 @@ static ConfigOption cmd_options[] = { 0 } }; -static void -setMyLocation(void) -{ - -#ifdef WIN32 - if (IsSshProtocol()) - elog(ERROR, "Currently remote operations on Windows are not supported"); -#endif - - MyLocation = IsSshProtocol() - ? (backup_subcmd == ARCHIVE_PUSH_CMD || backup_subcmd == ARCHIVE_GET_CMD) - ? FIO_DB_HOST - : (backup_subcmd == BACKUP_CMD || backup_subcmd == RESTORE_CMD || backup_subcmd == ADD_INSTANCE_CMD) - ? 
FIO_BACKUP_HOST - : FIO_LOCAL_HOST - : FIO_LOCAL_HOST; -} - /* * Entry point of pg_probackup command. */ int main(int argc, char *argv[]) { - char *command = NULL, - *command_name; + char *command = NULL; + ProbackupSubcmd backup_subcmd = NO_CMD; PROGRAM_NAME_FULL = argv[0]; @@ -322,91 +282,58 @@ main(int argc, char *argv[]) /* Parse subcommands and non-subcommand options */ if (argc > 1) { - if (strcmp(argv[1], "archive-push") == 0) - backup_subcmd = ARCHIVE_PUSH_CMD; - else if (strcmp(argv[1], "archive-get") == 0) - backup_subcmd = ARCHIVE_GET_CMD; - else if (strcmp(argv[1], "add-instance") == 0) - backup_subcmd = ADD_INSTANCE_CMD; - else if (strcmp(argv[1], "del-instance") == 0) - backup_subcmd = DELETE_INSTANCE_CMD; - else if (strcmp(argv[1], "init") == 0) - backup_subcmd = INIT_CMD; - else if (strcmp(argv[1], "backup") == 0) - backup_subcmd = BACKUP_CMD; - else if (strcmp(argv[1], "restore") == 0) - backup_subcmd = RESTORE_CMD; - else if (strcmp(argv[1], "validate") == 0) - backup_subcmd = VALIDATE_CMD; - else if (strcmp(argv[1], "delete") == 0) - backup_subcmd = DELETE_CMD; - else if (strcmp(argv[1], "merge") == 0) - backup_subcmd = MERGE_CMD; - else if (strcmp(argv[1], "show") == 0) - backup_subcmd = SHOW_CMD; - else if (strcmp(argv[1], "set-config") == 0) - backup_subcmd = SET_CONFIG_CMD; - else if (strcmp(argv[1], "set-backup") == 0) - backup_subcmd = SET_BACKUP_CMD; - else if (strcmp(argv[1], "show-config") == 0) - backup_subcmd = SHOW_CONFIG_CMD; - else if (strcmp(argv[1], "checkdb") == 0) - backup_subcmd = CHECKDB_CMD; -#ifdef WIN32 - else if (strcmp(argv[1], "ssh") == 0) - launch_ssh(argv); -#endif - else if (strcmp(argv[1], "agent") == 0) - { - /* 'No forward compatibility' sanity: - * /old/binary -> ssh execute -> /newer/binary agent version_num - * If we are executed as an agent for older binary, then exit with error - */ - if (argc > 2) - { - elog(ERROR, "Version mismatch, pg_probackup binary with version '%s' " - "is launched as an agent for pg_probackup binary with version '%s'", - PROGRAM_VERSION, argv[2]); - } - fio_communicate(STDIN_FILENO, STDOUT_FILENO); - return 0; - } - else if (strcmp(argv[1], "--help") == 0 || - strcmp(argv[1], "-?") == 0 || - strcmp(argv[1], "help") == 0) - { - if (argc > 2) - help_command(argv[2]); - else - help_pg_probackup(); - } - else if (strcmp(argv[1], "--version") == 0 - || strcmp(argv[1], "version") == 0 - || strcmp(argv[1], "-V") == 0) + backup_subcmd = parse_subcmd(argv[1]); + switch(backup_subcmd) { -#ifdef PGPRO_VERSION - fprintf(stdout, "%s %s (Postgres Pro %s %s)\n", - PROGRAM_NAME, PROGRAM_VERSION, - PGPRO_VERSION, PGPRO_EDITION); + case SSH_CMD: +#ifdef WIN32 + launch_ssh(argv); + break; #else - fprintf(stdout, "%s %s (PostgreSQL %s)\n", - PROGRAM_NAME, PROGRAM_VERSION, PG_VERSION); + elog(ERROR, "\"ssh\" command implemented only for Windows"); #endif - exit(0); + case AGENT_CMD: + /* 'No forward compatibility' sanity: + * /old/binary -> ssh execute -> /newer/binary agent version_num + * If we are executed as an agent for older binary, then exit with error + */ + if (argc > 2) + elog(ERROR, "Version mismatch, pg_probackup binary with version '%s' " + "is launched as an agent for pg_probackup binary with version '%s'", + PROGRAM_VERSION, argv[2]); + fio_communicate(STDIN_FILENO, STDOUT_FILENO); + return 0; + case HELP_CMD: + if (argc > 2) + { + /* 'pg_probackup help command' style */ + help_command(parse_subcmd(argv[2])); + exit(0); + } + else + { + help_pg_probackup(); + exit(0); + } + break; + case VERSION_CMD: + 
help_print_version(); + exit(0); + case NO_CMD: + elog(ERROR, "Unknown subcommand \"%s\"", argv[1]); + default: + /* Silence compiler warnings */ + break; } - else - elog(ERROR, "Unknown subcommand \"%s\"", argv[1]); } - - if (backup_subcmd == NO_CMD) - elog(ERROR, "No subcommand specified"); + else + elog(ERROR, "No subcommand specified. Please run with \"help\" argument to see possible subcommands."); /* * Make command string before getopt_long() will call. It permutes the * content of argv. */ /* TODO why do we do that only for some commands? */ - command_name = pstrdup(argv[1]); if (backup_subcmd == BACKUP_CMD || backup_subcmd == RESTORE_CMD || backup_subcmd == VALIDATE_CMD || @@ -450,9 +377,14 @@ main(int argc, char *argv[]) show_color = false; if (help_opt) - help_command(command_name); + { + /* 'pg_probackup command --help' style */ + help_command(backup_subcmd); + exit(0); + } + + setMyLocation(backup_subcmd); - /* backup_path is required for all pg_probackup commands except help and checkdb */ if (backup_path == NULL) { /* @@ -460,12 +392,8 @@ main(int argc, char *argv[]) * from environment variable */ backup_path = getenv("BACKUP_PATH"); - if (backup_path == NULL && backup_subcmd != CHECKDB_CMD) - elog(ERROR, "required parameter not specified: BACKUP_PATH (-B, --backup-path)"); } - setMyLocation(); - if (backup_path != NULL) { canonicalize_path(backup_path); @@ -474,11 +402,9 @@ main(int argc, char *argv[]) if (!is_absolute_path(backup_path)) elog(ERROR, "-B, --backup-path must be an absolute path"); } - - /* Ensure that backup_path is an absolute path */ - if (backup_path && !is_absolute_path(backup_path)) - elog(ERROR, "-B, --backup-path must be an absolute path"); - + /* backup_path is required for all pg_probackup commands except help, version and checkdb */ + if (backup_path == NULL && backup_subcmd != CHECKDB_CMD && backup_subcmd != HELP_CMD && backup_subcmd != VERSION_CMD) + elog(ERROR, "required parameter not specified: BACKUP_PATH (-B, --backup-path)"); /* * Option --instance is required for all commands except @@ -574,7 +500,8 @@ main(int argc, char *argv[]) else config_read_opt(path, instance_options, ERROR, true, false); } - setMyLocation(); + /* Зачем второй раз устанавливать? 
*/ + setMyLocation(backup_subcmd); } /* @@ -676,7 +603,7 @@ main(int argc, char *argv[]) backup_subcmd != SET_BACKUP_CMD && backup_subcmd != SHOW_CMD) elog(ERROR, "Cannot use -i (--backup-id) option together with the \"%s\" command", - command_name); + get_subcmd_name(backup_subcmd)); current.backup_id = base36dec(backup_id_string); if (current.backup_id == 0) @@ -709,7 +636,7 @@ main(int argc, char *argv[]) if (force && backup_subcmd != RESTORE_CMD) elog(ERROR, "You cannot specify \"--force\" flag with the \"%s\" command", - command_name); + get_subcmd_name(backup_subcmd)); if (force) no_validate = true; @@ -779,7 +706,7 @@ main(int argc, char *argv[]) /* sanity */ if (backup_subcmd == VALIDATE_CMD && restore_params->no_validate) elog(ERROR, "You cannot specify \"--no-validate\" option with the \"%s\" command", - command_name); + get_subcmd_name(backup_subcmd)); if (num_threads < 1) num_threads = 1; @@ -787,7 +714,7 @@ main(int argc, char *argv[]) if (batch_size < 1) batch_size = 1; - compress_init(); + compress_init(backup_subcmd); /* do actual operation */ switch (backup_subcmd) @@ -881,6 +808,13 @@ main(int argc, char *argv[]) case NO_CMD: /* Should not happen */ elog(ERROR, "Unknown subcommand"); + case SSH_CMD: + case AGENT_CMD: + /* Может перейти на использование какого-нибудь do_agent() для однобразия? */ + case HELP_CMD: + case VERSION_CMD: + /* Silence compiler warnings, these already handled earlier */ + break; } return 0; @@ -943,13 +877,13 @@ opt_show_format(ConfigOption *opt, const char *arg) * Initialize compress and sanity checks for compress. */ static void -compress_init(void) +compress_init(ProbackupSubcmd const subcmd) { /* Default algorithm is zlib */ if (compress_shortcut) instance_config.compress_alg = ZLIB_COMPRESS; - if (backup_subcmd != SET_CONFIG_CMD) + if (subcmd != SET_CONFIG_CMD) { if (instance_config.compress_level != COMPRESS_LEVEL_DEFAULT && instance_config.compress_alg == NOT_DEFINED_COMPRESS) @@ -963,7 +897,7 @@ compress_init(void) if (instance_config.compress_alg == ZLIB_COMPRESS && instance_config.compress_level == 0) elog(WARNING, "Compression level 0 will lead to data bloat!"); - if (backup_subcmd == BACKUP_CMD || backup_subcmd == ARCHIVE_PUSH_CMD) + if (subcmd == BACKUP_CMD || subcmd == ARCHIVE_PUSH_CMD) { #ifndef HAVE_LIBZ if (instance_config.compress_alg == ZLIB_COMPRESS) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index c53d31e95..a2c3309f8 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -876,8 +876,9 @@ extern char *slurpFile(const char *datadir, extern char *fetchFile(PGconn *conn, const char *filename, size_t *filesize); /* in help.c */ +extern void help_print_version(void); extern void help_pg_probackup(void); -extern void help_command(char *command); +extern void help_command(ProbackupSubcmd const subcmd); /* in validate.c */ extern void pgBackupValidate(pgBackup* backup, pgRestoreParams *params); @@ -1162,6 +1163,7 @@ extern int send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const c BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema); /* FIO */ +extern void setMyLocation(ProbackupSubcmd const subcmd); extern void fio_delete(mode_t mode, const char *fullpath, fio_location location); extern int fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version, diff --git a/src/utils/configuration.c b/src/utils/configuration.c index d6a7d069e..05baaae53 100644 --- a/src/utils/configuration.c +++ 
b/src/utils/configuration.c @@ -87,6 +87,62 @@ static const unit_conversion time_unit_conversion_table[] = {""} /* end of table marker */ }; +/* Order is important, keep it in sync with utils/configuration.h:enum ProbackupSubcmd declaration */ +static char const * const subcmd_names[] = +{ + "NO_CMD", + "init", + "add-instance", + "del-instance", + "archive-push", + "archive-get", + "backup", + "restore", + "validate", + "delete", + "merge", + "show", + "set-config", + "set-backup", + "show-config", + "checkdb", + "ssh", + "agent", + "help", + "version", +}; + +ProbackupSubcmd +parse_subcmd(char const * const subcmd_str) +{ + struct { + ProbackupSubcmd id; + char *name; + } + static const subcmd_additional_names[] = { + { HELP_CMD, "--help" }, + { HELP_CMD, "-?" }, + { VERSION_CMD, "--version" }, + { VERSION_CMD, "-V" }, + }; + + int i; + for(i = (int)NO_CMD + 1; i < sizeof(subcmd_names) / sizeof(subcmd_names[0]); ++i) + if(strcmp(subcmd_str, subcmd_names[i]) == 0) + return (ProbackupSubcmd)i; + for(i = 0; i < sizeof(subcmd_additional_names) / sizeof(subcmd_additional_names[0]); ++i) + if(strcmp(subcmd_str, subcmd_additional_names[i].name) == 0) + return subcmd_additional_names[i].id; + return NO_CMD; +} + +char const * +get_subcmd_name(ProbackupSubcmd const subcmd) +{ + Assert((int)subcmd < sizeof(subcmd_names) / sizeof(subcmd_names[0])); + return subcmd_names[(int)subcmd]; +} + /* * Reading functions. */ diff --git a/src/utils/configuration.h b/src/utils/configuration.h index eea8c7746..4ed4e0e61 100644 --- a/src/utils/configuration.h +++ b/src/utils/configuration.h @@ -16,6 +16,31 @@ #define INFINITE_STR "INFINITE" +/* Order is important, keep it in sync with configuration.c:subcmd_names[] and help.c:help_command() */ +typedef enum ProbackupSubcmd +{ + NO_CMD = 0, + INIT_CMD, + ADD_INSTANCE_CMD, + DELETE_INSTANCE_CMD, + ARCHIVE_PUSH_CMD, + ARCHIVE_GET_CMD, + BACKUP_CMD, + RESTORE_CMD, + VALIDATE_CMD, + DELETE_CMD, + MERGE_CMD, + SHOW_CMD, + SET_CONFIG_CMD, + SET_BACKUP_CMD, + SHOW_CONFIG_CMD, + CHECKDB_CMD, + SSH_CMD, + AGENT_CMD, + HELP_CMD, + VERSION_CMD +} ProbackupSubcmd; + typedef enum OptionSource { SOURCE_DEFAULT, @@ -75,6 +100,8 @@ struct ConfigOption #define OPTION_UNIT (OPTION_UNIT_MEMORY | OPTION_UNIT_TIME) +extern ProbackupSubcmd parse_subcmd(char const * const subcmd_str); +extern char const *get_subcmd_name(ProbackupSubcmd const subcmd); extern int config_get_opt(int argc, char **argv, ConfigOption cmd_options[], ConfigOption options[]); extern int config_read_opt(const char *path, ConfigOption options[], int elevel, diff --git a/src/utils/file.c b/src/utils/file.c index b29a67070..4adfa3fee 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -83,6 +83,24 @@ typedef struct #undef fopen(a, b) #endif +void +setMyLocation(ProbackupSubcmd const subcmd) +{ + +#ifdef WIN32 + if (IsSshProtocol()) + elog(ERROR, "Currently remote operations on Windows are not supported"); +#endif + + MyLocation = IsSshProtocol() + ? (subcmd == ARCHIVE_PUSH_CMD || subcmd == ARCHIVE_GET_CMD) + ? FIO_DB_HOST + : (subcmd == BACKUP_CMD || subcmd == RESTORE_CMD || subcmd == ADD_INSTANCE_CMD) + ? FIO_BACKUP_HOST + : FIO_LOCAL_HOST + : FIO_LOCAL_HOST; +} + /* Use specified file descriptors as stdin/stdout for FIO functions */ void fio_redirect(int in, int out, int err) { From 8aea21d70380c5f89d301afce4cc195b026c1aa9 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Wed, 30 Dec 2020 06:59:32 +0300 Subject: [PATCH 005/525] add version and help help pages --- src/help.c | 30 +++++++++++++++++++++++------- src/utils/configuration.c | 2 +- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/src/help.c b/src/help.c index 2af045cde..cab143ad8 100644 --- a/src/help.c +++ b/src/help.c @@ -11,6 +11,7 @@ #include "pg_probackup.h" static void help_nocmd(void); +static void help_internal(void); static void help_init(void); static void help_backup(void); static void help_restore(void); @@ -27,6 +28,7 @@ static void help_archive_push(void); static void help_archive_get(void); static void help_checkdb(void); static void help_help(void); +static void help_version(void); void help_print_version(void) @@ -64,10 +66,10 @@ help_command(ProbackupSubcmd const subcmd) &help_set_backup, &help_show_config, &help_checkdb, - &help_nocmd, // SSH_CMD - &help_nocmd, // AGENT_CMD + &help_internal, // SSH_CMD + &help_internal, // AGENT_CMD &help_help, - &help_help, // VERSION_CMD + &help_version, }; Assert((int)subcmd < sizeof(help_functions) / sizeof(help_functions[0])); @@ -77,9 +79,9 @@ help_command(ProbackupSubcmd const subcmd) void help_pg_probackup(void) { - printf(_("\n%s - utility to manage backup/recovery of PostgreSQL database.\n\n"), PROGRAM_NAME); + printf(_("\n%s - utility to manage backup/recovery of PostgreSQL database.\n"), PROGRAM_NAME); - printf(_(" %s help [COMMAND]\n"), PROGRAM_NAME); + printf(_("\n %s help [COMMAND]\n"), PROGRAM_NAME); printf(_("\n %s version\n"), PROGRAM_NAME); @@ -256,7 +258,13 @@ help_pg_probackup(void) static void help_nocmd(void) { - printf(_("Unknown command. Try pg_probackup help\n")); + printf(_("\nUnknown command. Try pg_probackup help\n\n")); +} + +static void +help_internal(void) +{ + printf(_("\nThis command is intended for internal use\n\n")); } static void @@ -984,5 +992,13 @@ help_archive_get(void) static void help_help(void) { - printf(_("No help page required for \"help\" and \"version\" commands. 
Just try it!\n")); + printf(_("\n%s help [command]\n"), PROGRAM_NAME); + printf(_("%s command --help\n\n"), PROGRAM_NAME); +} + +static void +help_version(void) +{ + printf(_("\n%s version\n"), PROGRAM_NAME); + printf(_("%s --version\n\n"), PROGRAM_NAME); } diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 05baaae53..afc1bc056 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -140,7 +140,7 @@ char const * get_subcmd_name(ProbackupSubcmd const subcmd) { Assert((int)subcmd < sizeof(subcmd_names) / sizeof(subcmd_names[0])); - return subcmd_names[(int)subcmd]; + return subcmd_names[(int)subcmd]; } /* From 7383ddd69c9f4b5f66ea5601f3764fdfda408fe3 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Wed, 6 Jan 2021 19:15:59 +0300 Subject: [PATCH 006/525] add some comments --- src/pg_probackup.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 855d24d92..4b03f6a8c 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -383,6 +383,7 @@ main(int argc, char *argv[]) exit(0); } + /* set location based on cmdline options only */ setMyLocation(backup_subcmd); if (backup_path == NULL) @@ -499,9 +500,14 @@ main(int argc, char *argv[]) config_read_opt(path, instance_options, ERROR, true, true); else config_read_opt(path, instance_options, ERROR, true, false); + + /* + * We can determine our location only after reading the configuration file, + * unless we are running arcive-push/archive-get - they are allowed to trust + * cmdline only. + */ + setMyLocation(backup_subcmd); } - /* Зачем второй раз устанавливать? */ - setMyLocation(backup_subcmd); } /* From 1d5015705342be21b363d609f01178a73ec7a1e0 Mon Sep 17 00:00:00 2001 From: anastasia Date: Fri, 22 Jan 2021 21:48:21 +0300 Subject: [PATCH 007/525] calm down compiler warning --- src/pg_probackup.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 30af998a4..dd2ac97ee 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -291,6 +291,7 @@ main(int argc, char *argv[]) break; #else elog(ERROR, "\"ssh\" command implemented only for Windows"); + break; #endif case AGENT_CMD: /* 'No forward compatibility' sanity: From 40aeb8be175e41aa9edc206faa4f13b0c11f12a8 Mon Sep 17 00:00:00 2001 From: anastasia Date: Fri, 22 Jan 2021 22:43:06 +0300 Subject: [PATCH 008/525] fix merge conflict --- tests/expected/option_version.out | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index 55d7c7f04..560b6b592 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1,5 +1 @@ -<<<<<<< HEAD pg_probackup 2.5.0 -======= -pg_probackup 2.4.8 ->>>>>>> master From 2620042019c702423febc58444e11495d746417e Mon Sep 17 00:00:00 2001 From: Elena Indrupskaya Date: Wed, 27 Jan 2021 10:25:25 +0300 Subject: [PATCH 009/525] [DOC] Remove outdated content from the documentation --- doc/pgprobackup.xml | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index a394cbcd4..b0a0f6763 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -1206,18 +1206,6 @@ CREATE EXTENSION ptrack; does not affect PTRACK operation. The maximum allowed value is 1024. 
- - - Grant the right to execute PTRACK - functions to the backup role - in the database used to connect to the cluster: - - -GRANT EXECUTE ON FUNCTION pg_ptrack_get_pagemapset(pg_lsn) TO backup; -GRANT EXECUTE ON FUNCTION pg_ptrack_control_lsn() TO backup; -GRANT EXECUTE ON FUNCTION pg_ptrack_get_block(oid, oid, oid, bigint) TO backup; - - @@ -1254,7 +1242,6 @@ GRANT EXECUTE ON FUNCTION pg_ptrack_get_block(oid, oid, oid, bigint) TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_ptrack_clear() TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_ptrack_get_and_clear(oid, oid) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; From 0fbf1a2fabb97fd9004bf6a4a74739024eb9bda8 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Wed, 27 Jan 2021 15:55:36 +0300 Subject: [PATCH 010/525] README: update link to latest Windows installers --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1c61413ce..2ecaf9695 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,7 @@ For detailed release plans check [Milestones](https://github.com/postgrespro/pg_ ## Installation and Setup ### Windows Installation -Installers are available in release **assets**. [Latests](https://github.com/postgrespro/pg_probackup/releases/2.4.4). +Installers are available in release **assets**. [Latests](https://github.com/postgrespro/pg_probackup/releases/2.4.9). ### Linux Installation #### pg_probackup for vanilla PostgreSQL From 7d64d58755bd5314d035ff66a65ee2ffb240ceea Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 1 Feb 2021 16:09:02 +0300 Subject: [PATCH 011/525] [Issue #308] Wait on empty exclusive lock file --- src/catalog.c | 52 ++++++++++++++++++++++++++++++++++------------ src/pg_probackup.h | 4 +++- 2 files changed, 42 insertions(+), 14 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 302154178..94c01cff9 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -205,7 +205,7 @@ lock_backup(pgBackup *backup, bool strict, bool exclusive) { /* release exclusive lock */ if (fio_unlink(lock_file, FIO_BACKUP_HOST) < 0) - elog(ERROR, "Could not remove old lock file \"%s\": %s", + elog(ERROR, "Could not remove exclusive lock file \"%s\": %s", lock_file, strerror(errno)); /* we are done */ @@ -261,7 +261,7 @@ lock_backup_exclusive(pgBackup *backup, bool strict) int fd = 0; char buffer[MAXPGPATH * 2 + 256]; int ntries = LOCK_TIMEOUT; - int log_freq = ntries / 5; + int empty_tries = LOCK_STALE_TIMEOUT; int len; int encoded_pid; pid_t my_p_pid; @@ -351,13 +351,39 @@ lock_backup_exclusive(pgBackup *backup, bool strict) fclose(fp_out); /* - * It should be possible only as a result of system crash, - * so its hypothetical owner should be dead by now + * There are several possible reasons for lock file + * to be empty: + * - system crash + * - process crash + * - race between writer and reader + * + * Consider empty file to stale after LOCK_STALE_TIMEOUT + * attempts. + * + * TODO: alternatively we can write into temp file (lock_file_%pid), + * rename it and then re-read lock file to make sure, + * that we are successfully acquired the lock. 
*/ if (len == 0) { - elog(WARNING, "Lock file \"%s\" is empty", lock_file); - goto grab_lock; + if (empty_tries == 0) + { + elog(WARNING, "Lock file \"%s\" is empty", lock_file); + goto grab_lock; + } + + if ((empty_tries % LOG_FREQ) == 0) + elog(WARNING, "Waiting %u seconds on empty exclusive lock for backup %s", + empty_tries, base36enc(backup->start_time)); + + sleep(1); + /* + * waiting on empty lock file should not affect + * the timer for concurrent lockers (ntries). + */ + empty_tries--; + ntries++; + continue; } encoded_pid = atoi(buffer); @@ -383,12 +409,13 @@ lock_backup_exclusive(pgBackup *backup, bool strict) if (kill(encoded_pid, 0) == 0) { /* complain every fifth interval */ - if ((ntries % log_freq) == 0) + if ((ntries % LOG_FREQ) == 0) { elog(WARNING, "Process %d is using backup %s, and is still running", encoded_pid, base36enc(backup->start_time)); - elog(WARNING, "Waiting %u seconds on lock for backup %s", ntries, base36enc(backup->start_time)); + elog(WARNING, "Waiting %u seconds on exclusive lock for backup %s", + ntries, base36enc(backup->start_time)); } sleep(1); @@ -435,7 +462,7 @@ lock_backup_exclusive(pgBackup *backup, bool strict) errno = 0; if (fio_write(fd, buffer, strlen(buffer)) != strlen(buffer)) { - int save_errno = errno; + int save_errno = errno; fio_close(fd); fio_unlink(lock_file, FIO_BACKUP_HOST); @@ -453,7 +480,7 @@ lock_backup_exclusive(pgBackup *backup, bool strict) if (fio_flush(fd) != 0) { - int save_errno = errno; + int save_errno = errno; fio_close(fd); fio_unlink(lock_file, FIO_BACKUP_HOST); @@ -471,7 +498,7 @@ lock_backup_exclusive(pgBackup *backup, bool strict) if (fio_close(fd) != 0) { - int save_errno = errno; + int save_errno = errno; fio_unlink(lock_file, FIO_BACKUP_HOST); @@ -493,7 +520,6 @@ wait_read_only_owners(pgBackup *backup) char buffer[256]; pid_t encoded_pid; int ntries = LOCK_TIMEOUT; - int log_freq = ntries / 5; char lock_file[MAXPGPATH]; join_path_components(lock_file, backup->root_dir, BACKUP_RO_LOCK_FILE); @@ -523,7 +549,7 @@ wait_read_only_owners(pgBackup *backup) { if (kill(encoded_pid, 0) == 0) { - if ((ntries % log_freq) == 0) + if ((ntries % LOG_FREQ) == 0) { elog(WARNING, "Process %d is using backup %s in read only mode, and is still running", encoded_pid, base36enc(backup->start_time)); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 2ec09babb..f4adc98cc 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -79,7 +79,9 @@ extern const char *PROGRAM_EMAIL; /* Timeout defaults */ #define ARCHIVE_TIMEOUT_DEFAULT 300 #define REPLICA_TIMEOUT_DEFAULT 300 -#define LOCK_TIMEOUT 30 +#define LOCK_TIMEOUT 60 +#define LOCK_STALE_TIMEOUT 30 +#define LOG_FREQ 10 /* Directory/File permission */ #define DIR_PERMISSION (0700) From d9d6c34e2576e75e06ced80276d41e8e0db555ea Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 1 Feb 2021 16:42:44 +0300 Subject: [PATCH 012/525] [Issue #308] fix typo --- src/catalog.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 94c01cff9..63bb6862e 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -357,8 +357,7 @@ lock_backup_exclusive(pgBackup *backup, bool strict) * - process crash * - race between writer and reader * - * Consider empty file to stale after LOCK_STALE_TIMEOUT - * attempts. + * Consider empty file to be stale after LOCK_STALE_TIMEOUT attempts. 
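The waiting scheme introduced above keeps two independent budgets: ntries bounds how long a live concurrent locker is waited for, while empty_tries bounds how long an empty lock file is tolerated before it is treated as stale, and time spent on emptiness deliberately does not consume the ntries budget. A minimal self-contained model of that bookkeeping follows; the constants match the values added to pg_probackup.h in this patch, and the always-empty stub is an assumption made only for the demo.

    #include <stdio.h>
    #include <unistd.h>

    #define LOCK_TIMEOUT        60  /* budget for a live concurrent lock holder */
    #define LOCK_STALE_TIMEOUT  30  /* budget for an empty, possibly stale lock file */

    /* stand-in for "read the lock file and get len == 0" */
    static int lock_file_is_empty(void) { return 1; }

    int main(void)
    {
        int ntries = LOCK_TIMEOUT;
        int empty_tries = LOCK_STALE_TIMEOUT;

        while (ntries > 0)
        {
            if (lock_file_is_empty())
            {
                if (empty_tries == 0)
                {
                    printf("lock file stayed empty for %d seconds, consider it stale\n",
                           LOCK_STALE_TIMEOUT);
                    break;
                }
                empty_tries--;
                ntries++;   /* waiting on emptiness must not eat the concurrent-locker budget */
                sleep(1);
                continue;
            }

            /* a non-empty file would go through the usual PID liveness checks */
            ntries--;
            sleep(1);
        }
        return 0;
    }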
* * TODO: alternatively we can write into temp file (lock_file_%pid), * rename it and then re-read lock file to make sure, From 2ae2908ea7d327ae57b56389060cd040a56a4dcb Mon Sep 17 00:00:00 2001 From: anastasia Date: Tue, 2 Feb 2021 18:19:47 +0300 Subject: [PATCH 013/525] Code cleanup. Remove unused functions --- src/catalog.c | 47 ---------------------------------------------- src/pg_probackup.h | 4 ---- 2 files changed, 51 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 57da50c8d..2936033e4 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -2741,33 +2741,6 @@ pgBackupGetPath2(const pgBackup *backup, char *path, size_t len, base36enc(backup->start_time), subdir1, subdir2); } -/* - * independent from global variable backup_instance_path - * Still depends from backup_path - */ -void -pgBackupGetPathInInstance(const char *instance_name, - const pgBackup *backup, char *path, size_t len, - const char *subdir1, const char *subdir2) -{ - char backup_instance_path[MAXPGPATH]; - - sprintf(backup_instance_path, "%s/%s/%s", - backup_path, BACKUPS_DIR, instance_name); - - /* If "subdir1" is NULL do not check "subdir2" */ - if (!subdir1) - snprintf(path, len, "%s/%s", backup_instance_path, - base36enc(backup->start_time)); - else if (!subdir2) - snprintf(path, len, "%s/%s/%s", backup_instance_path, - base36enc(backup->start_time), subdir1); - /* "subdir1" and "subdir2" is not NULL */ - else - snprintf(path, len, "%s/%s/%s/%s", backup_instance_path, - base36enc(backup->start_time), subdir1, subdir2); -} - /* * Check if multiple backups consider target backup to be their direct parent */ @@ -2917,26 +2890,6 @@ is_parent(time_t parent_backup_time, pgBackup *child_backup, bool inclusive) return false; } -/* - * Return backup index number. - * Note: this index number holds true until new sorting of backup list - */ -int -get_backup_index_number(parray *backup_list, pgBackup *backup) -{ - int i; - - for (i = 0; i < parray_num(backup_list); i++) - { - pgBackup *tmp_backup = (pgBackup *) parray_get(backup_list, i); - - if (tmp_backup->start_time == backup->start_time) - return i; - } - elog(WARNING, "Failed to find backup %s", base36enc(backup->start_time)); - return -1; -} - /* On backup_list lookup children of target_backup and append them to append_list */ void append_children(parray *backup_list, pgBackup *target_backup, parray *append_list) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 217c8a7f1..721840af2 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -948,9 +948,6 @@ extern void pgBackupGetPath(const pgBackup *backup, char *path, size_t len, const char *subdir); extern void pgBackupGetPath2(const pgBackup *backup, char *path, size_t len, const char *subdir1, const char *subdir2); -extern void pgBackupGetPathInInstance(const char *instance_name, - const pgBackup *backup, char *path, size_t len, - const char *subdir1, const char *subdir2); extern void pgBackupCreateDir(pgBackup *backup, const char *backup_instance_path); extern void pgNodeInit(PGNodeInfo *node); extern void pgBackupInit(pgBackup *backup); @@ -968,7 +965,6 @@ extern int scan_parent_chain(pgBackup *current_backup, pgBackup **result_backup) extern bool is_parent(time_t parent_backup_time, pgBackup *child_backup, bool inclusive); extern bool is_prolific(parray *backup_list, pgBackup *target_backup); -extern int get_backup_index_number(parray *backup_list, pgBackup *backup); extern void append_children(parray *backup_list, pgBackup *target_backup, parray *append_list); extern bool launch_agent(void); 
extern void launch_ssh(char* argv[]); From 0fd2fdeec0ead56468af408e2566b3d2b04fd53c Mon Sep 17 00:00:00 2001 From: anastasia Date: Tue, 2 Feb 2021 19:12:08 +0300 Subject: [PATCH 014/525] Code cleanup. Remove unused pgBackupGetPath() function --- src/backup.c | 2 +- src/catalog.c | 10 ---------- src/dir.c | 1 - src/pg_probackup.h | 2 -- src/validate.c | 1 - 5 files changed, 1 insertion(+), 15 deletions(-) diff --git a/src/backup.c b/src/backup.c index 7bdf9d6fb..0b61234f1 100644 --- a/src/backup.c +++ b/src/backup.c @@ -1774,7 +1774,7 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, if (!exclusive_backup) { Assert(PQnfields(res) >= 4); - pgBackupGetPath(backup, path, lengthof(path), DATABASE_DIR); + pgBackupGetPath2(backup, path, lengthof(path), DATABASE_DIR, NULL); /* Write backup_label */ join_path_components(backup_label, path, PG_BACKUP_LABEL_FILE); diff --git a/src/catalog.c b/src/catalog.c index 2936033e4..741a007ff 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -2710,16 +2710,6 @@ pgBackupCompareIdDesc(const void *l, const void *r) return -pgBackupCompareId(l, r); } -/* - * Construct absolute path of the backup directory. - * If subdir is not NULL, it will be appended after the path. - */ -void -pgBackupGetPath(const pgBackup *backup, char *path, size_t len, const char *subdir) -{ - pgBackupGetPath2(backup, path, len, subdir, NULL); -} - /* * Construct absolute path of the backup directory. * Append "subdir1" and "subdir2" to the backup directory. diff --git a/src/dir.c b/src/dir.c index d07a4d2f5..2bcd87b57 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1880,7 +1880,6 @@ read_database_map(pgBackup *backup) char path[MAXPGPATH]; char database_map_path[MAXPGPATH]; -// pgBackupGetPath(backup, path, lengthof(path), DATABASE_DIR); join_path_components(path, backup->root_dir, DATABASE_DIR); join_path_components(database_map_path, path, DATABASE_MAP); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 721840af2..06fab18dd 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -944,8 +944,6 @@ extern void pgBackupWriteControl(FILE *out, pgBackup *backup, bool utc); extern void write_backup_filelist(pgBackup *backup, parray *files, const char *root, parray *external_list, bool sync); -extern void pgBackupGetPath(const pgBackup *backup, char *path, size_t len, - const char *subdir); extern void pgBackupGetPath2(const pgBackup *backup, char *path, size_t len, const char *subdir1, const char *subdir2); extern void pgBackupCreateDir(pgBackup *backup, const char *backup_instance_path); diff --git a/src/validate.c b/src/validate.c index 21900c8e4..b44f4b1b8 100644 --- a/src/validate.c +++ b/src/validate.c @@ -205,7 +205,6 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) { char path[MAXPGPATH]; - //pgBackupGetPath(backup, path, lengthof(path), DATABASE_FILE_LIST); join_path_components(path, backup->root_dir, DATABASE_FILE_LIST); if (pgFileSize(path) >= (BLCKSZ*500)) From f26c95964701a7666b585d3e8ce61eab3e3a0bf1 Mon Sep 17 00:00:00 2001 From: anastasia Date: Tue, 2 Feb 2021 19:36:39 +0300 Subject: [PATCH 015/525] remove unneeded funtion dir_read_file_list() --- src/catalog.c | 109 +++++++++++++++++++++++++++++++++++++++-- src/dir.c | 119 +-------------------------------------------- src/pg_probackup.h | 4 +- 3 files changed, 107 insertions(+), 125 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 741a007ff..11b9a27d1 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -876,19 +876,118 @@ catalog_get_backup_list(const char *instance_name, time_t 
requested_backup_id) } /* - * Create list of backup datafiles. - * If 'requested_backup_id' is INVALID_BACKUP_ID, exit with error. - * If valid backup id is passed only matching backup will be added to the list. - * TODO this function only used once. Is it really needed? + * Get list of files in the backup from the DATABASE_FILE_LIST. */ parray * get_backup_filelist(pgBackup *backup, bool strict) { parray *files = NULL; char backup_filelist_path[MAXPGPATH]; + FILE *fp; + char buf[BLCKSZ]; + char stdio_buf[STDIO_BUFSIZE]; + pg_crc32 content_crc = 0; join_path_components(backup_filelist_path, backup->root_dir, DATABASE_FILE_LIST); - files = dir_read_file_list(NULL, NULL, backup_filelist_path, FIO_BACKUP_HOST, backup->content_crc); + + fp = fio_open_stream(backup_filelist_path, FIO_BACKUP_HOST); + if (fp == NULL) + elog(ERROR, "cannot open \"%s\": %s", backup_filelist_path, strerror(errno)); + + /* enable stdio buffering for local file */ + if (!fio_is_remote(FIO_BACKUP_HOST)) + setvbuf(fp, stdio_buf, _IOFBF, STDIO_BUFSIZE); + + files = parray_new(); + + INIT_FILE_CRC32(true, content_crc); + + while (fgets(buf, lengthof(buf), fp)) + { + char path[MAXPGPATH]; + char linked[MAXPGPATH]; + char compress_alg_string[MAXPGPATH]; + int64 write_size, + mode, /* bit length of mode_t depends on platforms */ + is_datafile, + is_cfs, + external_dir_num, + crc, + segno, + n_blocks, + n_headers, + dbOid, /* used for partial restore */ + hdr_crc, + hdr_off, + hdr_size; + pgFile *file; + + COMP_FILE_CRC32(true, content_crc, buf, strlen(buf)); + + get_control_value(buf, "path", path, NULL, true); + get_control_value(buf, "size", NULL, &write_size, true); + get_control_value(buf, "mode", NULL, &mode, true); + get_control_value(buf, "is_datafile", NULL, &is_datafile, true); + get_control_value(buf, "is_cfs", NULL, &is_cfs, false); + get_control_value(buf, "crc", NULL, &crc, true); + get_control_value(buf, "compress_alg", compress_alg_string, NULL, false); + get_control_value(buf, "external_dir_num", NULL, &external_dir_num, false); + get_control_value(buf, "dbOid", NULL, &dbOid, false); + + file = pgFileInit(path); + file->write_size = (int64) write_size; + file->mode = (mode_t) mode; + file->is_datafile = is_datafile ? true : false; + file->is_cfs = is_cfs ? true : false; + file->crc = (pg_crc32) crc; + file->compress_alg = parse_compress_alg(compress_alg_string); + file->external_dir_num = external_dir_num; + file->dbOid = dbOid ? 
dbOid : 0; + + /* + * Optional fields + */ + if (get_control_value(buf, "linked", linked, NULL, false) && linked[0]) + { + file->linked = pgut_strdup(linked); + canonicalize_path(file->linked); + } + + if (get_control_value(buf, "segno", NULL, &segno, false)) + file->segno = (int) segno; + + if (get_control_value(buf, "n_blocks", NULL, &n_blocks, false)) + file->n_blocks = (int) n_blocks; + + if (get_control_value(buf, "n_headers", NULL, &n_headers, false)) + file->n_headers = (int) n_headers; + + if (get_control_value(buf, "hdr_crc", NULL, &hdr_crc, false)) + file->hdr_crc = (pg_crc32) hdr_crc; + + if (get_control_value(buf, "hdr_off", NULL, &hdr_off, false)) + file->hdr_off = hdr_off; + + if (get_control_value(buf, "hdr_size", NULL, &hdr_size, false)) + file->hdr_size = (int) hdr_size; + + parray_append(files, file); + } + + FIN_FILE_CRC32(true, content_crc); + + if (ferror(fp)) + elog(ERROR, "Failed to read from file: \"%s\"", backup_filelist_path); + + fio_close_stream(fp); + + if (backup->content_crc != 0 && + backup->content_crc != content_crc) + { + elog(WARNING, "Invalid CRC of backup control file '%s': %u. Expected: %u", + backup_filelist_path, content_crc, backup->content_crc); + return NULL; + } /* redundant sanity? */ if (!files) diff --git a/src/dir.c b/src/dir.c index 2bcd87b57..1573f6880 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1430,7 +1430,7 @@ get_external_remap(char *current_dir) * * Returns true if the value was found in the line. */ -static bool +bool get_control_value(const char *str, const char *name, char *value_str, int64 *value_int64, bool is_mandatory) { @@ -1554,123 +1554,6 @@ get_control_value(const char *str, const char *name, return false; /* Make compiler happy */ } -/* - * Construct parray of pgFile from the backup content list. - * If root is not NULL, path will be absolute path. 
- */ -parray * -dir_read_file_list(const char *root, const char *external_prefix, - const char *file_txt, fio_location location, pg_crc32 expected_crc) -{ - FILE *fp; - parray *files; - char buf[BLCKSZ]; - char stdio_buf[STDIO_BUFSIZE]; - pg_crc32 content_crc = 0; - - fp = fio_open_stream(file_txt, location); - if (fp == NULL) - elog(ERROR, "cannot open \"%s\": %s", file_txt, strerror(errno)); - - /* enable stdio buffering for local file */ - if (!fio_is_remote(location)) - setvbuf(fp, stdio_buf, _IOFBF, STDIO_BUFSIZE); - - files = parray_new(); - - INIT_FILE_CRC32(true, content_crc); - - while (fgets(buf, lengthof(buf), fp)) - { - char path[MAXPGPATH]; - char linked[MAXPGPATH]; - char compress_alg_string[MAXPGPATH]; - int64 write_size, - mode, /* bit length of mode_t depends on platforms */ - is_datafile, - is_cfs, - external_dir_num, - crc, - segno, - n_blocks, - n_headers, - dbOid, /* used for partial restore */ - hdr_crc, - hdr_off, - hdr_size; - pgFile *file; - - COMP_FILE_CRC32(true, content_crc, buf, strlen(buf)); - - get_control_value(buf, "path", path, NULL, true); - get_control_value(buf, "size", NULL, &write_size, true); - get_control_value(buf, "mode", NULL, &mode, true); - get_control_value(buf, "is_datafile", NULL, &is_datafile, true); - get_control_value(buf, "is_cfs", NULL, &is_cfs, false); - get_control_value(buf, "crc", NULL, &crc, true); - get_control_value(buf, "compress_alg", compress_alg_string, NULL, false); - get_control_value(buf, "external_dir_num", NULL, &external_dir_num, false); - get_control_value(buf, "dbOid", NULL, &dbOid, false); - - file = pgFileInit(path); - file->write_size = (int64) write_size; - file->mode = (mode_t) mode; - file->is_datafile = is_datafile ? true : false; - file->is_cfs = is_cfs ? true : false; - file->crc = (pg_crc32) crc; - file->compress_alg = parse_compress_alg(compress_alg_string); - file->external_dir_num = external_dir_num; - file->dbOid = dbOid ? dbOid : 0; - - /* - * Optional fields - */ - - if (get_control_value(buf, "linked", linked, NULL, false) && linked[0]) - { - file->linked = pgut_strdup(linked); - canonicalize_path(file->linked); - } - - if (get_control_value(buf, "segno", NULL, &segno, false)) - file->segno = (int) segno; - - if (get_control_value(buf, "n_blocks", NULL, &n_blocks, false)) - file->n_blocks = (int) n_blocks; - - if (get_control_value(buf, "n_headers", NULL, &n_headers, false)) - file->n_headers = (int) n_headers; - - if (get_control_value(buf, "hdr_crc", NULL, &hdr_crc, false)) - file->hdr_crc = (pg_crc32) hdr_crc; - - if (get_control_value(buf, "hdr_off", NULL, &hdr_off, false)) - file->hdr_off = hdr_off; - - if (get_control_value(buf, "hdr_size", NULL, &hdr_size, false)) - file->hdr_size = (int) hdr_size; - - parray_append(files, file); - } - - FIN_FILE_CRC32(true, content_crc); - - if (ferror(fp)) - elog(ERROR, "Failed to read from file: \"%s\"", file_txt); - - fio_close_stream(fp); - - if (expected_crc != 0 && - expected_crc != content_crc) - { - elog(WARNING, "Invalid CRC of backup control file '%s': %u. Expected: %u", - file_txt, content_crc, expected_crc); - return NULL; - } - - return files; -} - /* * Check if directory empty. 
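For the list-reading logic whose body moved into get_backup_filelist() above: every line of DATABASE_FILE_LIST is a set of quoted key/value pairs, mandatory fields such as "path" and "size" abort the load when absent, optional ones simply report that they were not found, and a checksum accumulated over each raw line is compared with backup->content_crc once the whole file has been read. The sketch below imitates that shape under stated assumptions: the sample line layout is only an approximation of the real control-file format, get_value() is a simplified stand-in for get_control_value(), and zlib's crc32() (link with -lz) stands in for PostgreSQL's CRC-32C macros, which use a different polynomial but the same accumulate-per-line idea.

    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    /* simplified stand-in for get_control_value(): copy the value of "name"
     * out of one control-file line, return 0 if the field is absent */
    static int get_value(const char *line, const char *name, char *out, size_t outlen)
    {
        char        key[64];
        const char *p;

        snprintf(key, sizeof(key), "\"%s\":\"", name);
        p = strstr(line, key);
        if (p == NULL)
            return 0;
        p += strlen(key);
        while (*p != '\0' && *p != '"' && outlen > 1)
        {
            *out++ = *p++;
            outlen--;
        }
        *out = '\0';
        return 1;
    }

    int main(void)
    {
        const char *lines[] = {
            "{\"path\":\"global/pg_control\", \"size\":\"8192\", \"mode\":\"33184\"}\n",
            "{\"path\":\"base/1/2676\", \"size\":\"0\", \"mode\":\"33184\"}\n",
        };
        uLong   crc = crc32(0L, Z_NULL, 0);
        char    value[256];
        size_t  i;

        for (i = 0; i < sizeof(lines) / sizeof(lines[0]); i++)
        {
            /* hash the raw line exactly as it was read */
            crc = crc32(crc, (const Bytef *) lines[i], strlen(lines[i]));

            if (!get_value(lines[i], "path", value, sizeof(value)))
                return 1;                                   /* mandatory field: give up */
            printf("path = %s\n", value);

            if (!get_value(lines[i], "segno", value, sizeof(value)))
                printf("segno absent, keep the default\n"); /* optional field */
        }

        /* the real code compares this with backup->content_crc and refuses
         * to use the file list on mismatch */
        printf("accumulated crc = %lu\n", (unsigned long) crc);
        return 0;
    }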
*/ diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 06fab18dd..2e72fe864 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -975,6 +975,8 @@ extern CompressAlg parse_compress_alg(const char *arg); extern const char* deparse_compress_alg(int alg); /* in dir.c */ +extern bool get_control_value(const char *str, const char *name, + char *value_str, int64 *value_int64, bool is_mandatory); extern void dir_list_file(parray *files, const char *root, bool exclude, bool follow_symlink, bool add_root, bool backup_logs, bool skip_hidden, int external_dir_num, fio_location location); @@ -1000,8 +1002,6 @@ extern void db_map_entry_free(void *map); extern void print_file_list(FILE *out, const parray *files, const char *root, const char *external_prefix, parray *external_list); -extern parray *dir_read_file_list(const char *root, const char *external_prefix, - const char *file_txt, fio_location location, pg_crc32 expected_crc); extern parray *make_external_directory_list(const char *colon_separated_dirs, bool remap); extern void free_dir_list(parray *list); From 53be6243f96aeb9e94f3d04ed57cfc1a34cfff40 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Tue, 2 Feb 2021 20:17:31 +0300 Subject: [PATCH 016/525] tests: remove debug messages in module "incr_restore" --- tests/incr_restore.py | 99 +++++++++++++++++++++++-------------------- 1 file changed, 53 insertions(+), 46 deletions(-) diff --git a/tests/incr_restore.py b/tests/incr_restore.py index add485b3c..3aa84121f 100644 --- a/tests/incr_restore.py +++ b/tests/incr_restore.py @@ -70,9 +70,9 @@ def test_basic_incr_restore(self): node.stop() - print(self.restore_node( + self.restore_node( backup_dir, 'node', node, - options=["-j", "4", "--incremental-mode=checksum"])) + options=["-j", "4", "--incremental-mode=checksum"]) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -119,9 +119,9 @@ def test_basic_incr_restore_into_missing_directory(self): node.cleanup() - print(self.restore_node( + self.restore_node( backup_dir, 'node', node, - options=["-j", "4", "--incremental-mode=checksum"])) + options=["-j", "4", "--incremental-mode=checksum"]) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -453,7 +453,6 @@ def test_incr_restore_with_tablespace_4(self): "Output: {0} \n CMD: {1}".format( repr(self.output), self.cmd)) except ProbackupException as e: - print(e.message) self.assertIn( 'WARNING: Backup catalog was initialized for system id', e.message, @@ -649,8 +648,6 @@ def test_incr_restore_with_tablespace_7(self): options=[ "-j", "4", "--incremental-mode=checksum"]) - print(out) - pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -800,9 +797,9 @@ def test_incr_checksum_restore(self): pgdata = self.pgdata_content(node_1.data_dir) - print(self.restore_node( + self.restore_node( backup_dir, 'node', node, - options=["-j", "4", "--incremental-mode=checksum"])) + options=["-j", "4", "--incremental-mode=checksum"]) pgdata_restored = self.pgdata_content(node.data_dir) @@ -890,8 +887,8 @@ def test_incr_lsn_restore(self): pgdata = self.pgdata_content(node_1.data_dir) - print(self.restore_node( - backup_dir, 'node', node, options=["-j", "4", "--incremental-mode=lsn"])) + self.restore_node( + backup_dir, 'node', node, options=["-j", "4", "--incremental-mode=lsn"]) pgdata_restored = self.pgdata_content(node.data_dir) @@ -1079,9 +1076,9 @@ def test_incr_checksum_corruption_detection(self): f.flush() 
f.close - print(self.restore_node( + self.restore_node( backup_dir, 'node', node, data_dir=node.data_dir, - options=["-j", "4", "--incremental-mode=checksum"])) + options=["-j", "4", "--incremental-mode=checksum"]) pgdata_restored = self.pgdata_content(node.data_dir) @@ -1209,9 +1206,9 @@ def test_incr_restore_multiple_external(self): node.stop() - print(self.restore_node( + self.restore_node( backup_dir, 'node', node, - options=["-j", "4", '--incremental-mode=checksum', '--log-level-console=VERBOSE'])) + options=["-j", "4", '--incremental-mode=checksum']) pgdata_restored = self.pgdata_content( node.base_dir, exclude_dirs=['logs']) @@ -1281,9 +1278,9 @@ def test_incr_lsn_restore_multiple_external(self): node.stop() - print(self.restore_node( + self.restore_node( backup_dir, 'node', node, - options=["-j", "4", '--incremental-mode=lsn'])) + options=["-j", "4", '--incremental-mode=lsn']) pgdata_restored = self.pgdata_content( node.base_dir, exclude_dirs=['logs']) @@ -1341,11 +1338,13 @@ def test_incr_lsn_restore_backward(self): node.stop() - print(self.restore_node( + self.restore_node( backup_dir, 'node', node, backup_id=full_id, options=[ - "-j", "4", '--incremental-mode=lsn', '--log-level-file=VERBOSE', - '--recovery-target=immediate', '--recovery-target-action=pause'])) + "-j", "4", + '--incremental-mode=lsn', + '--recovery-target=immediate', + '--recovery-target-action=pause']) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(full_pgdata, pgdata_restored) @@ -1384,11 +1383,13 @@ def test_incr_lsn_restore_backward(self): node.slow_start(replica=True) node.stop() - print(self.restore_node( + self.restore_node( backup_dir, 'node', node, backup_id=delta_id, options=[ - "-j", "4", '--incremental-mode=lsn', - '--recovery-target=immediate', '--recovery-target-action=pause'])) + "-j", "4", + '--incremental-mode=lsn', + '--recovery-target=immediate', + '--recovery-target-action=pause']) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(delta_pgdata, pgdata_restored) @@ -1447,11 +1448,13 @@ def test_incr_checksum_restore_backward(self): node.stop() - print(self.restore_node( + self.restore_node( backup_dir, 'node', node, backup_id=full_id, options=[ - "-j", "4", '--incremental-mode=checksum', - '--recovery-target=immediate', '--recovery-target-action=pause'])) + "-j", "4", + '--incremental-mode=checksum', + '--recovery-target=immediate', + '--recovery-target-action=pause']) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(full_pgdata, pgdata_restored) @@ -1459,11 +1462,13 @@ def test_incr_checksum_restore_backward(self): node.slow_start(replica=True) node.stop() - print(self.restore_node( + self.restore_node( backup_dir, 'node', node, backup_id=page_id, options=[ - "-j", "4", '--incremental-mode=checksum', - '--recovery-target=immediate', '--recovery-target-action=pause'])) + "-j", "4", + '--incremental-mode=checksum', + '--recovery-target=immediate', + '--recovery-target-action=pause']) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(page_pgdata, pgdata_restored) @@ -1471,11 +1476,13 @@ def test_incr_checksum_restore_backward(self): node.slow_start(replica=True) node.stop() - print(self.restore_node( + self.restore_node( backup_dir, 'node', node, backup_id=delta_id, options=[ - "-j", "4", '--incremental-mode=checksum', - '--recovery-target=immediate', '--recovery-target-action=pause'])) + "-j", "4", + '--incremental-mode=checksum', + '--recovery-target=immediate', + '--recovery-target-action=pause']) 
pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(delta_pgdata, pgdata_restored) @@ -1542,9 +1549,9 @@ def test_make_replica_via_incr_checksum_restore(self): data_dir=new_master.data_dir, backup_type='page') # restore old master as replica - print(self.restore_node( + self.restore_node( backup_dir, 'node', old_master, data_dir=old_master.data_dir, - options=['-R', '--incremental-mode=checksum'])) + options=['-R', '--incremental-mode=checksum']) self.set_replica(new_master, old_master, synchronous=True) @@ -1615,9 +1622,9 @@ def test_make_replica_via_incr_lsn_restore(self): data_dir=new_master.data_dir, backup_type='page') # restore old master as replica - print(self.restore_node( + self.restore_node( backup_dir, 'node', old_master, data_dir=old_master.data_dir, - options=['-R', '--incremental-mode=lsn'])) + options=['-R', '--incremental-mode=lsn']) self.set_replica(new_master, old_master, synchronous=True) @@ -1762,9 +1769,9 @@ def test_incr_lsn_long_xact_1(self): node.stop() try: - print(self.restore_node( + self.restore_node( backup_dir, 'node', node, backup_id=full_id, - options=["-j", "4", '--incremental-mode=lsn'])) + options=["-j", "4", '--incremental-mode=lsn']) # we should die here because exception is what we expect to happen self.assertEqual( 1, 0, @@ -1920,9 +1927,9 @@ def test_incr_restore_zero_size_file_checksum(self): node.stop() - print(self.restore_node( + self.restore_node( backup_dir, 'node', node, backup_id=id1, - options=["-j", "4", '-I', 'checksum'])) + options=["-j", "4", '-I', 'checksum']) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata1, pgdata_restored) @@ -1994,9 +2001,9 @@ def test_incr_restore_zero_size_file_lsn(self): node.stop() - print(self.restore_node( + self.restore_node( backup_dir, 'node', node, backup_id=id1, - options=["-j", "4", '-I', 'checksum'])) + options=["-j", "4", '-I', 'checksum']) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata1, pgdata_restored) @@ -2088,12 +2095,12 @@ def test_incremental_partial_restore_exclude_checksum(self): pgdata1 = self.pgdata_content(node1.data_dir) # partial incremental restore backup into node2 - print(self.restore_node( + self.restore_node( backup_dir, 'node', node2, options=[ "--db-exclude=db1", "--db-exclude=db5", - "-I", "checksum"])) + "-I", "checksum"]) pgdata2 = self.pgdata_content(node2.data_dir) @@ -2198,12 +2205,12 @@ def test_incremental_partial_restore_exclude_lsn(self): node2.port = node.port node2.slow_start() node2.stop() - print(self.restore_node( + self.restore_node( backup_dir, 'node', node2, options=[ "--db-exclude=db1", "--db-exclude=db5", - "-I", "lsn"])) + "-I", "lsn"]) pgdata2 = self.pgdata_content(node2.data_dir) From da2c49dfe1807238f3beb733ac0f1a951e09fe94 Mon Sep 17 00:00:00 2001 From: anastasia Date: Tue, 2 Feb 2021 22:41:45 +0300 Subject: [PATCH 017/525] code cleanup --- src/dir.c | 2 +- src/pg_probackup.c | 2 +- src/pg_probackup.h | 17 ----------------- 3 files changed, 2 insertions(+), 19 deletions(-) diff --git a/src/dir.c b/src/dir.c index 1573f6880..0b724036a 100644 --- a/src/dir.c +++ b/src/dir.c @@ -28,7 +28,7 @@ * start so they are not included in backups. The directories themselves are * kept and included as empty to preserve access permissions. 
*/ -const char *pgdata_exclude_dir[] = +static const char *pgdata_exclude_dir[] = { PG_XLOG_DIR, /* diff --git a/src/pg_probackup.c b/src/pg_probackup.c index dd2ac97ee..37d872309 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -104,7 +104,7 @@ bool force = false; bool dry_run = false; static char *delete_status = NULL; /* compression options */ -bool compress_shortcut = false; +static bool compress_shortcut = false; /* other options */ char *instance_name; diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 2e72fe864..18bf87bbc 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -672,16 +672,6 @@ typedef struct BackupPageHeader2 #define PageIsTruncated -2 #define PageIsCorrupted -3 /* used by checkdb */ - -/* - * return pointer that exceeds the length of prefix from character string. - * ex. str="/xxx/yyy/zzz", prefix="/xxx/yyy", return="zzz". - * - * Deprecated. Do not use this in new code. - */ -#define GetRelativePath(str, prefix) \ - ((strlen(str) <= strlen(prefix)) ? "" : str + strlen(prefix) + 1) - /* * Return timeline, xlog ID and record offset from an LSN of the type * 0/B000188, usual result from pg_stop_backup() and friends. @@ -789,9 +779,6 @@ extern bool delete_expired; extern bool merge_expired; extern bool dry_run; -/* compression options */ -extern bool compress_shortcut; - /* other options */ extern char *instance_name; @@ -808,10 +795,6 @@ extern pgBackup current; /* argv of the process */ extern char** commands_args; -/* in dir.c */ -/* exclude directory list for $PGDATA file listing */ -extern const char *pgdata_exclude_dir[]; - /* in backup.c */ extern int do_backup(pgSetBackupParams *set_backup_params, bool no_validate, bool no_sync, bool backup_logs); From 94ada4c13745bc2e48a38a6d4b027bbbcbe9e644 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Wed, 3 Feb 2021 01:41:49 +0300 Subject: [PATCH 018/525] [Issue #308] test coverage and comments improvement --- src/catalog.c | 49 ++++++++------------------------- tests/helpers/ptrack_helpers.py | 14 +++++----- tests/locking.py | 46 +++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+), 45 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 63bb6862e..8ac3e5799 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -166,6 +166,9 @@ write_backup_status(pgBackup *backup, BackupStatus status, * * TODO: lock-timeout as parameter * TODO: we must think about more fine grain unlock mechanism - separate unlock_backup() function. + * TODO: more accurate naming + * -> exclusive lock -> acquire HW_LATCH and wait until all LW_LATCH`es are clear + * -> shared lock -> acquire HW_LATCH, acquire LW_LATCH, release HW_LATCH */ bool lock_backup(pgBackup *backup, bool strict, bool exclusive) @@ -264,45 +267,13 @@ lock_backup_exclusive(pgBackup *backup, bool strict) int empty_tries = LOCK_STALE_TIMEOUT; int len; int encoded_pid; - pid_t my_p_pid; join_path_components(lock_file, backup->root_dir, BACKUP_LOCK_FILE); - /* - * TODO: is this stuff with ppid below is relevant for us ? - * - * If the PID in the lockfile is our own PID or our parent's or - * grandparent's PID, then the file must be stale (probably left over from - * a previous system boot cycle). We need to check this because of the - * likelihood that a reboot will assign exactly the same PID as we had in - * the previous reboot, or one that's only one or two counts larger and - * hence the lockfile's PID now refers to an ancestor shell process. 
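A note on the liveness test that both the old and the new locking code rely on: the PID parsed from the lock file is probed with kill(pid, 0), which delivers no signal and only reports whether one could be delivered, so 0 means the owner is alive, ESRCH means it is gone and the file is stale, and EPERM means it exists under another user and has to be treated as alive. A small hedged demo of that classification, using this process's parent as the probed PID:

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t encoded_pid = getppid();  /* stand-in for the PID read from backup.pid */

        if (encoded_pid == getpid())
            printf("the lock is already ours\n");
        else if (kill(encoded_pid, 0) == 0)
            printf("process %d is alive, keep waiting\n", (int) encoded_pid);
        else if (errno == ESRCH)
            printf("process %d is gone, the lock file is stale\n", (int) encoded_pid);
        else
            printf("cannot probe %d (%s), assume it is alive\n",
                   (int) encoded_pid, strerror(errno));
        return 0;
    }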
We - * allow pg_ctl to pass down its parent shell PID (our grandparent PID) - * via the environment variable PG_GRANDPARENT_PID; this is so that - * launching the postmaster via pg_ctl can be just as reliable as - * launching it directly. There is no provision for detecting - * further-removed ancestor processes, but if the init script is written - * carefully then all but the immediate parent shell will be root-owned - * processes and so the kill test will fail with EPERM. Note that we - * cannot get a false negative this way, because an existing postmaster - * would surely never launch a competing postmaster or pg_ctl process - * directly. - */ -#ifndef WIN32 - my_p_pid = getppid(); -#else - - /* - * Windows hasn't got getppid(), but doesn't need it since it's not using - * real kill() either... - */ - my_p_pid = 0; -#endif - /* * We need a loop here because of race conditions. But don't loop forever * (for example, a non-writable $backup_instance_path directory might cause a failure - * that won't go away). 100 tries seems like plenty. + * that won't go away). */ do { @@ -396,14 +367,12 @@ lock_backup_exclusive(pgBackup *backup, bool strict) /* * Check to see if the other process still exists - * - * Per discussion above, my_pid, my_p_pid can be - * ignored as false matches. - * * Normally kill() will fail with ESRCH if the given PID doesn't * exist. */ - if (encoded_pid != my_pid && encoded_pid != my_p_pid) + if (encoded_pid == my_pid) + return 0; + else { if (kill(encoded_pid, 0) == 0) { @@ -508,6 +477,10 @@ lock_backup_exclusive(pgBackup *backup, bool strict) lock_file, strerror(save_errno)); } +// elog(LOG, "Acquired exclusive lock for backup %s after %ds", +// base36enc(backup->start_time), +// LOCK_TIMEOUT - ntries + LOCK_STALE_TIMEOUT - empty_tries); + return 0; } diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 3c75ca2e7..833e95a36 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -757,7 +757,7 @@ def run_pb(self, command, asynchronous=False, gdb=False, old_binary=False, retur return GDBobj([binary_path] + command, self.verbose) if asynchronous: return subprocess.Popen( - self.cmd, + [binary_path] + command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env @@ -1133,8 +1133,8 @@ def show_archive( exit(1) def validate_pb( - self, backup_dir, instance=None, - backup_id=None, options=[], old_binary=False, gdb=False + self, backup_dir, instance=None, backup_id=None, + options=[], old_binary=False, gdb=False, asynchronous=False ): cmd_list = [ @@ -1146,11 +1146,11 @@ def validate_pb( if backup_id: cmd_list += ['-i', backup_id] - return self.run_pb(cmd_list + options, old_binary=old_binary, gdb=gdb) + return self.run_pb(cmd_list + options, old_binary=old_binary, gdb=gdb, asynchronous=asynchronous) def delete_pb( - self, backup_dir, instance, - backup_id=None, options=[], old_binary=False, gdb=False): + self, backup_dir, instance, backup_id=None, + options=[], old_binary=False, gdb=False, asynchronous=False): cmd_list = [ 'delete', '-B', backup_dir @@ -1160,7 +1160,7 @@ def delete_pb( if backup_id: cmd_list += ['-i', backup_id] - return self.run_pb(cmd_list + options, old_binary=old_binary, gdb=gdb) + return self.run_pb(cmd_list + options, old_binary=old_binary, gdb=gdb, asynchronous=asynchronous) def delete_expired( self, backup_dir, instance, options=[], old_binary=False): diff --git a/tests/locking.py b/tests/locking.py index 92c779c8a..540007838 100644 --- a/tests/locking.py +++ b/tests/locking.py @@ 
-535,6 +535,52 @@ def test_backup_directory_name(self): # Clean after yourself self.del_test_dir(module_name, fname) + def test_empty_lock_file(self): + """ + https://github.com/postgrespro/pg_probackup/issues/308 + """ + fname = self.id().split('.')[3] + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Fill with data + node.pgbench_init(scale=100) + + # FULL + backup_id = self.backup_node(backup_dir, 'node', node) + + lockfile = os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.pid') + with open(lockfile, "w+") as f: + f.truncate() + + out = self.validate_pb(backup_dir, 'node', backup_id) + + self.assertIn( + "Waiting 30 seconds on empty exclusive lock for backup", out) + +# lockfile = os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.pid') +# with open(lockfile, "w+") as f: +# f.truncate() +# +# p1 = self.validate_pb(backup_dir, 'node', backup_id, asynchronous=True, +# options=['--log-level-file=LOG', '--log-filename=validate.log']) +# sleep(3) +# p2 = self.delete_pb(backup_dir, 'node', backup_id, asynchronous=True, +# options=['--log-level-file=LOG', '--log-filename=delete.log']) +# +# p1.wait() +# p2.wait() + + # Clean after yourself + self.del_test_dir(module_name, fname) + # TODO: # test that concurrent validation and restore are not locking each other # check that quick exclusive lock, when taking RO-lock, is really quick From c1e81edd49fdd93a0b6943b62fdf0b4c98ed828c Mon Sep 17 00:00:00 2001 From: anastasia Date: Wed, 3 Feb 2021 13:03:46 +0300 Subject: [PATCH 019/525] Code cleanup. Start removing global variables in pg_probackup.c --- src/init.c | 14 +++++++------- src/pg_probackup.c | 10 +++++++++- src/pg_probackup.h | 2 +- 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/src/init.c b/src/init.c index 1ab6dc0f9..51e628f0e 100644 --- a/src/init.c +++ b/src/init.c @@ -17,34 +17,34 @@ * Initialize backup catalog. 
*/ int -do_init(void) +do_init(char *backup_catalog_path) { char path[MAXPGPATH]; char arclog_path_dir[MAXPGPATH]; int results; - results = pg_check_dir(backup_path); + results = pg_check_dir(backup_catalog_path); if (results == 4) /* exists and not empty*/ elog(ERROR, "backup catalog already exist and it's not empty"); else if (results == -1) /*trouble accessing directory*/ { int errno_tmp = errno; elog(ERROR, "cannot open backup catalog directory \"%s\": %s", - backup_path, strerror(errno_tmp)); + backup_catalog_path, strerror(errno_tmp)); } /* create backup catalog root directory */ - dir_create_dir(backup_path, DIR_PERMISSION, false); + dir_create_dir(backup_catalog_path, DIR_PERMISSION, false); /* create backup catalog data directory */ - join_path_components(path, backup_path, BACKUPS_DIR); + join_path_components(path, backup_catalog_path, BACKUPS_DIR); dir_create_dir(path, DIR_PERMISSION, false); /* create backup catalog wal directory */ - join_path_components(arclog_path_dir, backup_path, "wal"); + join_path_components(arclog_path_dir, backup_catalog_path, "wal"); dir_create_dir(arclog_path_dir, DIR_PERMISSION, false); - elog(INFO, "Backup catalog '%s' successfully inited", backup_path); + elog(INFO, "Backup catalog '%s' successfully inited", backup_catalog_path); return 0; } diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 37d872309..5479f78c7 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -2,6 +2,13 @@ * * pg_probackup.c: Backup/Recovery manager for PostgreSQL. * + * This is an entry point for the program. + * Parse command name and it's options, verify them and call a + * do_***() function that implements the command. + * + * Avoid using global variables in the code. + * Pass all needed information as funciton arguments. + * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION * Portions Copyright (c) 2015-2019, Postgres Professional * @@ -28,6 +35,7 @@ const char *PROGRAM_URL = "https://github.com/postgrespro/pg_probackup"; const char *PROGRAM_EMAIL = "https://github.com/postgrespro/pg_probackup/issues"; /* directory options */ +/* TODO make it local variable, pass as an argument to all commands that need it. */ char *backup_path = NULL; /* * path or to the data files in the backup catalog @@ -739,7 +747,7 @@ main(int argc, char *argv[]) case DELETE_INSTANCE_CMD: return do_delete_instance(); case INIT_CMD: - return do_init(); + return do_init(backup_path); case BACKUP_CMD: { current.stream = stream_wal; diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 18bf87bbc..b169b2237 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -839,7 +839,7 @@ extern void merge_chain(parray *parent_chain, extern parray *read_database_map(pgBackup *backup); /* in init.c */ -extern int do_init(void); +extern int do_init(char *backup_catalog_path); extern int do_add_instance(InstanceConfig *instance); /* in archive.c */ From 24a7a085c379e511910ff7b4407487e0e77f8a86 Mon Sep 17 00:00:00 2001 From: anastasia Date: Wed, 3 Feb 2021 13:41:01 +0300 Subject: [PATCH 020/525] Code cleanup. 
Pass backup_catalog_path explicitly to functions that need it --- src/archive.c | 2 +- src/backup.c | 2 +- src/catalog.c | 4 ++-- src/init.c | 10 +++++----- src/pg_probackup.c | 6 +++--- src/pg_probackup.h | 9 +++++---- src/show.c | 7 ++++--- src/validate.c | 8 ++++---- 8 files changed, 25 insertions(+), 23 deletions(-) diff --git a/src/archive.c b/src/archive.c index 2d858a64c..28622cc57 100644 --- a/src/archive.c +++ b/src/archive.c @@ -1046,7 +1046,7 @@ do_archive_get(InstanceConfig *instance, const char *prefetch_dir_arg, join_path_components(absolute_wal_file_path, current_dir, wal_file_path); /* full filepath to WAL file in archive directory. - * backup_path/wal/instance_name/000000010000000000000001 */ + * $BACKUP_PATH/wal/instance_name/000000010000000000000001 */ join_path_components(backup_wal_file_path, instance->arclog_path, wal_file_name); INSTR_TIME_SET_CURRENT(start_time); diff --git a/src/backup.c b/src/backup.c index 0b61234f1..c84b1b7d5 100644 --- a/src/backup.c +++ b/src/backup.c @@ -89,7 +89,7 @@ backup_stopbackup_callback(bool fatal, void *userdata) /* * Take a backup of a single postgresql instance. - * Move files from 'pgdata' to a subdirectory in 'backup_path'. + * Move files from 'pgdata' to a subdirectory in backup catalog. */ static void do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool backup_logs) diff --git a/src/catalog.c b/src/catalog.c index 11b9a27d1..4d4df123d 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -692,7 +692,7 @@ IsDir(const char *dirpath, const char *entry, fio_location location) * actual config of each instance. */ parray * -catalog_get_instance_list(void) +catalog_get_instance_list(char *backup_catalog_path) { char path[MAXPGPATH]; DIR *dir; @@ -702,7 +702,7 @@ catalog_get_instance_list(void) instances = parray_new(); /* open directory and list contents */ - join_path_components(path, backup_path, BACKUPS_DIR); + join_path_components(path, backup_catalog_path, BACKUPS_DIR); dir = opendir(path); if (dir == NULL) elog(ERROR, "Cannot open directory \"%s\": %s", diff --git a/src/init.c b/src/init.c index 51e628f0e..255f5425d 100644 --- a/src/init.c +++ b/src/init.c @@ -49,7 +49,7 @@ do_init(char *backup_catalog_path) } int -do_add_instance(InstanceConfig *instance) +do_add_instance(char *backup_catalog_path, InstanceConfig *instance) { char path[MAXPGPATH]; char arclog_path_dir[MAXPGPATH]; @@ -66,14 +66,14 @@ do_add_instance(InstanceConfig *instance) instance->xlog_seg_size = get_xlog_seg_size(instance->pgdata); /* Ensure that all root directories already exist */ - if (access(backup_path, F_OK) != 0) - elog(ERROR, "Directory does not exist: '%s'", backup_path); + if (access(backup_catalog_path, F_OK) != 0) + elog(ERROR, "Directory does not exist: '%s'", backup_catalog_path); - join_path_components(path, backup_path, BACKUPS_DIR); + join_path_components(path, backup_catalog_path, BACKUPS_DIR); if (access(path, F_OK) != 0) elog(ERROR, "Directory does not exist: '%s'", path); - join_path_components(arclog_path_dir, backup_path, "wal"); + join_path_components(arclog_path_dir, backup_catalog_path, "wal"); if (access(arclog_path_dir, F_OK) != 0) elog(ERROR, "Directory does not exist: '%s'", arclog_path_dir); diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 5479f78c7..8a52ba515 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -743,7 +743,7 @@ main(int argc, char *argv[]) wal_file_path, wal_file_name, batch_size, !no_validate_wal); break; case ADD_INSTANCE_CMD: - return 
do_add_instance(&instance_config); + return do_add_instance(backup_path, &instance_config); case DELETE_INSTANCE_CMD: return do_delete_instance(); case INIT_CMD: @@ -770,7 +770,7 @@ main(int argc, char *argv[]) if (datname_exclude_list || datname_include_list) elog(ERROR, "You must specify parameter (-i, --backup-id) for partial validation"); - return do_validate_all(); + return do_validate_all(backup_path); } else /* PITR validation and, optionally, partial validation */ @@ -779,7 +779,7 @@ main(int argc, char *argv[]) restore_params, no_sync); case SHOW_CMD: - return do_show(instance_name, current.backup_id, show_archive); + return do_show(backup_path, instance_name, current.backup_id, show_archive); case DELETE_CMD: if (delete_expired && backup_id_string) elog(ERROR, "You cannot specify --delete-expired and (-i, --backup-id) options together"); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index b169b2237..a0120dc8a 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -840,7 +840,7 @@ extern parray *read_database_map(pgBackup *backup); /* in init.c */ extern int do_init(char *backup_catalog_path); -extern int do_add_instance(InstanceConfig *instance); +extern int do_add_instance(char *backup_catalog_path, InstanceConfig *instance); /* in archive.c */ extern void do_archive_push(InstanceConfig *instance, char *wal_file_path, @@ -856,7 +856,8 @@ extern void init_config(InstanceConfig *config, const char *instance_name); extern InstanceConfig *readInstanceConfigFile(const char *instance_name); /* in show.c */ -extern int do_show(const char *instance_name, time_t requested_backup_id, bool show_archive); +extern int do_show(char *backup_catalog_path, const char *instance_name, + time_t requested_backup_id, bool show_archive); /* in delete.c */ extern void do_delete(time_t backup_id); @@ -880,7 +881,7 @@ extern void help_command(ProbackupSubcmd const subcmd); /* in validate.c */ extern void pgBackupValidate(pgBackup* backup, pgRestoreParams *params); -extern int do_validate_all(void); +extern int do_validate_all(char *backup_catalog_path); extern int validate_one_page(Page page, BlockNumber absolute_blkno, XLogRecPtr stop_lsn, PageState *page_st, uint32 checksum_version); @@ -906,7 +907,7 @@ extern bool lock_backup(pgBackup *backup, bool strict, bool exclusive); extern const char *pgBackupGetBackupMode(pgBackup *backup, bool show_color); extern void pgBackupGetBackupModeColor(pgBackup *backup, char *mode); -extern parray *catalog_get_instance_list(void); +extern parray *catalog_get_instance_list(char *backup_catalog_path); extern parray *catalog_get_backup_list(const char *instance_name, time_t requested_backup_id); extern void catalog_lock_backup_list(parray *backup_list, int from_idx, int to_idx, bool strict, bool exclusive); diff --git a/src/show.c b/src/show.c index 61bde9ef3..c88f4fae9 100644 --- a/src/show.c +++ b/src/show.c @@ -75,7 +75,8 @@ static int32 json_level = 0; * Entry point of pg_probackup SHOW subcommand. 
*/ int -do_show(const char *instance_name, time_t requested_backup_id, bool show_archive) +do_show(char *backup_catalog_path, const char *instance_name, + time_t requested_backup_id, bool show_archive) { int i; @@ -93,7 +94,7 @@ do_show(const char *instance_name, time_t requested_backup_id, bool show_archive */ if (instance_name == NULL) { - parray *instances = catalog_get_instance_list(); + parray *instances = catalog_get_instance_list(backup_catalog_path); show_instance_start(); for (i = 0; i < parray_num(instances); i++) @@ -104,7 +105,7 @@ do_show(const char *instance_name, time_t requested_backup_id, bool show_archive if (interrupted) elog(ERROR, "Interrupted during show"); - sprintf(backup_instance_path, "%s/%s/%s", backup_path, BACKUPS_DIR, instance->name); + sprintf(backup_instance_path, "%s/%s/%s", backup_catalog_path, BACKUPS_DIR, instance->name); if (show_archive) show_instance_archive(instance); diff --git a/src/validate.c b/src/validate.c index b44f4b1b8..40c94af67 100644 --- a/src/validate.c +++ b/src/validate.c @@ -382,7 +382,7 @@ pgBackupValidateFiles(void *arg) * If --instance option was provided, validate only backups of this instance. */ int -do_validate_all(void) +do_validate_all(char *backup_catalog_path) { corrupted_backup_found = false; skipped_due_to_lock = false; @@ -395,7 +395,7 @@ do_validate_all(void) struct dirent *dent; /* open directory and list contents */ - join_path_components(path, backup_path, BACKUPS_DIR); + join_path_components(path, backup_catalog_path, BACKUPS_DIR); dir = opendir(path); if (dir == NULL) elog(ERROR, "cannot open directory \"%s\": %s", path, strerror(errno)); @@ -425,8 +425,8 @@ do_validate_all(void) */ instance_name = dent->d_name; sprintf(backup_instance_path, "%s/%s/%s", - backup_path, BACKUPS_DIR, instance_name); - sprintf(arclog_path, "%s/%s/%s", backup_path, "wal", instance_name); + backup_catalog_path, BACKUPS_DIR, instance_name); + sprintf(arclog_path, "%s/%s/%s", backup_catalog_path, "wal", instance_name); join_path_components(conf_path, backup_instance_path, BACKUP_CATALOG_CONF_FILE); if (config_read_opt(conf_path, instance_options, ERROR, false, From 7ef802d02f7c14a0a3f54729ddcb0f3e0f3bd5cc Mon Sep 17 00:00:00 2001 From: anastasia Date: Wed, 3 Feb 2021 14:21:21 +0300 Subject: [PATCH 021/525] Add a comment declaring code refactoring plan --- src/pg_probackup.c | 37 +++++++++++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 8a52ba515..8e09bccfc 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -7,7 +7,29 @@ * do_***() function that implements the command. * * Avoid using global variables in the code. - * Pass all needed information as funciton arguments. + * Pass all needed information as funciton arguments: + * + + * + * TODO: + * + * Functions that work with a backup catalog accept catalogState, + * which currently only contains pathes to backup catalog subdirectories + * + function specific options. + * + * Functions that work with an instance accept instanceState argument, which + * includes catalogState, instance_name, + * info about pgdata associated with the instance, + * various instance config options, and list of backups belonging to the instance. + * + function specific options. + * + * Functions that work with multiple backups in the catalog + * accept instanceState and info needed to determine the range of backups to handle. + * + function specific options. 
+ * + * Functions that work with a single backup accept backupState argument, + * which includes link to the instanceState, backup_id and backup-specific info. + * + function specific options. * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION * Portions Copyright (c) 2015-2019, Postgres Professional @@ -34,6 +56,7 @@ const char *PROGRAM_FULL_PATH = NULL; const char *PROGRAM_URL = "https://github.com/postgrespro/pg_probackup"; const char *PROGRAM_EMAIL = "https://github.com/postgrespro/pg_probackup/issues"; +/* ================ catalogState =========== */ /* directory options */ /* TODO make it local variable, pass as an argument to all commands that need it. */ char *backup_path = NULL; @@ -48,10 +71,13 @@ char backup_instance_path[MAXPGPATH]; */ char arclog_path[MAXPGPATH] = ""; +/* ================ catalogState (END) =========== */ + + + /* colon separated external directories list ("/path1:/path2") */ char *externaldir = NULL; /* common options */ -static char *backup_id_string = NULL; int num_threads = 1; bool stream_wal = false; bool no_color = false; @@ -114,8 +140,9 @@ static char *delete_status = NULL; /* compression options */ static bool compress_shortcut = false; -/* other options */ +/* ================ instanceState =========== */ char *instance_name; +/* ================ instanceState (END) =========== */ /* archive push options */ int batch_size = 1; @@ -137,8 +164,10 @@ int64 ttl = -1; static char *expire_time_string = NULL; static pgSetBackupParams *set_backup_params = NULL; -/* current settings */ +/* ================ backupState =========== */ +static char *backup_id_string = NULL; pgBackup current; +/* ================ backupState (END) =========== */ static bool help_opt = false; From 2284c2b0069dcd6feadef524c1b8bd72a17ed5cb Mon Sep 17 00:00:00 2001 From: anastasia Date: Wed, 3 Feb 2021 15:14:00 +0300 Subject: [PATCH 022/525] Refactoring. Introduce catalogState structure. Update code in init.c --- src/init.c | 38 ++++++++++++++++---------------------- src/pg_probackup.c | 24 +++++++++++++++++++----- src/pg_probackup.h | 20 ++++++++++++++++++-- 3 files changed, 53 insertions(+), 29 deletions(-) diff --git a/src/init.c b/src/init.c index 255f5425d..07de8a98f 100644 --- a/src/init.c +++ b/src/init.c @@ -17,42 +17,37 @@ * Initialize backup catalog. 
*/ int -do_init(char *backup_catalog_path) +do_init(CatalogState *catalogState) { - char path[MAXPGPATH]; - char arclog_path_dir[MAXPGPATH]; int results; - results = pg_check_dir(backup_catalog_path); + results = pg_check_dir(catalogState->catalog_path); + if (results == 4) /* exists and not empty*/ elog(ERROR, "backup catalog already exist and it's not empty"); else if (results == -1) /*trouble accessing directory*/ { int errno_tmp = errno; elog(ERROR, "cannot open backup catalog directory \"%s\": %s", - backup_catalog_path, strerror(errno_tmp)); + catalogState->catalog_path, strerror(errno_tmp)); } /* create backup catalog root directory */ - dir_create_dir(backup_catalog_path, DIR_PERMISSION, false); + dir_create_dir(catalogState->catalog_path, DIR_PERMISSION, false); /* create backup catalog data directory */ - join_path_components(path, backup_catalog_path, BACKUPS_DIR); - dir_create_dir(path, DIR_PERMISSION, false); + dir_create_dir(catalogState->backup_subdir_path, DIR_PERMISSION, false); /* create backup catalog wal directory */ - join_path_components(arclog_path_dir, backup_catalog_path, "wal"); - dir_create_dir(arclog_path_dir, DIR_PERMISSION, false); + dir_create_dir(catalogState->wal_subdir_path, DIR_PERMISSION, false); - elog(INFO, "Backup catalog '%s' successfully inited", backup_catalog_path); + elog(INFO, "Backup catalog '%s' successfully inited", catalogState->catalog_path); return 0; } int -do_add_instance(char *backup_catalog_path, InstanceConfig *instance) +do_add_instance(CatalogState *catalogState, InstanceConfig *instance) { - char path[MAXPGPATH]; - char arclog_path_dir[MAXPGPATH]; struct stat st; /* PGDATA is always required */ @@ -66,16 +61,15 @@ do_add_instance(char *backup_catalog_path, InstanceConfig *instance) instance->xlog_seg_size = get_xlog_seg_size(instance->pgdata); /* Ensure that all root directories already exist */ - if (access(backup_catalog_path, F_OK) != 0) - elog(ERROR, "Directory does not exist: '%s'", backup_catalog_path); + /* TODO maybe call do_init() here instead of error?*/ + if (access(catalogState->catalog_path, F_OK) != 0) + elog(ERROR, "Directory does not exist: '%s'", catalogState->catalog_path); - join_path_components(path, backup_catalog_path, BACKUPS_DIR); - if (access(path, F_OK) != 0) - elog(ERROR, "Directory does not exist: '%s'", path); + if (access(catalogState->backup_subdir_path, F_OK) != 0) + elog(ERROR, "Directory does not exist: '%s'", catalogState->backup_subdir_path); - join_path_components(arclog_path_dir, backup_catalog_path, "wal"); - if (access(arclog_path_dir, F_OK) != 0) - elog(ERROR, "Directory does not exist: '%s'", arclog_path_dir); + if (access(catalogState->wal_subdir_path, F_OK) != 0) + elog(ERROR, "Directory does not exist: '%s'", catalogState->wal_subdir_path); if (stat(instance->backup_instance_path, &st) == 0 && S_ISDIR(st.st_mode)) elog(ERROR, "Instance '%s' backup directory already exists: '%s'", diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 8e09bccfc..04693bdff 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -71,9 +71,9 @@ char backup_instance_path[MAXPGPATH]; */ char arclog_path[MAXPGPATH] = ""; -/* ================ catalogState (END) =========== */ - +static CatalogState *catalogState = NULL; +/* ================ catalogState (END) =========== */ /* colon separated external directories list ("/path1:/path2") */ char *externaldir = NULL; @@ -424,6 +424,7 @@ main(int argc, char *argv[]) /* set location based on cmdline options only */ setMyLocation(backup_subcmd); + /* ===== 
catalogState ======*/ if (backup_path == NULL) { /* @@ -440,11 +441,24 @@ main(int argc, char *argv[]) /* Ensure that backup_path is an absolute path */ if (!is_absolute_path(backup_path)) elog(ERROR, "-B, --backup-path must be an absolute path"); + + catalogState = pgut_new(CatalogState); + strncpy(catalogState->catalog_path, backup_path, MAXPGPATH); + join_path_components(catalogState->backup_subdir_path, + catalogState->catalog_path, BACKUPS_DIR); + join_path_components(catalogState->wal_subdir_path, + catalogState->catalog_path, WAL_SUBDIR); } + /* backup_path is required for all pg_probackup commands except help, version and checkdb */ - if (backup_path == NULL && backup_subcmd != CHECKDB_CMD && backup_subcmd != HELP_CMD && backup_subcmd != VERSION_CMD) + if (backup_path == NULL && + backup_subcmd != CHECKDB_CMD && + backup_subcmd != HELP_CMD && + backup_subcmd != VERSION_CMD) elog(ERROR, "required parameter not specified: BACKUP_PATH (-B, --backup-path)"); + /* ===== catalogState (END) ======*/ + /* * Option --instance is required for all commands except * init, show, checkdb and validate @@ -772,11 +786,11 @@ main(int argc, char *argv[]) wal_file_path, wal_file_name, batch_size, !no_validate_wal); break; case ADD_INSTANCE_CMD: - return do_add_instance(backup_path, &instance_config); + return do_add_instance(catalogState, &instance_config); case DELETE_INSTANCE_CMD: return do_delete_instance(); case INIT_CMD: - return do_init(backup_path); + return do_init(catalogState); case BACKUP_CMD: { current.stream = stream_wal; diff --git a/src/pg_probackup.h b/src/pg_probackup.h index a0120dc8a..46575beae 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -55,6 +55,7 @@ extern const char *PROGRAM_EMAIL; /* Directory/File names */ #define DATABASE_DIR "database" #define BACKUPS_DIR "backups" +#define WAL_SUBDIR "wal" #if PG_VERSION_NUM >= 100000 #define PG_XLOG_DIR "pg_wal" #define PG_LOG_DIR "log" @@ -129,6 +130,7 @@ extern const char *PROGRAM_EMAIL; #define TC_CYAN_BOLD "\033[1;36m" #define TC_RESET "\033[0m" + typedef struct RedoParams { TimeLineID tli; @@ -746,11 +748,25 @@ typedef struct BackupPageHeader2 #define IsSshProtocol() (instance_config.remote.host && strcmp(instance_config.remote.proto, "ssh") == 0) +/* ====== CatalogState ======= */ + /* directory options */ extern char *backup_path; extern char backup_instance_path[MAXPGPATH]; extern char arclog_path[MAXPGPATH]; +typedef struct CatalogState +{ + /* $BACKUP_PATH */ + char catalog_path[MAXPGPATH]; //previously global var backup_path + /* $BACKUP_PATH/backups */ + char backup_subdir_path[MAXPGPATH]; + /* $BACKUP_PATH/wal */ + char wal_subdir_path[MAXPGPATH]; // previously global var arclog_path +} CatalogState; + +/* ====== CatalogState (END) ======= */ + /* common options */ extern pid_t my_pid; extern __thread int my_thread_num; @@ -839,8 +855,8 @@ extern void merge_chain(parray *parent_chain, extern parray *read_database_map(pgBackup *backup); /* in init.c */ -extern int do_init(char *backup_catalog_path); -extern int do_add_instance(char *backup_catalog_path, InstanceConfig *instance); +extern int do_init(CatalogState *catalogState); +extern int do_add_instance(CatalogState *catalogState, InstanceConfig *instance); /* in archive.c */ extern void do_archive_push(InstanceConfig *instance, char *wal_file_path, From 48b8f60fdbd89d83d8652f7ff29529cdeac73f57 Mon Sep 17 00:00:00 2001 From: anastasia Date: Wed, 3 Feb 2021 17:35:14 +0300 Subject: [PATCH 023/525] Refactoring. Introduce instanceState structure. 
Update code in init.c --- src/init.c | 15 ++++++++------- src/pg_probackup.c | 20 +++++++++++++++++++- src/pg_probackup.h | 19 +++++++++++++++++-- 3 files changed, 44 insertions(+), 10 deletions(-) diff --git a/src/init.c b/src/init.c index 07de8a98f..aa376389d 100644 --- a/src/init.c +++ b/src/init.c @@ -46,9 +46,10 @@ do_init(CatalogState *catalogState) } int -do_add_instance(CatalogState *catalogState, InstanceConfig *instance) +do_add_instance(InstanceState *instanceState, InstanceConfig *instance) { struct stat st; + CatalogState *catalogState = instanceState->catalog_state; /* PGDATA is always required */ if (instance->pgdata == NULL) @@ -71,22 +72,22 @@ do_add_instance(CatalogState *catalogState, InstanceConfig *instance) if (access(catalogState->wal_subdir_path, F_OK) != 0) elog(ERROR, "Directory does not exist: '%s'", catalogState->wal_subdir_path); - if (stat(instance->backup_instance_path, &st) == 0 && S_ISDIR(st.st_mode)) + if (stat(instanceState->instance_backup_subdir_path, &st) == 0 && S_ISDIR(st.st_mode)) elog(ERROR, "Instance '%s' backup directory already exists: '%s'", - instance->name, instance->backup_instance_path); + instanceState->instance_name, instanceState->instance_backup_subdir_path); /* * Create directory for wal files of this specific instance. * Existence check is extra paranoid because if we don't have such a * directory in data dir, we shouldn't have it in wal as well. */ - if (stat(instance->arclog_path, &st) == 0 && S_ISDIR(st.st_mode)) + if (stat(instanceState->instance_wal_subdir_path, &st) == 0 && S_ISDIR(st.st_mode)) elog(ERROR, "Instance '%s' WAL archive directory already exists: '%s'", - instance->name, instance->arclog_path); + instanceState->instance_name, instanceState->instance_wal_subdir_path); /* Create directory for data files of this specific instance */ - dir_create_dir(instance->backup_instance_path, DIR_PERMISSION, false); - dir_create_dir(instance->arclog_path, DIR_PERMISSION, false); + dir_create_dir(instanceState->instance_backup_subdir_path, DIR_PERMISSION, false); + dir_create_dir(instanceState->instance_wal_subdir_path, DIR_PERMISSION, false); /* * Write initial configuration file. 
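For reference, a minimal sketch of the calling convention these refactoring patches converge on, using only the CatalogState/InstanceState fields and helpers defined above (the real wiring lives in the pg_probackup.c hunk that follows, so this is a summary of the intent rather than additional patch content):

    /* catalog-level state, built once from -B/--backup-path */
    CatalogState *catalogState = pgut_new(CatalogState);
    strncpy(catalogState->catalog_path, backup_path, MAXPGPATH);
    join_path_components(catalogState->backup_subdir_path,
                         catalogState->catalog_path, BACKUPS_DIR);
    join_path_components(catalogState->wal_subdir_path,
                         catalogState->catalog_path, WAL_SUBDIR);

    /* per-instance state, built from --instance on top of the catalog state */
    InstanceState *instanceState = pgut_new(InstanceState);
    instanceState->catalog_state = catalogState;
    strncpy(instanceState->instance_name, instance_name, MAXPGPATH);
    join_path_components(instanceState->instance_backup_subdir_path,
                         catalogState->backup_subdir_path, instanceState->instance_name);
    join_path_components(instanceState->instance_wal_subdir_path,
                         catalogState->wal_subdir_path, instanceState->instance_name);

    /* entry points now take the state structs instead of raw path strings */
    do_init(catalogState);                             /* 'init' command         */
    do_add_instance(instanceState, &instance_config);  /* 'add-instance' command */
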
diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 04693bdff..ae5555beb 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -142,6 +142,9 @@ static bool compress_shortcut = false; /* ================ instanceState =========== */ char *instance_name; + +static InstanceState *instanceState = NULL; + /* ================ instanceState (END) =========== */ /* archive push options */ @@ -292,6 +295,7 @@ main(int argc, char *argv[]) pgBackupInit(¤t); /* Initialize current instance configuration */ + //TODO get git of this global variable craziness init_config(&instance_config, instance_name); PROGRAM_NAME = get_progname(argv[0]); @@ -459,6 +463,8 @@ main(int argc, char *argv[]) /* ===== catalogState (END) ======*/ + /* ===== instanceState ======*/ + /* * Option --instance is required for all commands except * init, show, checkdb and validate @@ -470,9 +476,21 @@ main(int argc, char *argv[]) elog(ERROR, "required parameter not specified: --instance"); } else + { /* Set instance name */ instance_config.name = pgut_strdup(instance_name); + instanceState = pgut_new(InstanceState); + instanceState->catalog_state = catalogState; + + strncpy(instanceState->instance_name, instance_name, MAXPGPATH); + join_path_components(instanceState->instance_backup_subdir_path, + catalogState->backup_subdir_path, instance_name); + join_path_components(instanceState->instance_wal_subdir_path, + catalogState->wal_subdir_path, instance_name); + } + /* ===== instanceState (END) ======*/ + /* * If --instance option was passed, construct paths for backup data and * xlog files of this backup instance. @@ -786,7 +804,7 @@ main(int argc, char *argv[]) wal_file_path, wal_file_name, batch_size, !no_validate_wal); break; case ADD_INSTANCE_CMD: - return do_add_instance(catalogState, &instance_config); + return do_add_instance(instanceState, &instance_config); case DELETE_INSTANCE_CMD: return do_delete_instance(); case INIT_CMD: diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 46575beae..635739a59 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -795,8 +795,23 @@ extern bool delete_expired; extern bool merge_expired; extern bool dry_run; -/* other options */ +/* ===== instanceState ===== */ extern char *instance_name; +typedef struct InstanceState +{ + /* catalog, this instance belongs to */ + CatalogState *catalog_state; + + char instance_name[MAXPGPATH]; //previously global var instance_name + /* $BACKUP_PATH/backups/instance_name */ + char instance_backup_subdir_path[MAXPGPATH]; + /* $BACKUP_PATH/backups/instance_name */ + char instance_wal_subdir_path[MAXPGPATH]; // previously global var arclog_path + + //TODO add config here +} InstanceState; + +/* ===== instanceState (END) ===== */ /* show options */ extern ShowFormat show_format; @@ -856,7 +871,7 @@ extern parray *read_database_map(pgBackup *backup); /* in init.c */ extern int do_init(CatalogState *catalogState); -extern int do_add_instance(CatalogState *catalogState, InstanceConfig *instance); +extern int do_add_instance(InstanceState *instanceState, InstanceConfig *instance); /* in archive.c */ extern void do_archive_push(InstanceConfig *instance, char *wal_file_path, From 3d62e2a1d612c80d313b45c2c5f68480ddaf0bb8 Mon Sep 17 00:00:00 2001 From: anastasia Date: Wed, 3 Feb 2021 17:48:59 +0300 Subject: [PATCH 024/525] Refactoting. 
Move new state definitions to a separate header --- src/pg_probackup.c | 3 ++- src/pg_probackup.h | 27 ++++-------------------- src/pg_probackup_state.h | 44 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 50 insertions(+), 24 deletions(-) create mode 100644 src/pg_probackup_state.h diff --git a/src/pg_probackup.c b/src/pg_probackup.c index ae5555beb..3b6e8a499 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -11,7 +11,7 @@ * * - * TODO: + * TODO (see pg_probackup_state.h): * * Functions that work with a backup catalog accept catalogState, * which currently only contains pathes to backup catalog subdirectories @@ -38,6 +38,7 @@ */ #include "pg_probackup.h" +#include "pg_probackup_state.h" #include "pg_getopt.h" #include "streamutil.h" diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 635739a59..b5af7b6d4 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -10,6 +10,7 @@ #ifndef PG_PROBACKUP_H #define PG_PROBACKUP_H + #include "postgres_fe.h" #include "libpq-fe.h" #include "libpq-int.h" @@ -39,6 +40,9 @@ #include "datapagemap.h" #include "utils/thread.h" +#include "pg_probackup_state.h" + + #ifdef WIN32 #define __thread __declspec(thread) #else @@ -755,16 +759,6 @@ extern char *backup_path; extern char backup_instance_path[MAXPGPATH]; extern char arclog_path[MAXPGPATH]; -typedef struct CatalogState -{ - /* $BACKUP_PATH */ - char catalog_path[MAXPGPATH]; //previously global var backup_path - /* $BACKUP_PATH/backups */ - char backup_subdir_path[MAXPGPATH]; - /* $BACKUP_PATH/wal */ - char wal_subdir_path[MAXPGPATH]; // previously global var arclog_path -} CatalogState; - /* ====== CatalogState (END) ======= */ /* common options */ @@ -797,19 +791,6 @@ extern bool dry_run; /* ===== instanceState ===== */ extern char *instance_name; -typedef struct InstanceState -{ - /* catalog, this instance belongs to */ - CatalogState *catalog_state; - - char instance_name[MAXPGPATH]; //previously global var instance_name - /* $BACKUP_PATH/backups/instance_name */ - char instance_backup_subdir_path[MAXPGPATH]; - /* $BACKUP_PATH/backups/instance_name */ - char instance_wal_subdir_path[MAXPGPATH]; // previously global var arclog_path - - //TODO add config here -} InstanceState; /* ===== instanceState (END) ===== */ diff --git a/src/pg_probackup_state.h b/src/pg_probackup_state.h new file mode 100644 index 000000000..ab20a55a3 --- /dev/null +++ b/src/pg_probackup_state.h @@ -0,0 +1,44 @@ +/*------------------------------------------------------------------------- + * + * pg_probackup_state.h: Definitions of internal pg_probackup states + * + * Portions Copyright (c) 2021-2018, Postgres Professional + * + *------------------------------------------------------------------------- + */ +#ifndef PG_PROBACKUP_STATE_H +#define PG_PROBACKUP_STATE_H + +/* ====== CatalogState ======= */ + +typedef struct CatalogState +{ + /* $BACKUP_PATH */ + char catalog_path[MAXPGPATH]; //previously global var backup_path + /* $BACKUP_PATH/backups */ + char backup_subdir_path[MAXPGPATH]; + /* $BACKUP_PATH/wal */ + char wal_subdir_path[MAXPGPATH]; // previously global var arclog_path +} CatalogState; + +/* ====== CatalogState (END) ======= */ + + +/* ===== instanceState ===== */ +typedef struct InstanceState +{ + /* catalog, this instance belongs to */ + CatalogState *catalog_state; + + char instance_name[MAXPGPATH]; //previously global var instance_name + /* $BACKUP_PATH/backups/instance_name */ + char instance_backup_subdir_path[MAXPGPATH]; + /* $BACKUP_PATH/backups/instance_name */ + char 
instance_wal_subdir_path[MAXPGPATH]; // previously global var arclog_path + + //TODO add config here +} InstanceState; + +/* ===== instanceState (END) ===== */ + +#endif /* PG_PROBACKUP_STATE_H */ From cc58553514ea4c54ddf3520953caacd2a8da848e Mon Sep 17 00:00:00 2001 From: anastasia Date: Wed, 3 Feb 2021 18:00:21 +0300 Subject: [PATCH 025/525] Refactoring. Cleanup comments --- src/pg_probackup.c | 6 ++++-- src/pg_probackup_state.h | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 3b6e8a499..3d6990945 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -19,7 +19,7 @@ * * Functions that work with an instance accept instanceState argument, which * includes catalogState, instance_name, - * info about pgdata associated with the instance, + * info about pgdata associated with the instance (see pgState), * various instance config options, and list of backups belonging to the instance. * + function specific options. * @@ -31,6 +31,9 @@ * which includes link to the instanceState, backup_id and backup-specific info. * + function specific options. * + * Functions that work with a postgreSQL instance (i.e. checkdb) accept pgState, + * which includes info about pgdata directory and connection. + * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION * Portions Copyright (c) 2015-2019, Postgres Professional * @@ -72,7 +75,6 @@ char backup_instance_path[MAXPGPATH]; */ char arclog_path[MAXPGPATH] = ""; - static CatalogState *catalogState = NULL; /* ================ catalogState (END) =========== */ diff --git a/src/pg_probackup_state.h b/src/pg_probackup_state.h index ab20a55a3..19e096328 100644 --- a/src/pg_probackup_state.h +++ b/src/pg_probackup_state.h @@ -2,7 +2,7 @@ * * pg_probackup_state.h: Definitions of internal pg_probackup states * - * Portions Copyright (c) 2021-2018, Postgres Professional + * Portions Copyright (c) 2021, Postgres Professional * *------------------------------------------------------------------------- */ From dbf523308b07ae1d23e511110a0ab3c453559e87 Mon Sep 17 00:00:00 2001 From: anastasia Date: Wed, 3 Feb 2021 18:39:51 +0300 Subject: [PATCH 026/525] Refactoring. Use instanceState instead of global variable instance_name --- src/backup.c | 14 +++++------ src/catalog.c | 2 +- src/delete.c | 16 ++++++------- src/init.c | 2 +- src/merge.c | 8 +++---- src/parsexlog.c | 2 +- src/pg_probackup.c | 17 +++++++------- src/pg_probackup.h | 20 +++++++++------- src/restore.c | 10 ++++---- src/validate.c | 58 ++++++++++++++++++++++++++++------------------ 10 files changed, 83 insertions(+), 66 deletions(-) diff --git a/src/backup.c b/src/backup.c index c84b1b7d5..bbd168db0 100644 --- a/src/backup.c +++ b/src/backup.c @@ -47,7 +47,7 @@ static void backup_cleanup(bool fatal, void *userdata); static void *backup_files(void *arg); -static void do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool backup_logs); +static void do_backup_pg(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool backup_logs); static void pg_start_backup(const char *label, bool smooth, pgBackup *backup, PGNodeInfo *nodeInfo, PGconn *conn); @@ -92,7 +92,7 @@ backup_stopbackup_callback(bool fatal, void *userdata) * Move files from 'pgdata' to a subdirectory in backup catalog. 
*/ static void -do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool backup_logs) +do_backup_pg(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool backup_logs) { int i; char external_prefix[MAXPGPATH]; /* Temp value. Used as template */ @@ -724,7 +724,7 @@ pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo) * Entry point of pg_probackup BACKUP subcommand. */ int -do_backup(pgSetBackupParams *set_backup_params, +do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, bool no_validate, bool no_sync, bool backup_logs) { PGconn *backup_conn = NULL; @@ -740,7 +740,7 @@ do_backup(pgSetBackupParams *set_backup_params, current.external_dir_str = instance_config.external_dir_str; /* Create backup directory and BACKUP_CONTROL_FILE */ - pgBackupCreateDir(¤t, backup_instance_path); + pgBackupCreateDir(¤t, instanceState->instance_backup_subdir_path); if (!instance_config.pgdata) elog(ERROR, "required parameter not specified: PGDATA " @@ -758,7 +758,7 @@ do_backup(pgSetBackupParams *set_backup_params, elog(INFO, "Backup start, pg_probackup version: %s, instance: %s, backup ID: %s, backup mode: %s, " "wal mode: %s, remote: %s, compress-algorithm: %s, compress-level: %i", - PROGRAM_VERSION, instance_name, base36enc(current.backup_id), pgBackupGetBackupMode(¤t, false), + PROGRAM_VERSION, instanceState->instance_name, base36enc(current.backup_id), pgBackupGetBackupMode(¤t, false), current.stream ? "STREAM" : "ARCHIVE", IsSshProtocol() ? "true" : "false", deparse_compress_alg(current.compress_alg), current.compress_level); @@ -824,7 +824,7 @@ do_backup(pgSetBackupParams *set_backup_params, add_note(¤t, set_backup_params->note); /* backup data */ - do_backup_instance(backup_conn, &nodeInfo, no_sync, backup_logs); + do_backup_pg(backup_conn, &nodeInfo, no_sync, backup_logs); pgut_atexit_pop(backup_cleanup, NULL); /* compute size of wal files of this backup stored in the archive */ @@ -879,7 +879,7 @@ do_backup(pgSetBackupParams *set_backup_params, * which are expired according to retention policies */ if (delete_expired || merge_expired || delete_wal) - do_retention(); + do_retention(instanceState); return 0; } diff --git a/src/catalog.c b/src/catalog.c index 4d4df123d..f215e90e2 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -112,7 +112,7 @@ read_backup(const char *root_dir) */ void write_backup_status(pgBackup *backup, BackupStatus status, - const char *instance_name, bool strict) + bool strict) { pgBackup *tmp; diff --git a/src/delete.c b/src/delete.c index 2130f1a1d..b020716fa 100644 --- a/src/delete.c +++ b/src/delete.c @@ -29,7 +29,7 @@ static bool backup_merged = false; /* At least one merge was enacted */ static bool wal_deleted = false; /* At least one WAL segments was deleted */ void -do_delete(time_t backup_id) +do_delete(InstanceState *instanceState, time_t backup_id) { int i; parray *backup_list, @@ -39,7 +39,7 @@ do_delete(time_t backup_id) char size_to_delete_pretty[20]; /* Get complete list of backups */ - backup_list = catalog_get_backup_list(instance_name, INVALID_BACKUP_ID); + backup_list = catalog_get_backup_list(instanceState->instance_name, INVALID_BACKUP_ID); delete_list = parray_new(); @@ -123,7 +123,7 @@ do_delete(time_t backup_id) * which FULL backup should be keeped for redundancy obligation(only valid do), * but if invalid backup is not guarded by retention - it is removed */ -void do_retention(void) +void do_retention(InstanceState *instanceState) { parray *backup_list = NULL; parray *to_keep_list = 
parray_new(); @@ -139,7 +139,7 @@ void do_retention(void) MyLocation = FIO_LOCAL_HOST; /* Get a complete list of backups. */ - backup_list = catalog_get_backup_list(instance_name, INVALID_BACKUP_ID); + backup_list = catalog_get_backup_list(instanceState->instance_name, INVALID_BACKUP_ID); if (parray_num(backup_list) == 0) backup_list_is_empty = true; @@ -750,7 +750,7 @@ delete_backup_files(pgBackup *backup) * Update STATUS to BACKUP_STATUS_DELETING in preparation for the case which * the error occurs before deleting all backup files. */ - write_backup_status(backup, BACKUP_STATUS_DELETING, instance_name, false); + write_backup_status(backup, BACKUP_STATUS_DELETING, false); /* list files to be deleted */ files = parray_new(); @@ -966,7 +966,7 @@ delete_walfiles_in_tli(XLogRecPtr keep_lsn, timelineInfo *tlinfo, /* Delete all backup files and wal files of given instance. */ int -do_delete_instance(void) +do_delete_instance(InstanceState *instanceState) { parray *backup_list; int i; @@ -974,7 +974,7 @@ do_delete_instance(void) /* Delete all backups. */ - backup_list = catalog_get_backup_list(instance_name, INVALID_BACKUP_ID); + backup_list = catalog_get_backup_list(instanceState->instance_name, INVALID_BACKUP_ID); catalog_lock_backup_list(backup_list, 0, parray_num(backup_list) - 1, true, true); @@ -1008,7 +1008,7 @@ do_delete_instance(void) elog(ERROR, "Can't remove \"%s\": %s", arclog_path, strerror(errno)); - elog(INFO, "Instance '%s' successfully deleted", instance_name); + elog(INFO, "Instance '%s' successfully deleted", instanceState->instance_name); return 0; } diff --git a/src/init.c b/src/init.c index aa376389d..29506749b 100644 --- a/src/init.c +++ b/src/init.c @@ -121,6 +121,6 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) /* pgdata was set through command line */ do_set_config(true); - elog(INFO, "Instance '%s' successfully inited", instance_name); + elog(INFO, "Instance '%s' successfully inited", instanceState->instance_name); return 0; } diff --git a/src/merge.c b/src/merge.c index 3c51a1fae..6dc599fe4 100644 --- a/src/merge.c +++ b/src/merge.c @@ -68,7 +68,7 @@ static bool is_forward_compatible(parray *parent_chain); * - Remove unnecessary files, which doesn't exist in the target backup anymore */ void -do_merge(time_t backup_id) +do_merge(InstanceState *instanceState, time_t backup_id) { parray *backups; parray *merge_list = parray_new(); @@ -80,13 +80,13 @@ do_merge(time_t backup_id) if (backup_id == INVALID_BACKUP_ID) elog(ERROR, "required parameter is not specified: --backup-id"); - if (instance_name == NULL) + if (instanceState == NULL) elog(ERROR, "required parameter is not specified: --instance"); elog(INFO, "Merge started"); /* Get list of all backups sorted in order of descending start time */ - backups = catalog_get_backup_list(instance_name, INVALID_BACKUP_ID); + backups = catalog_get_backup_list(instanceState->instance_name, INVALID_BACKUP_ID); /* Find destination backup first */ for (i = 0; i < parray_num(backups); i++) @@ -597,7 +597,7 @@ merge_chain(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup) write_backup(backup, true); } else - write_backup_status(backup, BACKUP_STATUS_MERGING, instance_name, true); + write_backup_status(backup, BACKUP_STATUS_MERGING, true); } /* Construct path to database dir: /backup_dir/instance_name/FULL/database */ diff --git a/src/parsexlog.c b/src/parsexlog.c index 41a410d30..e427041d0 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -385,7 +385,7 @@ 
validate_backup_wal_from_start_to_stop(pgBackup *backup, * If we don't have WAL between start_lsn and stop_lsn, * the backup is definitely corrupted. Update its status. */ - write_backup_status(backup, BACKUP_STATUS_CORRUPT, instance_name, true); + write_backup_status(backup, BACKUP_STATUS_CORRUPT, true); elog(WARNING, "There are not enough WAL records to consistenly restore " "backup %s from START LSN: %X/%X to STOP LSN: %X/%X", diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 3d6990945..9af364674 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -809,7 +809,7 @@ main(int argc, char *argv[]) case ADD_INSTANCE_CMD: return do_add_instance(instanceState, &instance_config); case DELETE_INSTANCE_CMD: - return do_delete_instance(); + return do_delete_instance(instanceState); case INIT_CMD: return do_init(catalogState); case BACKUP_CMD: @@ -821,10 +821,11 @@ main(int argc, char *argv[]) elog(ERROR, "required parameter not specified: BACKUP_MODE " "(-b, --backup-mode)"); - return do_backup(set_backup_params, no_validate, no_sync, backup_logs); + return do_backup(instanceState, set_backup_params, + no_validate, no_sync, backup_logs); } case RESTORE_CMD: - return do_restore_or_validate(current.backup_id, + return do_restore_or_validate(instanceState, current.backup_id, recovery_target_options, restore_params, no_sync); case VALIDATE_CMD: @@ -834,11 +835,11 @@ main(int argc, char *argv[]) if (datname_exclude_list || datname_include_list) elog(ERROR, "You must specify parameter (-i, --backup-id) for partial validation"); - return do_validate_all(backup_path); + return do_validate_all(catalogState, instanceState); } else /* PITR validation and, optionally, partial validation */ - return do_restore_or_validate(current.backup_id, + return do_restore_or_validate(instanceState, current.backup_id, recovery_target_options, restore_params, no_sync); @@ -859,13 +860,13 @@ main(int argc, char *argv[]) if (delete_status) do_delete_status(&instance_config, delete_status); else - do_retention(); + do_retention(instanceState); } else - do_delete(current.backup_id); + do_delete(instanceState, current.backup_id); break; case MERGE_CMD: - do_merge(current.backup_id); + do_merge(instanceState, current.backup_id); break; case SHOW_CONFIG_CMD: do_show_config(); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index b5af7b6d4..3125e81ad 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -49,6 +49,9 @@ #include #endif +/* Wrap the code that we're going to delete after refactoring in this define*/ +#define REFACTORE_ME + /* pgut client variables and full path */ extern const char *PROGRAM_NAME; extern const char *PROGRAM_NAME_FULL; @@ -808,7 +811,7 @@ extern pgBackup current; extern char** commands_args; /* in backup.c */ -extern int do_backup(pgSetBackupParams *set_backup_params, +extern int do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, bool no_validate, bool no_sync, bool backup_logs); extern void do_checkdb(bool need_amcheck, ConnectionOptions conn_opt, char *pgdata); @@ -822,7 +825,8 @@ extern char *pg_ptrack_get_block(ConnectionArgs *arguments, BlockNumber blknum, size_t *result_size, int ptrack_version_num, const char *ptrack_schema); /* in restore.c */ -extern int do_restore_or_validate(time_t target_backup_id, +extern int do_restore_or_validate(InstanceState *instanceState, + time_t target_backup_id, pgRecoveryTarget *rt, pgRestoreParams *params, bool no_sync); @@ -843,7 +847,7 @@ extern parray *read_timeline_history(const char *arclog_path, 
TimeLineID targetT extern bool tliIsPartOfHistory(const parray *timelines, TimeLineID tli); /* in merge.c */ -extern void do_merge(time_t backup_id); +extern void do_merge(InstanceState *instanceState, time_t backup_id); extern void merge_backups(pgBackup *backup, pgBackup *next_backup); extern void merge_chain(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup); @@ -872,10 +876,10 @@ extern int do_show(char *backup_catalog_path, const char *instance_name, time_t requested_backup_id, bool show_archive); /* in delete.c */ -extern void do_delete(time_t backup_id); +extern void do_delete(InstanceState *instanceState, time_t backup_id); extern void delete_backup_files(pgBackup *backup); -extern void do_retention(void); -extern int do_delete_instance(void); +extern void do_retention(InstanceState *instanceState); +extern int do_delete_instance(InstanceState *instanceState); extern void do_delete_status(InstanceConfig *instance_config, const char *status); /* in fetch.c */ @@ -893,7 +897,7 @@ extern void help_command(ProbackupSubcmd const subcmd); /* in validate.c */ extern void pgBackupValidate(pgBackup* backup, pgRestoreParams *params); -extern int do_validate_all(char *backup_catalog_path); +extern int do_validate_all(CatalogState *catalogState, InstanceState *instanceState); extern int validate_one_page(Page page, BlockNumber absolute_blkno, XLogRecPtr stop_lsn, PageState *page_st, uint32 checksum_version); @@ -912,7 +916,7 @@ extern bool validate_tablespace_map(pgBackup *backup); extern pgBackup *read_backup(const char *root_dir); extern void write_backup(pgBackup *backup, bool strict); extern void write_backup_status(pgBackup *backup, BackupStatus status, - const char *instance_name, bool strict); + bool strict); extern void write_backup_data_bytes(pgBackup *backup); extern bool lock_backup(pgBackup *backup, bool strict, bool exclusive); diff --git a/src/restore.c b/src/restore.c index 3f0adf7b7..ad7300ba8 100644 --- a/src/restore.c +++ b/src/restore.c @@ -94,7 +94,7 @@ set_orphan_status(parray *backups, pgBackup *parent_backup) if (backup->status == BACKUP_STATUS_OK || backup->status == BACKUP_STATUS_DONE) { - write_backup_status(backup, BACKUP_STATUS_ORPHAN, instance_name, true); + write_backup_status(backup, BACKUP_STATUS_ORPHAN, true); elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s", @@ -117,7 +117,7 @@ set_orphan_status(parray *backups, pgBackup *parent_backup) * Entry point of pg_probackup RESTORE and VALIDATE subcommands. */ int -do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt, +do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pgRecoveryTarget *rt, pgRestoreParams *params, bool no_sync) { int i = 0; @@ -136,7 +136,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt, bool backup_has_tblspc = true; /* backup contain tablespace */ XLogRecPtr shift_lsn = InvalidXLogRecPtr; - if (instance_name == NULL) + if (instanceState == NULL) elog(ERROR, "required parameter not specified: --instance"); if (params->is_restore) @@ -216,7 +216,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt, elog(LOG, "%s begin.", action); /* Get list of all backups sorted in order of descending start time */ - backups = catalog_get_backup_list(instance_name, INVALID_BACKUP_ID); + backups = catalog_get_backup_list(instanceState->instance_name, INVALID_BACKUP_ID); /* Find backup range we should restore or validate. 
*/ while ((i < parray_num(backups)) && !dest_backup) @@ -364,7 +364,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt, if (backup->status == BACKUP_STATUS_OK || backup->status == BACKUP_STATUS_DONE) { - write_backup_status(backup, BACKUP_STATUS_ORPHAN, instance_name, true); + write_backup_status(backup, BACKUP_STATUS_ORPHAN, true); elog(WARNING, "Backup %s is orphaned because his parent %s is missing", base36enc(backup->start_time), missing_backup_id); diff --git a/src/validate.c b/src/validate.c index 40c94af67..0049b4f71 100644 --- a/src/validate.c +++ b/src/validate.c @@ -16,7 +16,7 @@ #include "utils/thread.h" static void *pgBackupValidateFiles(void *arg); -static void do_validate_instance(void); +static void do_validate_instance(InstanceState *instanceState); static bool corrupted_backup_found = false; static bool skipped_due_to_lock = false; @@ -75,7 +75,7 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) { elog(WARNING, "Backup %s has status %s, change it to ERROR and skip validation", base36enc(backup->start_time), status2str(backup->status)); - write_backup_status(backup, BACKUP_STATUS_ERROR, instance_name, true); + write_backup_status(backup, BACKUP_STATUS_ERROR, true); corrupted_backup_found = true; return; } @@ -121,7 +121,7 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) { elog(WARNING, "Backup %s file list is corrupted", base36enc(backup->start_time)); backup->status = BACKUP_STATUS_CORRUPT; - write_backup_status(backup, BACKUP_STATUS_CORRUPT, instance_name, true); + write_backup_status(backup, BACKUP_STATUS_CORRUPT, true); return; } @@ -190,7 +190,7 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) backup->status = BACKUP_STATUS_CORRUPT; write_backup_status(backup, corrupted ? BACKUP_STATUS_CORRUPT : - BACKUP_STATUS_OK, instance_name, true); + BACKUP_STATUS_OK, true); if (corrupted) elog(WARNING, "Backup %s data files are corrupted", base36enc(backup->start_time)); @@ -214,7 +214,7 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) "https://github.com/postgrespro/pg_probackup/issues/132", base36enc(backup->start_time)); backup->status = BACKUP_STATUS_CORRUPT; - write_backup_status(backup, BACKUP_STATUS_CORRUPT, instance_name, true); + write_backup_status(backup, BACKUP_STATUS_CORRUPT, true); } } } @@ -380,25 +380,25 @@ pgBackupValidateFiles(void *arg) /* * Validate all backups in the backup catalog. * If --instance option was provided, validate only backups of this instance. + * + * TODO: split into two functions: do_validate_catalog and do_validate_instance. 
*/ int -do_validate_all(char *backup_catalog_path) +do_validate_all(CatalogState *catalogState, InstanceState *instanceState) { corrupted_backup_found = false; skipped_due_to_lock = false; - if (instance_name == NULL) + if (instanceState == NULL) { /* Show list of instances */ - char path[MAXPGPATH]; DIR *dir; struct dirent *dent; /* open directory and list contents */ - join_path_components(path, backup_catalog_path, BACKUPS_DIR); - dir = opendir(path); + dir = opendir(catalogState->backup_subdir_path); if (dir == NULL) - elog(ERROR, "cannot open directory \"%s\": %s", path, strerror(errno)); + elog(ERROR, "cannot open directory \"%s\": %s", catalogState->backup_subdir_path, strerror(errno)); errno = 0; while ((dent = readdir(dir))) @@ -406,13 +406,15 @@ do_validate_all(char *backup_catalog_path) char conf_path[MAXPGPATH]; char child[MAXPGPATH]; struct stat st; + InstanceState *instance_state; + /* skip entries point current dir or parent dir */ if (strcmp(dent->d_name, ".") == 0 || strcmp(dent->d_name, "..") == 0) continue; - join_path_components(child, path, dent->d_name); + join_path_components(child, catalogState->backup_subdir_path, dent->d_name); if (lstat(child, &st) == -1) elog(ERROR, "cannot stat file \"%s\": %s", child, strerror(errno)); @@ -423,10 +425,20 @@ do_validate_all(char *backup_catalog_path) /* * Initialize instance configuration. */ - instance_name = dent->d_name; + instance_state = pgut_new(InstanceState); + strncpy(instanceState->instance_name, dent->d_name, MAXPGPATH); + + join_path_components(instanceState->instance_backup_subdir_path, + catalogState->backup_subdir_path, instance_name); + join_path_components(instanceState->instance_wal_subdir_path, + catalogState->wal_subdir_path, instance_name); + +#ifdef REFACTORE_ME sprintf(backup_instance_path, "%s/%s/%s", - backup_catalog_path, BACKUPS_DIR, instance_name); - sprintf(arclog_path, "%s/%s/%s", backup_catalog_path, "wal", instance_name); + catalogState->catalog_path, BACKUPS_DIR, instanceState->instance_name); + + sprintf(arclog_path, "%s/%s/%s", catalogState->catalog_path, "wal", instanceState->instance_name); +#endif join_path_components(conf_path, backup_instance_path, BACKUP_CATALOG_CONF_FILE); if (config_read_opt(conf_path, instance_options, ERROR, false, @@ -437,12 +449,12 @@ do_validate_all(char *backup_catalog_path) continue; } - do_validate_instance(); + do_validate_instance(instanceState); } } else { - do_validate_instance(); + do_validate_instance(instanceState); } /* TODO: Probably we should have different exit code for every condition @@ -472,17 +484,17 @@ do_validate_all(char *backup_catalog_path) * Validate all backups in the given instance of the backup catalog. 
*/ static void -do_validate_instance(void) +do_validate_instance(InstanceState *instanceState) { int i; int j; parray *backups; pgBackup *current_backup = NULL; - elog(INFO, "Validate backups of the instance '%s'", instance_name); + elog(INFO, "Validate backups of the instance '%s'", instanceState->instance_name); /* Get list of all backups sorted in order of descending start time */ - backups = catalog_get_backup_list(instance_name, INVALID_BACKUP_ID); + backups = catalog_get_backup_list(instanceState->instance_name, INVALID_BACKUP_ID); /* Examine backups one by one and validate them */ for (i = 0; i < parray_num(backups); i++) @@ -512,7 +524,7 @@ do_validate_instance(void) if (current_backup->status == BACKUP_STATUS_OK || current_backup->status == BACKUP_STATUS_DONE) { - write_backup_status(current_backup, BACKUP_STATUS_ORPHAN, instance_name, true); + write_backup_status(current_backup, BACKUP_STATUS_ORPHAN, true); elog(WARNING, "Backup %s is orphaned because his parent %s is missing", base36enc(current_backup->start_time), parent_backup_id); @@ -536,7 +548,7 @@ do_validate_instance(void) if (current_backup->status == BACKUP_STATUS_OK || current_backup->status == BACKUP_STATUS_DONE) { - write_backup_status(current_backup, BACKUP_STATUS_ORPHAN, instance_name, true); + write_backup_status(current_backup, BACKUP_STATUS_ORPHAN, true); elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s", base36enc(current_backup->start_time), backup_id, status2str(tmp_backup->status)); @@ -609,7 +621,7 @@ do_validate_instance(void) if (backup->status == BACKUP_STATUS_OK || backup->status == BACKUP_STATUS_DONE) { - write_backup_status(backup, BACKUP_STATUS_ORPHAN, instance_name, true); + write_backup_status(backup, BACKUP_STATUS_ORPHAN, true); elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s", base36enc(backup->start_time), From 341d7b82746f4b17d807f0d600ac6495090ced21 Mon Sep 17 00:00:00 2001 From: anastasia Date: Wed, 3 Feb 2021 20:00:43 +0300 Subject: [PATCH 027/525] Refactoring. Get rid of global variable instance_name --- src/backup.c | 10 ++++++---- src/delete.c | 11 ++++++----- src/merge.c | 11 ++++------- src/pg_probackup.c | 6 +++--- src/pg_probackup.h | 3 +-- src/restore.c | 31 ++++++++++++++++--------------- src/validate.c | 8 ++++---- 7 files changed, 40 insertions(+), 40 deletions(-) diff --git a/src/backup.c b/src/backup.c index bbd168db0..17bb3345f 100644 --- a/src/backup.c +++ b/src/backup.c @@ -47,7 +47,8 @@ static void backup_cleanup(bool fatal, void *userdata); static void *backup_files(void *arg); -static void do_backup_pg(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool backup_logs); +static void do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, + PGNodeInfo *nodeInfo, bool no_sync, bool backup_logs); static void pg_start_backup(const char *label, bool smooth, pgBackup *backup, PGNodeInfo *nodeInfo, PGconn *conn); @@ -92,7 +93,8 @@ backup_stopbackup_callback(bool fatal, void *userdata) * Move files from 'pgdata' to a subdirectory in backup catalog. */ static void -do_backup_pg(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool backup_logs) +do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, + PGNodeInfo *nodeInfo, bool no_sync, bool backup_logs) { int i; char external_prefix[MAXPGPATH]; /* Temp value. 
Used as template */ @@ -155,7 +157,7 @@ do_backup_pg(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool backu current.backup_mode == BACKUP_MODE_DIFF_DELTA) { /* get list of backups already taken */ - backup_list = catalog_get_backup_list(instance_name, INVALID_BACKUP_ID); + backup_list = catalog_get_backup_list(instanceState->instance_name, INVALID_BACKUP_ID); prev_backup = catalog_get_last_data_backup(backup_list, current.tli, current.start_time); if (prev_backup == NULL) @@ -824,7 +826,7 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, add_note(¤t, set_backup_params->note); /* backup data */ - do_backup_pg(backup_conn, &nodeInfo, no_sync, backup_logs); + do_backup_pg(instanceState, backup_conn, &nodeInfo, no_sync, backup_logs); pgut_atexit_pop(backup_cleanup, NULL); /* compute size of wal files of this backup stored in the archive */ diff --git a/src/delete.c b/src/delete.c index b020716fa..d3d719506 100644 --- a/src/delete.c +++ b/src/delete.c @@ -18,8 +18,8 @@ static void delete_walfiles_in_tli(XLogRecPtr keep_lsn, timelineInfo *tli, uint32 xlog_seg_size, bool dry_run); static void do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purge_list); -static void do_retention_merge(parray *backup_list, parray *to_keep_list, - parray *to_purge_list); +static void do_retention_merge(InstanceState *instanceState, parray *backup_list, + parray *to_keep_list, parray *to_purge_list); static void do_retention_purge(parray *to_keep_list, parray *to_purge_list); static void do_retention_wal(bool dry_run); @@ -172,7 +172,7 @@ void do_retention(InstanceState *instanceState) do_retention_internal(backup_list, to_keep_list, to_purge_list); if (merge_expired && !dry_run && !backup_list_is_empty) - do_retention_merge(backup_list, to_keep_list, to_purge_list); + do_retention_merge(instanceState, backup_list, to_keep_list, to_purge_list); if (delete_expired && !dry_run && !backup_list_is_empty) do_retention_purge(to_keep_list, to_purge_list); @@ -420,7 +420,8 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg /* Merge partially expired incremental chains */ static void -do_retention_merge(parray *backup_list, parray *to_keep_list, parray *to_purge_list) +do_retention_merge(InstanceState *instanceState, parray *backup_list, + parray *to_keep_list, parray *to_purge_list) { int i; int j; @@ -539,7 +540,7 @@ do_retention_merge(parray *backup_list, parray *to_keep_list, parray *to_purge_l */ keep_backup = parray_get(merge_list, 0); - merge_chain(merge_list, full_backup, keep_backup); + merge_chain(instanceState, merge_list, full_backup, keep_backup); backup_merged = true; for (j = parray_num(merge_list) - 2; j >= 0; j--) diff --git a/src/merge.c b/src/merge.c index 6dc599fe4..90376c02c 100644 --- a/src/merge.c +++ b/src/merge.c @@ -405,7 +405,7 @@ do_merge(InstanceState *instanceState, time_t backup_id) catalog_lock_backup_list(merge_list, parray_num(merge_list) - 1, 0, true, true); /* do actual merge */ - merge_chain(merge_list, full_backup, dest_backup); + merge_chain(instanceState, merge_list, full_backup, dest_backup); pgBackupValidate(full_backup, NULL); if (full_backup->status == BACKUP_STATUS_CORRUPT) @@ -434,7 +434,8 @@ do_merge(InstanceState *instanceState, time_t backup_id) * that chain is ok. 
*/ void -merge_chain(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup) +merge_chain(InstanceState *instanceState, + parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup) { int i; char *dest_backup_id; @@ -846,13 +847,9 @@ merge_chain(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup) else { /* Ugly */ - char backups_dir[MAXPGPATH]; - char instance_dir[MAXPGPATH]; char destination_path[MAXPGPATH]; - join_path_components(backups_dir, backup_path, BACKUPS_DIR); - join_path_components(instance_dir, backups_dir, instance_name); - join_path_components(destination_path, instance_dir, + join_path_components(destination_path, instanceState->instance_backup_subdir_path, base36enc(full_backup->merge_dest_backup)); elog(LOG, "Rename %s to %s", full_backup->root_dir, destination_path); diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 9af364674..423087b4c 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -144,7 +144,7 @@ static char *delete_status = NULL; static bool compress_shortcut = false; /* ================ instanceState =========== */ -char *instance_name; +static char *instance_name; static InstanceState *instanceState = NULL; @@ -488,9 +488,9 @@ main(int argc, char *argv[]) strncpy(instanceState->instance_name, instance_name, MAXPGPATH); join_path_components(instanceState->instance_backup_subdir_path, - catalogState->backup_subdir_path, instance_name); + catalogState->backup_subdir_path, instanceState->instance_name); join_path_components(instanceState->instance_wal_subdir_path, - catalogState->wal_subdir_path, instance_name); + catalogState->wal_subdir_path, instanceState->instance_name); } /* ===== instanceState (END) ======*/ diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 3125e81ad..756361165 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -793,7 +793,6 @@ extern bool merge_expired; extern bool dry_run; /* ===== instanceState ===== */ -extern char *instance_name; /* ===== instanceState (END) ===== */ @@ -849,7 +848,7 @@ extern bool tliIsPartOfHistory(const parray *timelines, TimeLineID tli); /* in merge.c */ extern void do_merge(InstanceState *instanceState, time_t backup_id); extern void merge_backups(pgBackup *backup, pgBackup *next_backup); -extern void merge_chain(parray *parent_chain, +extern void merge_chain(InstanceState *instanceState, parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup); extern parray *read_database_map(pgBackup *backup); diff --git a/src/restore.c b/src/restore.c index ad7300ba8..628dad978 100644 --- a/src/restore.c +++ b/src/restore.c @@ -41,22 +41,22 @@ typedef struct static void -print_recovery_settings(FILE *fp, pgBackup *backup, +print_recovery_settings(InstanceState *instanceState, FILE *fp, pgBackup *backup, pgRestoreParams *params, pgRecoveryTarget *rt); static void print_standby_settings_common(FILE *fp, pgBackup *backup, pgRestoreParams *params); #if PG_VERSION_NUM >= 120000 static void -update_recovery_options(pgBackup *backup, +update_recovery_options(InstanceState *instanceState, pgBackup *backup, pgRestoreParams *params, pgRecoveryTarget *rt); #else static void -update_recovery_options_before_v12(pgBackup *backup, +update_recovery_options_before_v12(InstanceState *instanceState, pgBackup *backup, pgRestoreParams *params, pgRecoveryTarget *rt); #endif -static void create_recovery_conf(time_t backup_id, +static void create_recovery_conf(InstanceState *instanceState, time_t backup_id, pgRecoveryTarget *rt, pgBackup *backup, pgRestoreParams 
*params); @@ -673,7 +673,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg //TODO rename and update comment /* Create recovery.conf with given recovery target parameters */ - create_recovery_conf(target_backup_id, rt, dest_backup, params); + create_recovery_conf(instanceState, target_backup_id, rt, dest_backup, params); } /* ssh connection to longer needed */ @@ -1298,7 +1298,7 @@ restore_files(void *arg) * with given recovery target parameters */ static void -create_recovery_conf(time_t backup_id, +create_recovery_conf(InstanceState *instanceState, time_t backup_id, pgRecoveryTarget *rt, pgBackup *backup, pgRestoreParams *params) @@ -1345,16 +1345,16 @@ create_recovery_conf(time_t backup_id, elog(LOG, "----------------------------------------"); #if PG_VERSION_NUM >= 120000 - update_recovery_options(backup, params, rt); + update_recovery_options(instanceState, backup, params, rt); #else - update_recovery_options_before_v12(backup, params, rt); + update_recovery_options_before_v12(instanceState, backup, params, rt); #endif } -/* TODO get rid of using global variables: instance_config, backup_path, instance_name */ +/* TODO get rid of using global variables: instance_config */ static void -print_recovery_settings(FILE *fp, pgBackup *backup, +print_recovery_settings(InstanceState *instanceState, FILE *fp, pgBackup *backup, pgRestoreParams *params, pgRecoveryTarget *rt) { char restore_command_guc[16384]; @@ -1370,7 +1370,8 @@ print_recovery_settings(FILE *fp, pgBackup *backup, sprintf(restore_command_guc, "%s archive-get -B %s --instance %s " "--wal-file-path=%%p --wal-file-name=%%f", PROGRAM_FULL_PATH ? PROGRAM_FULL_PATH : PROGRAM_NAME, - backup_path, instance_name); + /* TODO What is going on here? Why do we use catalog path as wal-file-path? 
*/ + instanceState->catalog_state->catalog_path, instanceState->instance_name); /* append --remote-* parameters provided via --archive-* settings */ if (instance_config.archive.host) @@ -1455,7 +1456,7 @@ print_standby_settings_common(FILE *fp, pgBackup *backup, pgRestoreParams *param #if PG_VERSION_NUM < 120000 static void -update_recovery_options_before_v12(pgBackup *backup, +update_recovery_options_before_v12(InstanceState *instanceState, pgBackup *backup, pgRestoreParams *params, pgRecoveryTarget *rt) { FILE *fp; @@ -1486,7 +1487,7 @@ update_recovery_options_before_v12(pgBackup *backup, PROGRAM_VERSION); if (params->recovery_settings_mode == PITR_REQUESTED) - print_recovery_settings(fp, backup, params, rt); + print_recovery_settings(instanceState, fp, backup, params, rt); if (params->restore_as_replica) { @@ -1508,7 +1509,7 @@ update_recovery_options_before_v12(pgBackup *backup, */ #if PG_VERSION_NUM >= 120000 static void -update_recovery_options(pgBackup *backup, +update_recovery_options(InstanceState *instanceState, pgBackup *backup, pgRestoreParams *params, pgRecoveryTarget *rt) { @@ -1616,7 +1617,7 @@ update_recovery_options(pgBackup *backup, base36enc(backup->start_time), current_time_str); if (params->recovery_settings_mode == PITR_REQUESTED) - print_recovery_settings(fp, backup, params, rt); + print_recovery_settings(instanceState, fp, backup, params, rt); if (params->restore_as_replica) print_standby_settings_common(fp, backup, params); diff --git a/src/validate.c b/src/validate.c index 0049b4f71..21cf32250 100644 --- a/src/validate.c +++ b/src/validate.c @@ -406,7 +406,7 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) char conf_path[MAXPGPATH]; char child[MAXPGPATH]; struct stat st; - InstanceState *instance_state; + InstanceState *instanceState; /* skip entries point current dir or parent dir */ @@ -425,13 +425,13 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) /* * Initialize instance configuration. */ - instance_state = pgut_new(InstanceState); + instanceState = pgut_new(InstanceState); strncpy(instanceState->instance_name, dent->d_name, MAXPGPATH); join_path_components(instanceState->instance_backup_subdir_path, - catalogState->backup_subdir_path, instance_name); + catalogState->backup_subdir_path, instanceState->instance_name); join_path_components(instanceState->instance_wal_subdir_path, - catalogState->wal_subdir_path, instance_name); + catalogState->wal_subdir_path, instanceState->instance_name); #ifdef REFACTORE_ME sprintf(backup_instance_path, "%s/%s/%s", From b956febb7b425ed605504845599b867e576e8859 Mon Sep 17 00:00:00 2001 From: anastasia Date: Wed, 3 Feb 2021 20:50:34 +0300 Subject: [PATCH 028/525] Refactoring. 
Get rid of global variable backup_path --- src/backup.c | 4 +-- src/catalog.c | 61 ++++++++++++++++++++-------------------- src/configure.c | 8 +++--- src/delete.c | 23 +++++++-------- src/merge.c | 2 +- src/pg_probackup.c | 8 +++--- src/pg_probackup.h | 32 +++++++++++++++------ src/pg_probackup_state.h | 13 --------- src/restore.c | 2 +- src/show.c | 54 +++++++++++++++++------------------ src/validate.c | 2 +- 11 files changed, 105 insertions(+), 104 deletions(-) diff --git a/src/backup.c b/src/backup.c index 17bb3345f..56711b206 100644 --- a/src/backup.c +++ b/src/backup.c @@ -157,7 +157,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, current.backup_mode == BACKUP_MODE_DIFF_DELTA) { /* get list of backups already taken */ - backup_list = catalog_get_backup_list(instanceState->instance_name, INVALID_BACKUP_ID); + backup_list = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID); prev_backup = catalog_get_last_data_backup(backup_list, current.tli, current.start_time); if (prev_backup == NULL) @@ -168,7 +168,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, current.tli); /* TODO: use read_timeline_history */ - tli_list = catalog_get_timelines(&instance_config); + tli_list = catalog_get_timelines(instanceState, &instance_config); if (parray_num(tli_list) == 0) elog(WARNING, "Cannot find valid backup on previous timelines, " diff --git a/src/catalog.c b/src/catalog.c index f215e90e2..b32043a0e 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -688,13 +688,11 @@ IsDir(const char *dirpath, const char *entry, fio_location location) /* * Create list of instances in given backup catalog. * - * Returns parray of "InstanceConfig" structures, filled with - * actual config of each instance. + * Returns parray of InstanceState structures. 
*/ parray * -catalog_get_instance_list(char *backup_catalog_path) +catalog_get_instance_list(CatalogState *catalogState) { - char path[MAXPGPATH]; DIR *dir; struct dirent *dent; parray *instances; @@ -702,24 +700,23 @@ catalog_get_instance_list(char *backup_catalog_path) instances = parray_new(); /* open directory and list contents */ - join_path_components(path, backup_catalog_path, BACKUPS_DIR); - dir = opendir(path); + dir = opendir(catalogState->backup_subdir_path); if (dir == NULL) elog(ERROR, "Cannot open directory \"%s\": %s", - path, strerror(errno)); + catalogState->backup_subdir_path, strerror(errno)); while (errno = 0, (dent = readdir(dir)) != NULL) { char child[MAXPGPATH]; struct stat st; - InstanceConfig *instance; + InstanceState *instanceState = NULL; /* skip entries point current dir or parent dir */ if (strcmp(dent->d_name, ".") == 0 || strcmp(dent->d_name, "..") == 0) continue; - join_path_components(child, path, dent->d_name); + join_path_components(child, catalogState->backup_subdir_path, dent->d_name); if (lstat(child, &st) == -1) elog(ERROR, "Cannot stat file \"%s\": %s", @@ -728,9 +725,16 @@ catalog_get_instance_list(char *backup_catalog_path) if (!S_ISDIR(st.st_mode)) continue; - instance = readInstanceConfigFile(dent->d_name); + instanceState = pgut_new(InstanceState); + instanceState->config = readInstanceConfigFile(instanceState); + + strncpy(instanceState->instance_name, dent->d_name, MAXPGPATH); + join_path_components(instanceState->instance_backup_subdir_path, + catalogState->backup_subdir_path, instanceState->instance_name); + join_path_components(instanceState->instance_wal_subdir_path, + catalogState->wal_subdir_path, instanceState->instance_name); - parray_append(instances, instance); + parray_append(instances, instanceState); } /* TODO 3.0: switch to ERROR */ @@ -739,11 +743,11 @@ catalog_get_instance_list(char *backup_catalog_path) if (errno) elog(ERROR, "Cannot read directory \"%s\": %s", - path, strerror(errno)); + catalogState->backup_subdir_path, strerror(errno)); if (closedir(dir)) elog(ERROR, "Cannot close directory \"%s\": %s", - path, strerror(errno)); + catalogState->backup_subdir_path, strerror(errno)); return instances; } @@ -755,22 +759,18 @@ catalog_get_instance_list(char *backup_catalog_path) * If valid backup id is passed only matching backup will be added to the list. 
*/ parray * -catalog_get_backup_list(const char *instance_name, time_t requested_backup_id) +catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id) { DIR *data_dir = NULL; struct dirent *data_ent = NULL; parray *backups = NULL; int i; - char backup_instance_path[MAXPGPATH]; - - sprintf(backup_instance_path, "%s/%s/%s", - backup_path, BACKUPS_DIR, instance_name); /* open backup instance backups directory */ - data_dir = fio_opendir(backup_instance_path, FIO_BACKUP_HOST); + data_dir = fio_opendir(instanceState->instance_backup_subdir_path, FIO_BACKUP_HOST); if (data_dir == NULL) { - elog(WARNING, "cannot open directory \"%s\": %s", backup_instance_path, + elog(WARNING, "cannot open directory \"%s\": %s", instanceState->instance_backup_subdir_path, strerror(errno)); goto err_proc; } @@ -784,12 +784,12 @@ catalog_get_backup_list(const char *instance_name, time_t requested_backup_id) pgBackup *backup = NULL; /* skip not-directory entries and hidden entries */ - if (!IsDir(backup_instance_path, data_ent->d_name, FIO_BACKUP_HOST) + if (!IsDir(instanceState->instance_backup_subdir_path, data_ent->d_name, FIO_BACKUP_HOST) || data_ent->d_name[0] == '.') continue; /* open subdirectory of specific backup */ - join_path_components(data_path, backup_instance_path, data_ent->d_name); + join_path_components(data_path, instanceState->instance_backup_subdir_path, data_ent->d_name); /* read backup information from BACKUP_CONTROL_FILE */ snprintf(backup_conf_path, MAXPGPATH, "%s/%s", data_path, BACKUP_CONTROL_FILE); @@ -835,7 +835,7 @@ catalog_get_backup_list(const char *instance_name, time_t requested_backup_id) if (errno) { elog(WARNING, "cannot read backup root directory \"%s\": %s", - backup_instance_path, strerror(errno)); + instanceState->instance_backup_subdir_path, strerror(errno)); goto err_proc; } @@ -1345,22 +1345,21 @@ create_backup_dir(pgBackup *backup, const char *backup_instance_path) * TODO: '.partial' and '.part' segno information should be added to tlinfo. 
*/ parray * -catalog_get_timelines(InstanceConfig *instance) +catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) { int i,j,k; parray *xlog_files_list = parray_new(); parray *timelineinfos; parray *backups; timelineInfo *tlinfo; - char arclog_path[MAXPGPATH]; /* for fancy reporting */ char begin_segno_str[MAXFNAMELEN]; char end_segno_str[MAXFNAMELEN]; /* read all xlog files that belong to this archive */ - sprintf(arclog_path, "%s/%s/%s", backup_path, "wal", instance->name); - dir_list_file(xlog_files_list, arclog_path, false, false, false, false, true, 0, FIO_BACKUP_HOST); + dir_list_file(xlog_files_list, instanceState->instance_wal_subdir_path, + false, false, false, false, true, 0, FIO_BACKUP_HOST); parray_qsort(xlog_files_list, pgFileCompareName); timelineinfos = parray_new(); @@ -1530,7 +1529,7 @@ catalog_get_timelines(InstanceConfig *instance) TimeLineHistoryEntry *tln; sscanf(file->name, "%08X.history", &tli); - timelines = read_timeline_history(arclog_path, tli, true); + timelines = read_timeline_history(instanceState->instance_wal_subdir_path, tli, true); if (!tlinfo || tlinfo->tli != tli) { @@ -1564,7 +1563,7 @@ catalog_get_timelines(InstanceConfig *instance) } /* save information about backups belonging to each timeline */ - backups = catalog_get_backup_list(instance->name, INVALID_BACKUP_ID); + backups = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID); for (i = 0; i < parray_num(timelineinfos); i++) { @@ -2037,7 +2036,7 @@ get_oldest_backup(timelineInfo *tlinfo) * Overwrite backup metadata. */ void -do_set_backup(const char *instance_name, time_t backup_id, +do_set_backup(InstanceState *instanceState, time_t backup_id, pgSetBackupParams *set_backup_params) { pgBackup *target_backup = NULL; @@ -2046,7 +2045,7 @@ do_set_backup(const char *instance_name, time_t backup_id, if (!set_backup_params) elog(ERROR, "Nothing to set by 'set-backup' command"); - backup_list = catalog_get_backup_list(instance_name, backup_id); + backup_list = catalog_get_backup_list(instanceState, backup_id); if (parray_num(backup_list) != 1) elog(ERROR, "Failed to find backup %s", base36enc(backup_id)); diff --git a/src/configure.c b/src/configure.c index 1aae3df13..065e01e6e 100644 --- a/src/configure.c +++ b/src/configure.c @@ -374,7 +374,7 @@ init_config(InstanceConfig *config, const char *instance_name) * read instance config from file */ InstanceConfig * -readInstanceConfigFile(const char *instance_name) +readInstanceConfigFile(InstanceState *instanceState) { char path[MAXPGPATH]; InstanceConfig *instance = pgut_new(InstanceConfig); @@ -592,14 +592,14 @@ readInstanceConfigFile(const char *instance_name) }; - init_config(instance, instance_name); + init_config(instance, instanceState->instance_name); sprintf(instance->backup_instance_path, "%s/%s/%s", - backup_path, BACKUPS_DIR, instance_name); + instanceState->catalog_state->catalog_path, BACKUPS_DIR, instanceState->instance_name); canonicalize_path(instance->backup_instance_path); sprintf(instance->arclog_path, "%s/%s/%s", - backup_path, "wal", instance_name); + instanceState->catalog_state->catalog_path, "wal", instanceState->instance_name); canonicalize_path(instance->arclog_path); join_path_components(path, instance->backup_instance_path, diff --git a/src/delete.c b/src/delete.c index d3d719506..ebe09dbe5 100644 --- a/src/delete.c +++ b/src/delete.c @@ -21,7 +21,7 @@ static void do_retention_internal(parray *backup_list, parray *to_keep_list, static void do_retention_merge(InstanceState *instanceState, parray 
*backup_list, parray *to_keep_list, parray *to_purge_list); static void do_retention_purge(parray *to_keep_list, parray *to_purge_list); -static void do_retention_wal(bool dry_run); +static void do_retention_wal(InstanceState *instanceState, bool dry_run); // TODO: more useful messages for dry run. static bool backup_deleted = false; /* At least one backup was deleted */ @@ -39,7 +39,7 @@ do_delete(InstanceState *instanceState, time_t backup_id) char size_to_delete_pretty[20]; /* Get complete list of backups */ - backup_list = catalog_get_backup_list(instanceState->instance_name, INVALID_BACKUP_ID); + backup_list = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID); delete_list = parray_new(); @@ -105,7 +105,7 @@ do_delete(InstanceState *instanceState, time_t backup_id) /* Clean WAL segments */ if (delete_wal) - do_retention_wal(dry_run); + do_retention_wal(instanceState, dry_run); /* cleanup */ parray_free(delete_list); @@ -139,7 +139,7 @@ void do_retention(InstanceState *instanceState) MyLocation = FIO_LOCAL_HOST; /* Get a complete list of backups. */ - backup_list = catalog_get_backup_list(instanceState->instance_name, INVALID_BACKUP_ID); + backup_list = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID); if (parray_num(backup_list) == 0) backup_list_is_empty = true; @@ -179,7 +179,7 @@ void do_retention(InstanceState *instanceState) /* TODO: some sort of dry run for delete_wal */ if (delete_wal) - do_retention_wal(dry_run); + do_retention_wal(instanceState, dry_run); /* TODO: consider dry-run flag */ @@ -653,12 +653,13 @@ do_retention_purge(parray *to_keep_list, parray *to_purge_list) * and delete them. */ static void -do_retention_wal(bool dry_run) +do_retention_wal(InstanceState *instanceState, bool dry_run) { parray *tli_list; int i; - tli_list = catalog_get_timelines(&instance_config); + //TODO check that instanceState is not NULL + tli_list = catalog_get_timelines(instanceState, &instance_config); for (i = 0; i < parray_num(tli_list); i++) { @@ -975,7 +976,7 @@ do_delete_instance(InstanceState *instanceState) /* Delete all backups. 
*/ - backup_list = catalog_get_backup_list(instanceState->instance_name, INVALID_BACKUP_ID); + backup_list = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID); catalog_lock_backup_list(backup_list, 0, parray_num(backup_list) - 1, true, true); @@ -1015,7 +1016,7 @@ do_delete_instance(InstanceState *instanceState) /* Delete all backups of given status in instance */ void -do_delete_status(InstanceConfig *instance_config, const char *status) +do_delete_status(InstanceState *instanceState, InstanceConfig *instance_config, const char *status) { int i; parray *backup_list, *delete_list; @@ -1038,11 +1039,11 @@ do_delete_status(InstanceConfig *instance_config, const char *status) */ pretty_status = status2str(status_for_delete); - backup_list = catalog_get_backup_list(instance_config->name, INVALID_BACKUP_ID); + backup_list = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID); if (parray_num(backup_list) == 0) { - elog(WARNING, "Instance '%s' has no backups", instance_config->name); + elog(WARNING, "Instance '%s' has no backups", instanceState->instance_name); return; } diff --git a/src/merge.c b/src/merge.c index 90376c02c..265aa4ad0 100644 --- a/src/merge.c +++ b/src/merge.c @@ -86,7 +86,7 @@ do_merge(InstanceState *instanceState, time_t backup_id) elog(INFO, "Merge started"); /* Get list of all backups sorted in order of descending start time */ - backups = catalog_get_backup_list(instanceState->instance_name, INVALID_BACKUP_ID); + backups = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID); /* Find destination backup first */ for (i = 0; i < parray_num(backups); i++) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 423087b4c..656a778ad 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -63,7 +63,7 @@ const char *PROGRAM_EMAIL = "https://github.com/postgrespro/pg_probackup/issues /* ================ catalogState =========== */ /* directory options */ /* TODO make it local variable, pass as an argument to all commands that need it. 
*/ -char *backup_path = NULL; +static char *backup_path = NULL; /* * path or to the data files in the backup catalog * $BACKUP_PATH/backups/instance_name @@ -844,7 +844,7 @@ main(int argc, char *argv[]) restore_params, no_sync); case SHOW_CMD: - return do_show(backup_path, instance_name, current.backup_id, show_archive); + return do_show(catalogState, instanceState, current.backup_id, show_archive); case DELETE_CMD: if (delete_expired && backup_id_string) elog(ERROR, "You cannot specify --delete-expired and (-i, --backup-id) options together"); @@ -858,7 +858,7 @@ main(int argc, char *argv[]) if (!backup_id_string) { if (delete_status) - do_delete_status(&instance_config, delete_status); + do_delete_status(instanceState, &instance_config, delete_status); else do_retention(instanceState); } @@ -877,7 +877,7 @@ main(int argc, char *argv[]) case SET_BACKUP_CMD: if (!backup_id_string) elog(ERROR, "You must specify parameter (-i, --backup-id) for 'set-backup' command"); - do_set_backup(instance_name, current.backup_id, set_backup_params); + do_set_backup(instanceState, current.backup_id, set_backup_params); break; case CHECKDB_CMD: do_checkdb(need_amcheck, diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 756361165..bcfadcb23 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -758,7 +758,6 @@ typedef struct BackupPageHeader2 /* ====== CatalogState ======= */ /* directory options */ -extern char *backup_path; extern char backup_instance_path[MAXPGPATH]; extern char arclog_path[MAXPGPATH]; @@ -794,6 +793,21 @@ extern bool dry_run; /* ===== instanceState ===== */ +typedef struct InstanceState +{ + /* catalog, this instance belongs to */ + CatalogState *catalog_state; + + char instance_name[MAXPGPATH]; //previously global var instance_name + /* $BACKUP_PATH/backups/instance_name */ + char instance_backup_subdir_path[MAXPGPATH]; + /* $BACKUP_PATH/backups/instance_name */ + char instance_wal_subdir_path[MAXPGPATH]; // previously global var arclog_path + + //TODO split into some more meaningdul parts + InstanceConfig *config; +} InstanceState; + /* ===== instanceState (END) ===== */ /* show options */ @@ -868,10 +882,10 @@ extern void do_archive_get(InstanceConfig *instance, const char *prefetch_dir_ar extern void do_show_config(void); extern void do_set_config(bool missing_ok); extern void init_config(InstanceConfig *config, const char *instance_name); -extern InstanceConfig *readInstanceConfigFile(const char *instance_name); +extern InstanceConfig *readInstanceConfigFile(InstanceState *instanceState); /* in show.c */ -extern int do_show(char *backup_catalog_path, const char *instance_name, +extern int do_show(CatalogState *catalogState, InstanceState *instanceState, time_t requested_backup_id, bool show_archive); /* in delete.c */ @@ -879,7 +893,8 @@ extern void do_delete(InstanceState *instanceState, time_t backup_id); extern void delete_backup_files(pgBackup *backup); extern void do_retention(InstanceState *instanceState); extern int do_delete_instance(InstanceState *instanceState); -extern void do_delete_status(InstanceConfig *instance_config, const char *status); +extern void do_delete_status(InstanceState *instanceState, + InstanceConfig *instance_config, const char *status); /* in fetch.c */ extern char *slurpFile(const char *datadir, @@ -922,8 +937,9 @@ extern bool lock_backup(pgBackup *backup, bool strict, bool exclusive); extern const char *pgBackupGetBackupMode(pgBackup *backup, bool show_color); extern void pgBackupGetBackupModeColor(pgBackup *backup, char *mode); 
-extern parray *catalog_get_instance_list(char *backup_catalog_path); -extern parray *catalog_get_backup_list(const char *instance_name, time_t requested_backup_id); +extern parray *catalog_get_instance_list(CatalogState *catalogState); + +extern parray *catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id); extern void catalog_lock_backup_list(parray *backup_list, int from_idx, int to_idx, bool strict, bool exclusive); extern pgBackup *catalog_get_last_data_backup(parray *backup_list, @@ -933,8 +949,8 @@ extern pgBackup *get_multi_timeline_parent(parray *backup_list, parray *tli_list TimeLineID current_tli, time_t current_start_time, InstanceConfig *instance); extern void timelineInfoFree(void *tliInfo); -extern parray *catalog_get_timelines(InstanceConfig *instance); -extern void do_set_backup(const char *instance_name, time_t backup_id, +extern parray *catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance); +extern void do_set_backup(InstanceState *instanceState, time_t backup_id, pgSetBackupParams *set_backup_params); extern void pin_backup(pgBackup *target_backup, pgSetBackupParams *set_backup_params); diff --git a/src/pg_probackup_state.h b/src/pg_probackup_state.h index 19e096328..56d852537 100644 --- a/src/pg_probackup_state.h +++ b/src/pg_probackup_state.h @@ -25,19 +25,6 @@ typedef struct CatalogState /* ===== instanceState ===== */ -typedef struct InstanceState -{ - /* catalog, this instance belongs to */ - CatalogState *catalog_state; - - char instance_name[MAXPGPATH]; //previously global var instance_name - /* $BACKUP_PATH/backups/instance_name */ - char instance_backup_subdir_path[MAXPGPATH]; - /* $BACKUP_PATH/backups/instance_name */ - char instance_wal_subdir_path[MAXPGPATH]; // previously global var arclog_path - - //TODO add config here -} InstanceState; /* ===== instanceState (END) ===== */ diff --git a/src/restore.c b/src/restore.c index 628dad978..398726772 100644 --- a/src/restore.c +++ b/src/restore.c @@ -216,7 +216,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg elog(LOG, "%s begin.", action); /* Get list of all backups sorted in order of descending start time */ - backups = catalog_get_backup_list(instanceState->instance_name, INVALID_BACKUP_ID); + backups = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID); /* Find backup range we should restore or validate. 
*/ while ((i < parray_num(backups)) && !dest_backup) diff --git a/src/show.c b/src/show.c index c88f4fae9..ed4f43ef3 100644 --- a/src/show.c +++ b/src/show.c @@ -54,14 +54,14 @@ typedef struct ShowArchiveRow static void show_instance_start(void); static void show_instance_end(void); -static void show_instance(const char *instance_name, time_t requested_backup_id, bool show_name); +static void show_instance(InstanceState *instanceState, time_t requested_backup_id, bool show_name); static void print_backup_json_object(PQExpBuffer buf, pgBackup *backup); -static int show_backup(const char *instance_name, time_t requested_backup_id); +static int show_backup(InstanceState *instanceState, time_t requested_backup_id); static void show_instance_plain(const char *instance_name, parray *backup_list, bool show_name); static void show_instance_json(const char *instance_name, parray *backup_list); -static void show_instance_archive(InstanceConfig *instance); +static void show_instance_archive(InstanceState *instanceState, InstanceConfig *instance); static void show_archive_plain(const char *instance_name, uint32 xlog_seg_size, parray *timelines_list, bool show_name); static void show_archive_json(const char *instance_name, uint32 xlog_seg_size, @@ -75,12 +75,12 @@ static int32 json_level = 0; * Entry point of pg_probackup SHOW subcommand. */ int -do_show(char *backup_catalog_path, const char *instance_name, +do_show(CatalogState *catalogState, InstanceState *instanceState, time_t requested_backup_id, bool show_archive) { int i; - if (instance_name == NULL && + if (instanceState == NULL && requested_backup_id != INVALID_BACKUP_ID) elog(ERROR, "You must specify --instance to use (-i, --backup-id) option"); @@ -89,28 +89,25 @@ do_show(char *backup_catalog_path, const char *instance_name, elog(ERROR, "You cannot specify --archive and (-i, --backup-id) options together"); /* - * if instance_name is not specified, + * if instance is not specified, * show information about all instances in this backup catalog */ - if (instance_name == NULL) + if (instanceState == NULL) { - parray *instances = catalog_get_instance_list(backup_catalog_path); + parray *instances = catalog_get_instance_list(catalogState); show_instance_start(); for (i = 0; i < parray_num(instances); i++) { - InstanceConfig *instance = parray_get(instances, i); - char backup_instance_path[MAXPGPATH]; + InstanceState *instanceState = parray_get(instances, i); if (interrupted) elog(ERROR, "Interrupted during show"); - sprintf(backup_instance_path, "%s/%s/%s", backup_catalog_path, BACKUPS_DIR, instance->name); - if (show_archive) - show_instance_archive(instance); + show_instance_archive(instanceState, instanceState->config); else - show_instance(instance->name, INVALID_BACKUP_ID, true); + show_instance(instanceState, INVALID_BACKUP_ID, true); } show_instance_end(); @@ -124,11 +121,11 @@ do_show(char *backup_catalog_path, const char *instance_name, if (show_archive) { - InstanceConfig *instance = readInstanceConfigFile(instance_name); - show_instance_archive(instance); + InstanceConfig *instance = readInstanceConfigFile(instanceState); + show_instance_archive(instanceState, instance); } else - show_instance(instance_name, requested_backup_id, false); + show_instance(instanceState, requested_backup_id, false); show_instance_end(); @@ -138,11 +135,11 @@ do_show(char *backup_catalog_path, const char *instance_name, { if (show_archive) { - InstanceConfig *instance = readInstanceConfigFile(instance_name); - show_instance_archive(instance); + 
InstanceConfig *instance = readInstanceConfigFile(instanceState); + show_instance_archive(instanceState, instance); } else - show_backup(instance_name, requested_backup_id); + show_backup(instanceState, requested_backup_id); return 0; } @@ -290,16 +287,16 @@ show_instance_end(void) * Show brief meta information about all backups in the backup instance. */ static void -show_instance(const char *instance_name, time_t requested_backup_id, bool show_name) +show_instance(InstanceState *instanceState, time_t requested_backup_id, bool show_name) { parray *backup_list; - backup_list = catalog_get_backup_list(instance_name, requested_backup_id); + backup_list = catalog_get_backup_list(instanceState, requested_backup_id); if (show_format == SHOW_PLAIN) - show_instance_plain(instance_name, backup_list, show_name); + show_instance_plain(instanceState->instance_name, backup_list, show_name); else if (show_format == SHOW_JSON) - show_instance_json(instance_name, backup_list); + show_instance_json(instanceState->instance_name, backup_list); else elog(ERROR, "Invalid show format %d", (int) show_format); @@ -451,13 +448,14 @@ print_backup_json_object(PQExpBuffer buf, pgBackup *backup) * Show detailed meta information about specified backup. */ static int -show_backup(const char *instance_name, time_t requested_backup_id) +show_backup(InstanceState *instanceState, time_t requested_backup_id) { int i; pgBackup *backup = NULL; parray *backups; - backups = catalog_get_backup_list(instance_name, INVALID_BACKUP_ID); + //TODO pass requested_backup_id to the function + backups = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID); /* Find requested backup */ for (i = 0; i < parray_num(backups); i++) @@ -776,11 +774,11 @@ show_instance_json(const char *instance_name, parray *backup_list) * show information about WAL archive of the instance */ static void -show_instance_archive(InstanceConfig *instance) +show_instance_archive(InstanceState *instanceState, InstanceConfig *instance) { parray *timelineinfos; - timelineinfos = catalog_get_timelines(instance); + timelineinfos = catalog_get_timelines(instanceState, instance); if (show_format == SHOW_PLAIN) show_archive_plain(instance->name, instance->xlog_seg_size, timelineinfos, true); diff --git a/src/validate.c b/src/validate.c index 21cf32250..ce434f027 100644 --- a/src/validate.c +++ b/src/validate.c @@ -494,7 +494,7 @@ do_validate_instance(InstanceState *instanceState) elog(INFO, "Validate backups of the instance '%s'", instanceState->instance_name); /* Get list of all backups sorted in order of descending start time */ - backups = catalog_get_backup_list(instanceState->instance_name, INVALID_BACKUP_ID); + backups = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID); /* Examine backups one by one and validate them */ for (i = 0; i < parray_num(backups); i++) From 9940967657c428a7e6a2f6b402088809db7d6e45 Mon Sep 17 00:00:00 2001 From: anastasia Date: Wed, 3 Feb 2021 21:28:34 +0300 Subject: [PATCH 029/525] Refactoring. 
Get rid of global variable backup_instance_path --- src/backup.c | 59 ++++++++++++++++++++++++++-------------------- src/catalog.c | 24 ++----------------- src/configure.c | 27 +++++++++------------ src/delete.c | 11 ++++----- src/init.c | 2 +- src/pg_probackup.c | 7 ++++-- src/pg_probackup.h | 14 +++++++---- src/validate.c | 14 ++++------- 8 files changed, 71 insertions(+), 87 deletions(-) diff --git a/src/backup.c b/src/backup.c index 56711b206..baed44fc5 100644 --- a/src/backup.c +++ b/src/backup.c @@ -50,12 +50,12 @@ static void *backup_files(void *arg); static void do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool backup_logs); -static void pg_start_backup(const char *label, bool smooth, pgBackup *backup, +static void pg_start_backup(InstanceState *instanceState, const char *label, bool smooth, pgBackup *backup, PGNodeInfo *nodeInfo, PGconn *conn); static void pg_switch_wal(PGconn *conn); -static void pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, PGNodeInfo *nodeInfo); +static void pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startbackup_conn, PGNodeInfo *nodeInfo); -static XLogRecPtr wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn, TimeLineID tli, +static XLogRecPtr wait_wal_lsn(InstanceState *instanceState, XLogRecPtr lsn, bool is_start_lsn, TimeLineID tli, bool in_prev_segment, bool segment_only, int timeout_elevel, bool in_stream_dir); @@ -77,14 +77,15 @@ static void set_cfs_datafiles(parray *files, const char *root, char *relative, s static void backup_stopbackup_callback(bool fatal, void *userdata) { - PGconn *pg_startbackup_conn = (PGconn *) userdata; + InstanceState *instanceState = (InstanceState *) userdata; + PGconn *pg_startbackup_conn = instanceState->conn; /* * If backup is in progress, notify stop of backup to PostgreSQL */ if (backup_in_progress) { elog(WARNING, "backup in progress, stop backup"); - pg_stop_backup(NULL, pg_startbackup_conn, NULL); /* don't care about stop_lsn in case of error */ + pg_stop_backup(instanceState, NULL, pg_startbackup_conn, NULL); /* don't care about stop_lsn in case of error */ } } @@ -139,7 +140,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, strlen(" with pg_probackup")); /* Call pg_start_backup function in PostgreSQL connect */ - pg_start_backup(label, smooth_checkpoint, ¤t, nodeInfo, backup_conn); + pg_start_backup(instanceState, label, smooth_checkpoint, ¤t, nodeInfo, backup_conn); /* Obtain current timeline */ #if PG_VERSION_NUM >= 90600 @@ -270,7 +271,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, * Because WAL streaming will start after pg_start_backup() in stream * mode. */ - wait_wal_lsn(current.start_lsn, true, current.tli, false, true, ERROR, false); + wait_wal_lsn(instanceState, current.start_lsn, true, current.tli, false, true, ERROR, false); } /* start stream replication */ @@ -527,7 +528,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, } /* Notify end of backup */ - pg_stop_backup(¤t, backup_conn, nodeInfo); + pg_stop_backup(instanceState, ¤t, backup_conn, nodeInfo); /* In case of backup from replica >= 9.6 we must fix minRecPoint, * First we must find pg_control in backup_files_list. @@ -1028,7 +1029,7 @@ confirm_block_size(PGconn *conn, const char *name, int blcksz) * Notify start of backup to PostgreSQL server. 
*/ static void -pg_start_backup(const char *label, bool smooth, pgBackup *backup, +pg_start_backup(InstanceState *instanceState, const char *label, bool smooth, pgBackup *backup, PGNodeInfo *nodeInfo, PGconn *conn) { PGresult *res; @@ -1056,7 +1057,8 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup, * is necessary to call pg_stop_backup() in backup_cleanup(). */ backup_in_progress = true; - pgut_atexit_push(backup_stopbackup_callback, conn); + instanceState->conn = conn; + pgut_atexit_push(backup_stopbackup_callback, instanceState); /* Extract timeline and LSN from results of pg_start_backup() */ XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo); @@ -1262,7 +1264,7 @@ pg_is_superuser(PGconn *conn) * Returns InvalidXLogRecPtr if 'segment_only' flag is used. */ static XLogRecPtr -wait_wal_lsn(XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli, +wait_wal_lsn(InstanceState *instanceState, XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli, bool in_prev_segment, bool segment_only, int timeout_elevel, bool in_stream_dir) { @@ -1296,8 +1298,9 @@ wait_wal_lsn(XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli, */ if (in_stream_dir) { - pgBackupGetPath2(¤t, pg_wal_dir, lengthof(pg_wal_dir), - DATABASE_DIR, PG_XLOG_DIR); + snprintf(pg_wal_dir, lengthof(pg_wal_dir), "%s/%s/%s/%s", + instanceState->instance_backup_subdir_path, base36enc(current.start_time), + DATABASE_DIR, PG_XLOG_DIR); join_path_components(wal_segment_path, pg_wal_dir, wal_segment); wal_segment_dir = pg_wal_dir; } @@ -1438,7 +1441,7 @@ wait_wal_lsn(XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli, * Notify end of backup to PostgreSQL server. */ static void -pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, +pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startbackup_conn, PGNodeInfo *nodeInfo) { PGconn *conn; @@ -1566,7 +1569,8 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, } /* After we have sent pg_stop_backup, we don't need this callback anymore */ - pgut_atexit_pop(backup_stopbackup_callback, pg_startbackup_conn); + instanceState->conn = pg_startbackup_conn; + pgut_atexit_pop(backup_stopbackup_callback, instanceState); /* * Wait for the result of pg_stop_backup(), but no longer than @@ -1671,9 +1675,10 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, if (stream_wal) { - pgBackupGetPath2(backup, stream_xlog_path, - lengthof(stream_xlog_path), - DATABASE_DIR, PG_XLOG_DIR); + snprintf(stream_xlog_path, lengthof(stream_xlog_path), + "%s/%s/%s/%s", instanceState->instance_backup_subdir_path, + base36enc(backup->start_time), + DATABASE_DIR, PG_XLOG_DIR); xlog_path = stream_xlog_path; } else @@ -1702,7 +1707,7 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, if (stop_backup_lsn_tmp % instance_config.xlog_seg_size == 0) { /* Wait for segment with current stop_lsn, it is ok for it to never arrive */ - wait_wal_lsn(stop_backup_lsn_tmp, false, backup->tli, + wait_wal_lsn(instanceState, stop_backup_lsn_tmp, false, backup->tli, false, true, WARNING, stream_wal); /* Get the first record in segment with current stop_lsn */ @@ -1730,7 +1735,7 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, /* Despite looking for previous record there is not guarantee of success * because previous record can be the contrecord. 
*/ - lsn_tmp = wait_wal_lsn(stop_backup_lsn_tmp, false, backup->tli, + lsn_tmp = wait_wal_lsn(instanceState, stop_backup_lsn_tmp, false, backup->tli, true, false, ERROR, stream_wal); /* sanity */ @@ -1744,7 +1749,7 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, else if (stop_backup_lsn_tmp % XLOG_BLCKSZ == 0) { /* Wait for segment with current stop_lsn */ - wait_wal_lsn(stop_backup_lsn_tmp, false, backup->tli, + wait_wal_lsn(instanceState, stop_backup_lsn_tmp, false, backup->tli, false, true, ERROR, stream_wal); /* Get the next closest record in segment with current stop_lsn */ @@ -1776,7 +1781,8 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, if (!exclusive_backup) { Assert(PQnfields(res) >= 4); - pgBackupGetPath2(backup, path, lengthof(path), DATABASE_DIR, NULL); + snprintf(path, lengthof(path), "%s/%s/%s", instanceState->instance_backup_subdir_path, + base36enc(backup->start_time), DATABASE_DIR); /* Write backup_label */ join_path_components(backup_label, path, PG_BACKUP_LABEL_FILE); @@ -1873,7 +1879,7 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, * look for previous record with endpoint >= STOP_LSN. */ if (!stop_lsn_exists) - stop_backup_lsn = wait_wal_lsn(stop_backup_lsn_tmp, false, backup->tli, + stop_backup_lsn = wait_wal_lsn(instanceState, stop_backup_lsn_tmp, false, backup->tli, false, false, ERROR, stream_wal); if (stream_wal) @@ -1883,9 +1889,10 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, if(wait_WAL_streaming_end(backup_files_list)) elog(ERROR, "WAL streaming failed"); - pgBackupGetPath2(backup, stream_xlog_path, - lengthof(stream_xlog_path), - DATABASE_DIR, PG_XLOG_DIR); + snprintf(stream_xlog_path, lengthof(stream_xlog_path), "%s/%s/%s/%s", + instanceState->instance_backup_subdir_path, base36enc(backup->start_time), + DATABASE_DIR, PG_XLOG_DIR); + xlog_path = stream_xlog_path; } else diff --git a/src/catalog.c b/src/catalog.c index b32043a0e..d8b6d109a 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -733,7 +733,8 @@ catalog_get_instance_list(CatalogState *catalogState) catalogState->backup_subdir_path, instanceState->instance_name); join_path_components(instanceState->instance_wal_subdir_path, catalogState->wal_subdir_path, instanceState->instance_name); - + join_path_components(instanceState->instance_config_path, + instanceState->instance_backup_subdir_path, BACKUP_CATALOG_CONF_FILE); parray_append(instances, instanceState); } @@ -2808,27 +2809,6 @@ pgBackupCompareIdDesc(const void *l, const void *r) return -pgBackupCompareId(l, r); } -/* - * Construct absolute path of the backup directory. - * Append "subdir1" and "subdir2" to the backup directory. - */ -void -pgBackupGetPath2(const pgBackup *backup, char *path, size_t len, - const char *subdir1, const char *subdir2) -{ - /* If "subdir1" is NULL do not check "subdir2" */ - if (!subdir1) - snprintf(path, len, "%s/%s", backup_instance_path, - base36enc(backup->start_time)); - else if (!subdir2) - snprintf(path, len, "%s/%s/%s", backup_instance_path, - base36enc(backup->start_time), subdir1); - /* "subdir1" and "subdir2" is not NULL */ - else - snprintf(path, len, "%s/%s/%s/%s", backup_instance_path, - base36enc(backup->start_time), subdir1, subdir2); -} - /* * Check if multiple backups consider target backup to be their direct parent */ diff --git a/src/configure.c b/src/configure.c index 065e01e6e..b5ef8a356 100644 --- a/src/configure.c +++ b/src/configure.c @@ -277,18 +277,16 @@ do_show_config(void) * values into the file. 
*/ void -do_set_config(bool missing_ok) +do_set_config(InstanceState *instanceState, bool missing_ok) { - char path[MAXPGPATH]; char path_temp[MAXPGPATH]; FILE *fp; int i; - join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE); - snprintf(path_temp, sizeof(path_temp), "%s.tmp", path); + snprintf(path_temp, sizeof(path_temp), "%s.tmp", instanceState->instance_config_path); - if (!missing_ok && !fileExists(path, FIO_LOCAL_HOST)) - elog(ERROR, "Configuration file \"%s\" doesn't exist", path); + if (!missing_ok && !fileExists(instanceState->instance_config_path, FIO_LOCAL_HOST)) + elog(ERROR, "Configuration file \"%s\" doesn't exist", instanceState->instance_config_path); fp = fopen(path_temp, "wt"); if (fp == NULL) @@ -327,12 +325,12 @@ do_set_config(bool missing_ok) fclose(fp); - if (rename(path_temp, path) < 0) + if (rename(path_temp, instanceState->instance_config_path) < 0) { int errno_temp = errno; unlink(path_temp); elog(ERROR, "Cannot rename configuration file \"%s\" to \"%s\": %s", - path_temp, path, strerror(errno_temp)); + path_temp, instanceState->instance_config_path, strerror(errno_temp)); } } @@ -376,7 +374,6 @@ init_config(InstanceConfig *config, const char *instance_name) InstanceConfig * readInstanceConfigFile(InstanceState *instanceState) { - char path[MAXPGPATH]; InstanceConfig *instance = pgut_new(InstanceConfig); char *log_level_console = NULL; char *log_level_file = NULL; @@ -602,21 +599,19 @@ readInstanceConfigFile(InstanceState *instanceState) instanceState->catalog_state->catalog_path, "wal", instanceState->instance_name); canonicalize_path(instance->arclog_path); - join_path_components(path, instance->backup_instance_path, - BACKUP_CATALOG_CONF_FILE); - - if (fio_access(path, F_OK, FIO_BACKUP_HOST) != 0) + if (fio_access(instanceState->instance_config_path, F_OK, FIO_BACKUP_HOST) != 0) { - elog(WARNING, "Control file \"%s\" doesn't exist", path); + elog(WARNING, "Control file \"%s\" doesn't exist", instanceState->instance_config_path); pfree(instance); return NULL; } - parsed_options = config_read_opt(path, instance_options, WARNING, true, true); + parsed_options = config_read_opt(instanceState->instance_config_path, + instance_options, WARNING, true, true); if (parsed_options == 0) { - elog(WARNING, "Control file \"%s\" is empty", path); + elog(WARNING, "Control file \"%s\" is empty", instanceState->instance_config_path); pfree(instance); return NULL; } diff --git a/src/delete.c b/src/delete.c index ebe09dbe5..e833487a9 100644 --- a/src/delete.c +++ b/src/delete.c @@ -972,8 +972,6 @@ do_delete_instance(InstanceState *instanceState) { parray *backup_list; int i; - char instance_config_path[MAXPGPATH]; - /* Delete all backups. 
*/ backup_list = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID); @@ -994,16 +992,15 @@ do_delete_instance(InstanceState *instanceState) pgut_rmtree(arclog_path, false, true); /* Delete backup instance config file */ - join_path_components(instance_config_path, backup_instance_path, BACKUP_CATALOG_CONF_FILE); - if (remove(instance_config_path)) + if (remove(instanceState->instance_config_path)) { - elog(ERROR, "Can't remove \"%s\": %s", instance_config_path, + elog(ERROR, "Can't remove \"%s\": %s", instanceState->instance_config_path, strerror(errno)); } /* Delete instance root directories */ - if (rmdir(backup_instance_path) != 0) - elog(ERROR, "Can't remove \"%s\": %s", backup_instance_path, + if (rmdir(instanceState->instance_backup_subdir_path) != 0) + elog(ERROR, "Can't remove \"%s\": %s", instanceState->instance_backup_subdir_path, strerror(errno)); if (rmdir(arclog_path) != 0) diff --git a/src/init.c b/src/init.c index 29506749b..dc821325a 100644 --- a/src/init.c +++ b/src/init.c @@ -119,7 +119,7 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) SOURCE_DEFAULT); /* pgdata was set through command line */ - do_set_config(true); + do_set_config(instanceState, true); elog(INFO, "Instance '%s' successfully inited", instanceState->instance_name); return 0; diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 656a778ad..6b297e892 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -68,7 +68,7 @@ static char *backup_path = NULL; * path or to the data files in the backup catalog * $BACKUP_PATH/backups/instance_name */ -char backup_instance_path[MAXPGPATH]; +static char backup_instance_path[MAXPGPATH]; /* * path or to the wal files in the backup catalog * $BACKUP_PATH/wal/instance_name @@ -491,6 +491,9 @@ main(int argc, char *argv[]) catalogState->backup_subdir_path, instanceState->instance_name); join_path_components(instanceState->instance_wal_subdir_path, catalogState->wal_subdir_path, instanceState->instance_name); + join_path_components(instanceState->instance_config_path, + instanceState->instance_backup_subdir_path, BACKUP_CATALOG_CONF_FILE); + } /* ===== instanceState (END) ======*/ @@ -872,7 +875,7 @@ main(int argc, char *argv[]) do_show_config(); break; case SET_CONFIG_CMD: - do_set_config(false); + do_set_config(instanceState, false); break; case SET_BACKUP_CMD: if (!backup_id_string) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index bcfadcb23..2731b4558 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -758,7 +758,6 @@ typedef struct BackupPageHeader2 /* ====== CatalogState ======= */ /* directory options */ -extern char backup_instance_path[MAXPGPATH]; extern char arclog_path[MAXPGPATH]; /* ====== CatalogState (END) ======= */ @@ -801,9 +800,17 @@ typedef struct InstanceState char instance_name[MAXPGPATH]; //previously global var instance_name /* $BACKUP_PATH/backups/instance_name */ char instance_backup_subdir_path[MAXPGPATH]; + + /* $BACKUP_PATH/backups/instance_name/BACKUP_CATALOG_CONF_FILE */ + char instance_config_path[MAXPGPATH]; + /* $BACKUP_PATH/backups/instance_name */ char instance_wal_subdir_path[MAXPGPATH]; // previously global var arclog_path + /* TODO: Make it more specific */ + PGconn *conn; + + //TODO split into some more meaningdul parts InstanceConfig *config; } InstanceState; @@ -880,7 +887,7 @@ extern void do_archive_get(InstanceConfig *instance, const char *prefetch_dir_ar /* in configure.c */ extern void do_show_config(void); -extern void do_set_config(bool missing_ok); +extern void 
do_set_config(InstanceState *instanceState, bool missing_ok); extern void init_config(InstanceConfig *config, const char *instance_name); extern InstanceConfig *readInstanceConfigFile(InstanceState *instanceState); @@ -959,8 +966,7 @@ extern void pgBackupWriteControl(FILE *out, pgBackup *backup, bool utc); extern void write_backup_filelist(pgBackup *backup, parray *files, const char *root, parray *external_list, bool sync); -extern void pgBackupGetPath2(const pgBackup *backup, char *path, size_t len, - const char *subdir1, const char *subdir2); + extern void pgBackupCreateDir(pgBackup *backup, const char *backup_instance_path); extern void pgNodeInit(PGNodeInfo *node); extern void pgBackupInit(pgBackup *backup); diff --git a/src/validate.c b/src/validate.c index ce434f027..0a5397550 100644 --- a/src/validate.c +++ b/src/validate.c @@ -403,7 +403,6 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) errno = 0; while ((dent = readdir(dir))) { - char conf_path[MAXPGPATH]; char child[MAXPGPATH]; struct stat st; InstanceState *instanceState; @@ -432,19 +431,16 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) catalogState->backup_subdir_path, instanceState->instance_name); join_path_components(instanceState->instance_wal_subdir_path, catalogState->wal_subdir_path, instanceState->instance_name); - + join_path_components(instanceState->instance_config_path, + instanceState->instance_backup_subdir_path, BACKUP_CATALOG_CONF_FILE); #ifdef REFACTORE_ME - sprintf(backup_instance_path, "%s/%s/%s", - catalogState->catalog_path, BACKUPS_DIR, instanceState->instance_name); - sprintf(arclog_path, "%s/%s/%s", catalogState->catalog_path, "wal", instanceState->instance_name); #endif - join_path_components(conf_path, backup_instance_path, - BACKUP_CATALOG_CONF_FILE); - if (config_read_opt(conf_path, instance_options, ERROR, false, + + if (config_read_opt(instanceState->instance_config_path, instance_options, ERROR, false, true) == 0) { - elog(WARNING, "Configuration file \"%s\" is empty", conf_path); + elog(WARNING, "Configuration file \"%s\" is empty", instanceState->instance_config_path); corrupted_backup_found = true; continue; } From d9b3fb22d60c2f7b5033e5b4a3fced60e04a87d3 Mon Sep 17 00:00:00 2001 From: anastasia Date: Wed, 3 Feb 2021 21:44:32 +0300 Subject: [PATCH 030/525] Refactoring. Get rid of global variable archlog_path --- src/backup.c | 15 ++++++++------- src/delete.c | 6 +++--- src/pg_probackup.c | 6 ------ src/pg_probackup.h | 7 ------- src/restore.c | 8 +++++--- src/validate.c | 7 ++----- 6 files changed, 18 insertions(+), 31 deletions(-) diff --git a/src/backup.c b/src/backup.c index baed44fc5..3c8c0c93a 100644 --- a/src/backup.c +++ b/src/backup.c @@ -262,9 +262,9 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, if (current.backup_mode == BACKUP_MODE_DIFF_PAGE || !stream_wal) { /* Check that archive_dir can be reached */ - if (fio_access(arclog_path, F_OK, FIO_BACKUP_HOST) != 0) + if (fio_access(instanceState->instance_wal_subdir_path, F_OK, FIO_BACKUP_HOST) != 0) elog(ERROR, "WAL archive directory is not accessible \"%s\": %s", - arclog_path, strerror(errno)); + instanceState->instance_wal_subdir_path, strerror(errno)); /* * Do not wait start_lsn for stream backup. @@ -391,7 +391,8 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, * reading WAL segments present in archives up to the point * where this backup has started. 
*/ - pagemap_isok = extractPageMap(arclog_path, instance_config.xlog_seg_size, + pagemap_isok = extractPageMap(instanceState->instance_wal_subdir_path, + instance_config.xlog_seg_size, prev_backup->start_lsn, prev_backup->tli, current.start_lsn, current.tli, tli_list); } @@ -1306,8 +1307,8 @@ wait_wal_lsn(InstanceState *instanceState, XLogRecPtr target_lsn, bool is_start_ } else { - join_path_components(wal_segment_path, arclog_path, wal_segment); - wal_segment_dir = arclog_path; + join_path_components(wal_segment_path, instanceState->instance_wal_subdir_path, wal_segment); + wal_segment_dir = instanceState->instance_wal_subdir_path; } /* TODO: remove this in 3.0 (it is a cludge against some old bug with archive_timeout) */ @@ -1682,7 +1683,7 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb xlog_path = stream_xlog_path; } else - xlog_path = arclog_path; + xlog_path = instanceState->instance_wal_subdir_path; GetXLogSegNo(stop_backup_lsn_tmp, segno, instance_config.xlog_seg_size); @@ -1896,7 +1897,7 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb xlog_path = stream_xlog_path; } else - xlog_path = arclog_path; + xlog_path = instanceState->instance_wal_subdir_path; backup->stop_lsn = stop_backup_lsn; backup->recovery_xid = recovery_xid; diff --git a/src/delete.c b/src/delete.c index e833487a9..e66ce0cd4 100644 --- a/src/delete.c +++ b/src/delete.c @@ -989,7 +989,7 @@ do_delete_instance(InstanceState *instanceState) parray_free(backup_list); /* Delete all wal files. */ - pgut_rmtree(arclog_path, false, true); + pgut_rmtree(instanceState->instance_wal_subdir_path, false, true); /* Delete backup instance config file */ if (remove(instanceState->instance_config_path)) @@ -1003,8 +1003,8 @@ do_delete_instance(InstanceState *instanceState) elog(ERROR, "Can't remove \"%s\": %s", instanceState->instance_backup_subdir_path, strerror(errno)); - if (rmdir(arclog_path) != 0) - elog(ERROR, "Can't remove \"%s\": %s", arclog_path, + if (rmdir(instanceState->instance_wal_subdir_path) != 0) + elog(ERROR, "Can't remove \"%s\": %s", instanceState->instance_wal_subdir_path, strerror(errno)); elog(INFO, "Instance '%s' successfully deleted", instanceState->instance_name); diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 6b297e892..453aab458 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -69,11 +69,6 @@ static char *backup_path = NULL; * $BACKUP_PATH/backups/instance_name */ static char backup_instance_path[MAXPGPATH]; -/* - * path or to the wal files in the backup catalog - * $BACKUP_PATH/wal/instance_name - */ -char arclog_path[MAXPGPATH] = ""; static CatalogState *catalogState = NULL; /* ================ catalogState (END) =========== */ @@ -510,7 +505,6 @@ main(int argc, char *argv[]) */ sprintf(backup_instance_path, "%s/%s/%s", backup_path, BACKUPS_DIR, instance_name); - sprintf(arclog_path, "%s/%s/%s", backup_path, "wal", instance_name); /* * Fill InstanceConfig structure fields used to generate pathes inside diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 2731b4558..d4dc90281 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -755,13 +755,6 @@ typedef struct BackupPageHeader2 #define IsSshProtocol() (instance_config.remote.host && strcmp(instance_config.remote.proto, "ssh") == 0) -/* ====== CatalogState ======= */ - -/* directory options */ -extern char arclog_path[MAXPGPATH]; - -/* ====== CatalogState (END) ======= */ - /* common options */ extern pid_t my_pid; extern __thread int my_thread_num; 
diff --git a/src/restore.c b/src/restore.c index 398726772..481aee2b2 100644 --- a/src/restore.c +++ b/src/restore.c @@ -287,7 +287,8 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg // elog(LOG, "target timeline ID = %u", rt->target_tli); /* Read timeline history files from archives */ - timelines = read_timeline_history(arclog_path, rt->target_tli, true); + timelines = read_timeline_history(instanceState->instance_wal_subdir_path, + rt->target_tli, true); if (!satisfy_timeline(timelines, current_backup)) { @@ -489,7 +490,8 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg elog(ERROR, "Incremental restore in 'lsn' mode require " "data_checksums to be enabled in destination data directory"); - timelines = read_timeline_history(arclog_path, redo.tli, false); + timelines = read_timeline_history(instanceState->instance_wal_subdir_path, + redo.tli, false); if (!timelines) elog(WARNING, "Failed to get history for redo timeline %i, " @@ -604,7 +606,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg * We pass base_full_backup timeline as last argument to this function, * because it's needed to form the name of xlog file. */ - validate_wal(dest_backup, arclog_path, rt->target_time, + validate_wal(dest_backup, instanceState->instance_wal_subdir_path, rt->target_time, rt->target_xid, rt->target_lsn, dest_backup->tli, instance_config.xlog_seg_size); } diff --git a/src/validate.c b/src/validate.c index 0a5397550..1c013b92c 100644 --- a/src/validate.c +++ b/src/validate.c @@ -433,9 +433,6 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) catalogState->wal_subdir_path, instanceState->instance_name); join_path_components(instanceState->instance_config_path, instanceState->instance_backup_subdir_path, BACKUP_CATALOG_CONF_FILE); -#ifdef REFACTORE_ME - sprintf(arclog_path, "%s/%s/%s", catalogState->catalog_path, "wal", instanceState->instance_name); -#endif if (config_read_opt(instanceState->instance_config_path, instance_options, ERROR, false, true) == 0) @@ -587,7 +584,7 @@ do_validate_instance(InstanceState *instanceState) /* Validate corresponding WAL files */ if (current_backup->status == BACKUP_STATUS_OK) - validate_wal(current_backup, arclog_path, 0, + validate_wal(current_backup, instanceState->instance_wal_subdir_path, 0, 0, 0, current_backup->tli, instance_config.xlog_seg_size); @@ -684,7 +681,7 @@ do_validate_instance(InstanceState *instanceState) { /* Revalidation successful, validate corresponding WAL files */ - validate_wal(backup, arclog_path, 0, + validate_wal(backup, instanceState->instance_wal_subdir_path, 0, 0, 0, backup->tli, instance_config.xlog_seg_size); } From 6c52147c478dbb2e9a663482051e0a19d0476645 Mon Sep 17 00:00:00 2001 From: anastasia Date: Wed, 3 Feb 2021 21:48:53 +0300 Subject: [PATCH 031/525] Refactoring. Get rid of global variable backup_instance_path (2) --- src/pg_probackup.c | 26 +++++--------------------- 1 file changed, 5 insertions(+), 21 deletions(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 453aab458..38f78f931 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -64,11 +64,6 @@ const char *PROGRAM_EMAIL = "https://github.com/postgrespro/pg_probackup/issues /* directory options */ /* TODO make it local variable, pass as an argument to all commands that need it. 
*/ static char *backup_path = NULL; -/* - * path or to the data files in the backup catalog - * $BACKUP_PATH/backups/instance_name - */ -static char backup_instance_path[MAXPGPATH]; static CatalogState *catalogState = NULL; /* ================ catalogState (END) =========== */ @@ -498,14 +493,6 @@ main(int argc, char *argv[]) */ if ((backup_path != NULL) && instance_name) { - /* - * Fill global variables used to generate pathes inside the instance's - * backup catalog. - * TODO replace global variables with InstanceConfig structure fields - */ - sprintf(backup_instance_path, "%s/%s/%s", - backup_path, BACKUPS_DIR, instance_name); - /* * Fill InstanceConfig structure fields used to generate pathes inside * the instance's backup catalog. @@ -530,10 +517,11 @@ main(int argc, char *argv[]) { struct stat st; - if (fio_stat(backup_instance_path, &st, true, FIO_BACKUP_HOST) != 0) + if (fio_stat(instanceState->instance_backup_subdir_path, + &st, true, FIO_BACKUP_HOST) != 0) { elog(WARNING, "Failed to access directory \"%s\": %s", - backup_instance_path, strerror(errno)); + instanceState->instance_backup_subdir_path, strerror(errno)); // TODO: redundant message, should we get rid of it? elog(ERROR, "Instance '%s' does not exist in this backup catalog", @@ -555,7 +543,6 @@ main(int argc, char *argv[]) */ if (instance_name) { - char path[MAXPGPATH]; /* Read environment variables */ config_get_opt_env(instance_options); @@ -563,13 +550,10 @@ main(int argc, char *argv[]) if (backup_subcmd != ADD_INSTANCE_CMD && backup_subcmd != ARCHIVE_GET_CMD) { - join_path_components(path, backup_instance_path, - BACKUP_CATALOG_CONF_FILE); - if (backup_subcmd == CHECKDB_CMD) - config_read_opt(path, instance_options, ERROR, true, true); + config_read_opt(instanceState->instance_config_path, instance_options, ERROR, true, true); else - config_read_opt(path, instance_options, ERROR, true, false); + config_read_opt(instanceState->instance_config_path, instance_options, ERROR, true, false); /* * We can determine our location only after reading the configuration file, From f89793158bca9f8ce9239e844029f1cfc86e36d0 Mon Sep 17 00:00:00 2001 From: anastasia Date: Wed, 3 Feb 2021 22:10:17 +0300 Subject: [PATCH 032/525] Refactoring. Remove redundand fields from InstanceConfig --- src/archive.c | 18 +++++++++--------- src/configure.c | 12 ++++++------ src/delete.c | 18 +++++++++--------- src/pg_probackup.c | 20 ++------------------ src/pg_probackup.h | 8 ++------ src/show.c | 4 ++-- 6 files changed, 30 insertions(+), 50 deletions(-) diff --git a/src/archive.c b/src/archive.c index 28622cc57..ef87910f8 100644 --- a/src/archive.c +++ b/src/archive.c @@ -113,7 +113,7 @@ static parray *setup_push_filelist(const char *archive_status_dir, * Where archlog_path is $BACKUP_PATH/wal/instance_name */ void -do_archive_push(InstanceConfig *instance, char *wal_file_path, +do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *wal_file_path, char *wal_file_name, int batch_size, bool overwrite, bool no_sync, bool no_ready_rename) { @@ -156,7 +156,7 @@ do_archive_push(InstanceConfig *instance, char *wal_file_path, if (system_id != instance->system_identifier) elog(ERROR, "Refuse to push WAL segment %s into archive. Instance parameters mismatch." 
"Instance '%s' should have SYSTEM_ID = " UINT64_FORMAT " instead of " UINT64_FORMAT, - wal_file_name, instance->name, instance->system_identifier, system_id); + wal_file_name, instanceState->instance_name, instance->system_identifier, system_id); if (instance->compress_alg == PGLZ_COMPRESS) elog(ERROR, "Cannot use pglz for WAL compression"); @@ -165,7 +165,7 @@ do_archive_push(InstanceConfig *instance, char *wal_file_path, join_path_components(archive_status_dir, pg_xlog_dir, "archive_status"); /* Create 'archlog_path' directory. Do nothing if it already exists. */ - //fio_mkdir(instance->arclog_path, DIR_PERMISSION, FIO_BACKUP_HOST); + //fio_mkdir(instanceState->instance_wal_subdir_path, DIR_PERMISSION, FIO_BACKUP_HOST); #ifdef HAVE_LIBZ if (instance->compress_alg == ZLIB_COMPRESS) @@ -206,7 +206,7 @@ do_archive_push(InstanceConfig *instance, char *wal_file_path, WALSegno *xlogfile = (WALSegno *) parray_get(batch_files, i); rc = push_file(xlogfile, archive_status_dir, - pg_xlog_dir, instance->arclog_path, + pg_xlog_dir, instanceState->instance_wal_subdir_path, overwrite, no_sync, instance->archive_timeout, no_ready_rename || (strcmp(xlogfile->name, wal_file_name) == 0) ? true : false, @@ -231,7 +231,7 @@ do_archive_push(InstanceConfig *instance, char *wal_file_path, archive_push_arg *arg = &(threads_args[i]); arg->first_filename = wal_file_name; - arg->archive_dir = instance->arclog_path; + arg->archive_dir = instanceState->instance_wal_subdir_path; arg->pg_xlog_dir = pg_xlog_dir; arg->archive_status_dir = archive_status_dir; arg->overwrite = overwrite; @@ -1008,7 +1008,7 @@ setup_push_filelist(const char *archive_status_dir, const char *first_file, */ void -do_archive_get(InstanceConfig *instance, const char *prefetch_dir_arg, +do_archive_get(InstanceState *instanceState, InstanceConfig *instance, const char *prefetch_dir_arg, char *wal_file_path, char *wal_file_name, int batch_size, bool validate_wal) { @@ -1047,7 +1047,7 @@ do_archive_get(InstanceConfig *instance, const char *prefetch_dir_arg, /* full filepath to WAL file in archive directory. * $BACKUP_PATH/wal/instance_name/000000010000000000000001 */ - join_path_components(backup_wal_file_path, instance->arclog_path, wal_file_name); + join_path_components(backup_wal_file_path, instanceState->instance_wal_subdir_path, wal_file_name); INSTR_TIME_SET_CURRENT(start_time); if (num_threads > batch_size) @@ -1098,7 +1098,7 @@ do_archive_get(InstanceConfig *instance, const char *prefetch_dir_arg, * copy requested file directly from archive. 
*/ if (!next_wal_segment_exists(tli, segno, prefetch_dir, instance->xlog_seg_size)) - n_fetched = run_wal_prefetch(prefetch_dir, instance->arclog_path, + n_fetched = run_wal_prefetch(prefetch_dir, instanceState->instance_wal_subdir_path, tli, segno, num_threads, false, batch_size, instance->xlog_seg_size); @@ -1137,7 +1137,7 @@ do_archive_get(InstanceConfig *instance, const char *prefetch_dir_arg, // rmtree(prefetch_dir, false); /* prefetch files */ - n_fetched = run_wal_prefetch(prefetch_dir, instance->arclog_path, + n_fetched = run_wal_prefetch(prefetch_dir, instanceState->instance_wal_subdir_path, tli, segno, num_threads, true, batch_size, instance->xlog_seg_size); diff --git a/src/configure.c b/src/configure.c index b5ef8a356..21e6b6662 100644 --- a/src/configure.c +++ b/src/configure.c @@ -339,8 +339,6 @@ init_config(InstanceConfig *config, const char *instance_name) { MemSet(config, 0, sizeof(InstanceConfig)); - config->name = pgut_strdup(instance_name); - /* * Starting from PostgreSQL 11 WAL segment size may vary. Prior to * PostgreSQL 10 xlog_seg_size is equal to XLOG_SEG_SIZE. @@ -591,13 +589,15 @@ readInstanceConfigFile(InstanceState *instanceState) init_config(instance, instanceState->instance_name); - sprintf(instance->backup_instance_path, "%s/%s/%s", +#ifdef REFACTORE_ME + sprintf(instanceState->instance_backup_subdir_path, "%s/%s/%s", instanceState->catalog_state->catalog_path, BACKUPS_DIR, instanceState->instance_name); - canonicalize_path(instance->backup_instance_path); + canonicalize_path(instanceState->instance_backup_subdir_path); - sprintf(instance->arclog_path, "%s/%s/%s", + sprintf(instanceState->instance_wal_subdir_path, "%s/%s/%s", instanceState->catalog_state->catalog_path, "wal", instanceState->instance_name); - canonicalize_path(instance->arclog_path); + canonicalize_path(instanceState->instance_wal_subdir_path); +#endif if (fio_access(instanceState->instance_config_path, F_OK, FIO_BACKUP_HOST) != 0) { diff --git a/src/delete.c b/src/delete.c index e66ce0cd4..3eb1a8f61 100644 --- a/src/delete.c +++ b/src/delete.c @@ -14,7 +14,7 @@ #include #include -static void delete_walfiles_in_tli(XLogRecPtr keep_lsn, timelineInfo *tli, +static void delete_walfiles_in_tli(InstanceState *instanceState, XLogRecPtr keep_lsn, timelineInfo *tli, uint32 xlog_seg_size, bool dry_run); static void do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purge_list); @@ -698,22 +698,22 @@ do_retention_wal(InstanceState *instanceState, bool dry_run) { if (instance_config.wal_depth >= 0 && !(XLogRecPtrIsInvalid(tlinfo->anchor_lsn))) { - delete_walfiles_in_tli(tlinfo->anchor_lsn, + delete_walfiles_in_tli(instanceState, tlinfo->anchor_lsn, tlinfo, instance_config.xlog_seg_size, dry_run); } else { - delete_walfiles_in_tli(tlinfo->oldest_backup->start_lsn, + delete_walfiles_in_tli(instanceState, tlinfo->oldest_backup->start_lsn, tlinfo, instance_config.xlog_seg_size, dry_run); } } else { if (instance_config.wal_depth >= 0 && !(XLogRecPtrIsInvalid(tlinfo->anchor_lsn))) - delete_walfiles_in_tli(tlinfo->anchor_lsn, + delete_walfiles_in_tli(instanceState, tlinfo->anchor_lsn, tlinfo, instance_config.xlog_seg_size, dry_run); else - delete_walfiles_in_tli(InvalidXLogRecPtr, + delete_walfiles_in_tli(instanceState, InvalidXLogRecPtr, tlinfo, instance_config.xlog_seg_size, dry_run); } } @@ -806,7 +806,7 @@ delete_backup_files(pgBackup *backup) * Q: Maybe we should stop treating partial WAL segments as second-class citizens? 
*/ static void -delete_walfiles_in_tli(XLogRecPtr keep_lsn, timelineInfo *tlinfo, +delete_walfiles_in_tli(InstanceState *instanceState, XLogRecPtr keep_lsn, timelineInfo *tlinfo, uint32 xlog_seg_size, bool dry_run) { XLogSegNo FirstToDeleteSegNo; @@ -931,7 +931,7 @@ delete_walfiles_in_tli(XLogRecPtr keep_lsn, timelineInfo *tlinfo, { char wal_fullpath[MAXPGPATH]; - join_path_components(wal_fullpath, instance_config.arclog_path, wal_file->file.name); + join_path_components(wal_fullpath, instanceState->instance_wal_subdir_path, wal_file->file.name); /* save segment from purging */ if (instance_config.wal_depth >= 0 && wal_file->keep) @@ -1099,12 +1099,12 @@ do_delete_status(InstanceState *instanceState, InstanceConfig *instance_config, if (!dry_run && n_deleted > 0) elog(INFO, "Successfully deleted %i %s from instance '%s'", n_deleted, n_deleted == 1 ? "backup" : "backups", - instance_config->name); + instanceState->instance_name); if (n_found == 0) elog(WARNING, "Instance '%s' has no backups with status '%s'", - instance_config->name, pretty_status); + instanceState->instance_name, pretty_status); // we don`t do WAL purge here, because it is impossible to correctly handle // dry-run case. diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 38f78f931..046d3989b 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -470,9 +470,6 @@ main(int argc, char *argv[]) } else { - /* Set instance name */ - instance_config.name = pgut_strdup(instance_name); - instanceState = pgut_new(InstanceState); instanceState->catalog_state = catalogState; @@ -493,19 +490,6 @@ main(int argc, char *argv[]) */ if ((backup_path != NULL) && instance_name) { - /* - * Fill InstanceConfig structure fields used to generate pathes inside - * the instance's backup catalog. - * TODO continue refactoring to use these fields instead of global vars - */ - sprintf(instance_config.backup_instance_path, "%s/%s/%s", - backup_path, BACKUPS_DIR, instance_name); - canonicalize_path(instance_config.backup_instance_path); - - sprintf(instance_config.arclog_path, "%s/%s/%s", - backup_path, "wal", instance_name); - canonicalize_path(instance_config.arclog_path); - /* * Ensure that requested backup instance exists. 
* for all commands except init, which doesn't take this parameter, @@ -780,11 +764,11 @@ main(int argc, char *argv[]) switch (backup_subcmd) { case ARCHIVE_PUSH_CMD: - do_archive_push(&instance_config, wal_file_path, wal_file_name, + do_archive_push(instanceState, &instance_config, wal_file_path, wal_file_name, batch_size, file_overwrite, no_sync, no_ready_rename); break; case ARCHIVE_GET_CMD: - do_archive_get(&instance_config, prefetch_dir, + do_archive_get(instanceState, &instance_config, prefetch_dir, wal_file_path, wal_file_name, batch_size, !no_validate_wal); break; case ADD_INSTANCE_CMD: diff --git a/src/pg_probackup.h b/src/pg_probackup.h index d4dc90281..e2a07d795 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -362,10 +362,6 @@ typedef struct ArchiveOptions */ typedef struct InstanceConfig { - char *name; - char arclog_path[MAXPGPATH]; - char backup_instance_path[MAXPGPATH]; - uint64 system_identifier; uint32 xlog_seg_size; @@ -872,10 +868,10 @@ extern int do_init(CatalogState *catalogState); extern int do_add_instance(InstanceState *instanceState, InstanceConfig *instance); /* in archive.c */ -extern void do_archive_push(InstanceConfig *instance, char *wal_file_path, +extern void do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *wal_file_path, char *wal_file_name, int batch_size, bool overwrite, bool no_sync, bool no_ready_rename); -extern void do_archive_get(InstanceConfig *instance, const char *prefetch_dir_arg, char *wal_file_path, +extern void do_archive_get(InstanceState *instanceState, InstanceConfig *instance, const char *prefetch_dir_arg, char *wal_file_path, char *wal_file_name, int batch_size, bool validate_wal); /* in configure.c */ diff --git a/src/show.c b/src/show.c index ed4f43ef3..dc953a3f6 100644 --- a/src/show.c +++ b/src/show.c @@ -781,9 +781,9 @@ show_instance_archive(InstanceState *instanceState, InstanceConfig *instance) timelineinfos = catalog_get_timelines(instanceState, instance); if (show_format == SHOW_PLAIN) - show_archive_plain(instance->name, instance->xlog_seg_size, timelineinfos, true); + show_archive_plain(instanceState->instance_name, instance->xlog_seg_size, timelineinfos, true); else if (show_format == SHOW_JSON) - show_archive_json(instance->name, instance->xlog_seg_size, timelineinfos); + show_archive_json(instanceState->instance_name, instance->xlog_seg_size, timelineinfos); else elog(ERROR, "Invalid show format %d", (int) show_format); } From 736b9c2a0c1abf209d54e556266c08d3e5422d07 Mon Sep 17 00:00:00 2001 From: anastasia Date: Thu, 4 Feb 2021 13:34:53 +0300 Subject: [PATCH 033/525] Refactoring. 
Fixes of previous changes --- src/catalog.c | 3 ++- src/configure.c | 11 ----------- src/pg_probackup.h | 2 +- 3 files changed, 3 insertions(+), 13 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index d8b6d109a..909a1af29 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -726,7 +726,6 @@ catalog_get_instance_list(CatalogState *catalogState) continue; instanceState = pgut_new(InstanceState); - instanceState->config = readInstanceConfigFile(instanceState); strncpy(instanceState->instance_name, dent->d_name, MAXPGPATH); join_path_components(instanceState->instance_backup_subdir_path, @@ -735,6 +734,8 @@ catalog_get_instance_list(CatalogState *catalogState) catalogState->wal_subdir_path, instanceState->instance_name); join_path_components(instanceState->instance_config_path, instanceState->instance_backup_subdir_path, BACKUP_CATALOG_CONF_FILE); + + instanceState->config = readInstanceConfigFile(instanceState); parray_append(instances, instanceState); } diff --git a/src/configure.c b/src/configure.c index 21e6b6662..53ac8ef78 100644 --- a/src/configure.c +++ b/src/configure.c @@ -589,16 +589,6 @@ readInstanceConfigFile(InstanceState *instanceState) init_config(instance, instanceState->instance_name); -#ifdef REFACTORE_ME - sprintf(instanceState->instance_backup_subdir_path, "%s/%s/%s", - instanceState->catalog_state->catalog_path, BACKUPS_DIR, instanceState->instance_name); - canonicalize_path(instanceState->instance_backup_subdir_path); - - sprintf(instanceState->instance_wal_subdir_path, "%s/%s/%s", - instanceState->catalog_state->catalog_path, "wal", instanceState->instance_name); - canonicalize_path(instanceState->instance_wal_subdir_path); -#endif - if (fio_access(instanceState->instance_config_path, F_OK, FIO_BACKUP_HOST) != 0) { elog(WARNING, "Control file \"%s\" doesn't exist", instanceState->instance_config_path); @@ -632,7 +622,6 @@ readInstanceConfigFile(InstanceState *instanceState) #endif return instance; - } static void diff --git a/src/pg_probackup.h b/src/pg_probackup.h index e2a07d795..13f1c0724 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -371,7 +371,7 @@ typedef struct InstanceConfig ConnectionOptions conn_opt; ConnectionOptions master_conn_opt; - uint32 replica_timeout; + uint32 replica_timeout; //Deprecated. 
Not used anywhere /* Wait timeout for WAL segment archiving */ uint32 archive_timeout; From 8041e666cf1eb375ce7853ccccace048d2b9776c Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 4 Feb 2021 22:05:10 +0300 Subject: [PATCH 034/525] tests: fix tests.auth_test.SimpleAuthTest.test_backup_via_unprivileged_user --- tests/auth_test.py | 58 ++++++++++++---------------------------------- 1 file changed, 15 insertions(+), 43 deletions(-) diff --git a/tests/auth_test.py b/tests/auth_test.py index eca62316b..c84fdb981 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -34,10 +34,8 @@ def test_backup_via_unprivileged_user(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'max_wal_senders': '2'} - ) + initdb_params=['--data-checksums']) + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -64,7 +62,15 @@ def test_backup_via_unprivileged_user(self): "GRANT EXECUTE ON FUNCTION" " pg_start_backup(text, boolean, boolean) TO backup;") - time.sleep(1) + if self.get_version(node) < 100000: + node.safe_psql( + 'postgres', + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup") + else: + node.safe_psql( + 'postgres', + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup") + try: self.backup_node( backup_dir, 'node', node, options=['-U', 'backup']) @@ -84,8 +90,6 @@ def test_backup_via_unprivileged_user(self): "GRANT EXECUTE ON FUNCTION" " pg_create_restore_point(text) TO backup;") - time.sleep(1) - try: self.backup_node( backup_dir, 'node', node, options=['-U', 'backup']) @@ -129,50 +133,18 @@ def test_backup_via_unprivileged_user(self): node.stop() node.slow_start() - try: - self.backup_node( - backup_dir, 'node', node, options=['-U', 'backup']) - self.assertEqual( - 1, 0, - "Expecting Error due to missing grant on clearing ptrack_files.") - except ProbackupException as e: - self.assertIn( - "ERROR: must be superuser or replication role to clear ptrack files\n" - "query was: SELECT pg_catalog.pg_ptrack_clear()", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - time.sleep(1) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['-U', 'backup']) - self.assertEqual( - 1, 0, - "Expecting Error due to missing grant on clearing ptrack_files.") - except ProbackupException as e: - self.assertIn( - "ERROR: must be superuser or replication role read ptrack files\n" - "query was: select pg_catalog.pg_ptrack_control_lsn()", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - node.safe_psql( "postgres", "ALTER ROLE backup REPLICATION") - time.sleep(1) - # FULL self.backup_node( - backup_dir, 'node', node, - options=['-U', 'backup']) + backup_dir, 'node', node, options=['-U', 'backup']) # PTRACK - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['-U', 'backup']) +# self.backup_node( +# backup_dir, 'node', node, +# backup_type='ptrack', options=['-U', 'backup']) # Clean after yourself self.del_test_dir(module_name, fname) From 46c14304d758d713f7204e9e68c996c9f6457af0 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 8 Feb 2021 06:00:51 +0300 Subject: [PATCH 035/525] [Issue #320] incorrect crc calculation for pg_filenode.map --- tests/incr_restore.py | 110 ++++++++++++++++++++++++++++++++++++++++++ 1 file 
changed, 110 insertions(+) diff --git a/tests/incr_restore.py b/tests/incr_restore.py index 3aa84121f..6246ae197 100644 --- a/tests/incr_restore.py +++ b/tests/incr_restore.py @@ -10,6 +10,7 @@ import shutil import json from testgres import QueryException +from distutils.dir_util import copy_tree module_name = 'incr_restore' @@ -2390,5 +2391,114 @@ def test_incremental_partial_restore_exclude_tablespace_checksum(self): # Clean after yourself self.del_test_dir(module_name, fname, [node2]) + def test_incremental_pg_filenode_map(self): + """""" + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node1 = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node1'), + initdb_params=['--data-checksums']) + node1.cleanup() + + node.pgbench_init(scale=5) + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + + # in node1 restore full backup + self.restore_node(backup_dir, 'node', node1) + self.set_auto_conf(node1, {'port': node1.port}) + node1.slow_start() + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1']) + + pgbench = node1.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1']) + + node.safe_psql( + 'postgres', + 'reindex index pg_type_oid_index') + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + + node1.stop() + + + # incremental restore into node1 + self.restore_node(backup_dir, 'node', node1, options=["-I", "checksum", '--log-level-file=VERBOSE']) + + self.set_auto_conf(node1, {'port': node1.port}) + node1.slow_start() + + node1.safe_psql( + 'postgres', + 'select 1') + + # Clean after yourself + self.del_test_dir(module_name, fname) + + + def test_incr_backup_filenode_map(self): + """""" + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node1 = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node1'), + initdb_params=['--data-checksums']) + node1.cleanup() + + node.pgbench_init(scale=5) + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1']) + + backup_id = self.backup_node(backup_dir, 'node', node, backup_type='delta') + + node.safe_psql( + 'postgres', + 'reindex index pg_type_oid_index') + + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta', options=['--log-level-file=VERBOSE']) + + # incremental restore into node1 + node.cleanup() + + self.restore_node(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + 'postgres', + 'select 1') + + # Clean after yourself + self.del_test_dir(module_name, fname) + # check that MinRecPoint and BackupStartLsn are correctly used in case of --incrementa-lsn # incremental restore + partial restore. 
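Both tests added above follow the same reproduction recipe for issue #320: take a backup, generate some churn with pgbench, reindex a mapped catalog index so that pg_filenode.map is rewritten, take another backup, then restore incrementally. Outside the test harness the recipe looks roughly like the sketch below; the backup catalog path, instance name and restore target are placeholders, not values taken from the test suite.

```
# Rough command-line equivalent of the reproduction recipe used by the tests
# above (issue #320). BACKUP_DIR, INSTANCE and PGDATA are placeholder values.
import subprocess

BACKUP_DIR = "/tmp/backup_catalog"   # hypothetical backup catalog
INSTANCE = "node"                    # hypothetical instance name
PGDATA = "/tmp/node_restored"        # hypothetical restore target

def run(*cmd):
    # Fail loudly so a broken step is not silently ignored.
    subprocess.run(cmd, check=True)

# FULL backup of the running instance.
run("pg_probackup", "backup", "-B", BACKUP_DIR, "--instance", INSTANCE,
    "-b", "FULL", "--stream")

# Reindexing a mapped catalog index forces PostgreSQL to rewrite
# pg_filenode.map, which is what the faulty CRC handling tripped over.
run("psql", "-d", "postgres", "-c", "reindex index pg_type_oid_index")

# Take an incremental backup on top of the FULL one ...
run("pg_probackup", "backup", "-B", BACKUP_DIR, "--instance", INSTANCE,
    "-b", "DELTA", "--stream")

# ... and restore it in incremental checksum mode, the case the tests cover.
run("pg_probackup", "restore", "-B", BACKUP_DIR, "--instance", INSTANCE,
    "-D", PGDATA, "-I", "checksum")
```

If the restored cluster starts and answers a trivial query, the pg_filenode.map copy was consistent, which is the same check the tests perform with `select 1`.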
From 17cc612089bd200972a4ec735379e18bc4533a53 Mon Sep 17 00:00:00 2001 From: anastasia Date: Mon, 8 Feb 2021 15:04:51 +0300 Subject: [PATCH 036/525] tests: remove debug messages in module 'archive' --- tests/archive.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/archive.py b/tests/archive.py index 482a2502b..05675065a 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -1908,7 +1908,7 @@ def test_archive_push_sanity(self): output = self.show_archive( backup_dir, 'node', as_json=False, as_text=True, - options=['--log-level-console=VERBOSE']) + options=['--log-level-console=INFO']) self.assertNotIn('WARNING', output) @@ -2181,7 +2181,7 @@ def test_archive_get_batching_sanity(self): restore_command += ' -j 2 --batch-size=10' - print(restore_command) + # print(restore_command) if node.major_version >= 12: self.set_auto_conf(replica, {'restore_command': restore_command}) @@ -2298,7 +2298,7 @@ def test_archive_get_prefetch_corruption(self): dst_file = os.path.join(replica.data_dir, wal_dir, 'pbk_prefetch', filename) shutil.copyfile(src_file, dst_file) - print(dst_file) + # print(dst_file) # corrupt file if files[-2].endswith('.gz'): From b2ab1dfef8307cbce8ebd5fa9bcd7d031a99d74c Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 8 Feb 2021 18:31:00 +0300 Subject: [PATCH 037/525] [Issue #320] always backup and always restore pg_filenode.map --- src/data.c | 5 +++-- src/pg_probackup.h | 3 ++- src/restore.c | 5 +++++ 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/src/data.c b/src/data.c index d3f67f43c..f7032fb55 100644 --- a/src/data.c +++ b/src/data.c @@ -711,8 +711,9 @@ backup_non_data_file(pgFile *file, pgFile *prev_file, /* * If nonedata file exists in previous backup * and its mtime is less than parent backup start time ... 
*/ - if (prev_file && file->exists_in_prev && - file->mtime <= parent_backup_time) + if ((pg_strcasecmp(file->name, RELMAPPER_FILENAME) != 0) && + (prev_file && file->exists_in_prev && + file->mtime <= parent_backup_time)) { file->crc = fio_get_crc32(from_fullpath, FIO_DB_HOST, false); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index f4adc98cc..a766e705e 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -70,7 +70,8 @@ extern const char *PROGRAM_EMAIL; #define BACKUP_RO_LOCK_FILE "backup_ro.pid" #define DATABASE_FILE_LIST "backup_content.control" #define PG_BACKUP_LABEL_FILE "backup_label" -#define PG_TABLESPACE_MAP_FILE "tablespace_map" +#define PG_TABLESPACE_MAP_FILE "tablespace_map" +#define RELMAPPER_FILENAME "pg_filenode.map" #define EXTERNAL_DIR "external_directories/externaldir" #define DATABASE_MAP "database_map" #define HEADER_MAP "page_header_map" diff --git a/src/restore.c b/src/restore.c index 3f0adf7b7..b3b9965ea 100644 --- a/src/restore.c +++ b/src/restore.c @@ -903,6 +903,11 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, if (parray_bsearch(dest_backup->files, file, pgFileCompareRelPathWithExternal)) redundant = false; + /* pg_filenode.map are always restored, because it's crc cannot be trusted */ + if (file->external_dir_num == 0 && + pg_strcasecmp(file->name, RELMAPPER_FILENAME) == 0) + redundant = true; + /* do not delete the useful internal directories */ if (S_ISDIR(file->mode) && !redundant) continue; From 7ecb56e7438da41b7f8b6bb6d2a04963daa74c7a Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 8 Feb 2021 22:25:58 +0300 Subject: [PATCH 038/525] [Issue #320] improve test coverage --- tests/backup.py | 51 ++++++++++++++++++++++++++++++++++++++ tests/incr_restore.py | 57 +++---------------------------------------- tests/merge.py | 52 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 107 insertions(+), 53 deletions(-) diff --git a/tests/backup.py b/tests/backup.py index 8ca96609f..2ee5b9ace 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -3027,3 +3027,54 @@ def test_issue_231(self): # Clean after yourself self.del_test_dir(module_name, fname) + + def test_incr_backup_filenode_map(self): + """ + https://github.com/postgrespro/pg_probackup/issues/320 + """ + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node1 = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node1'), + initdb_params=['--data-checksums']) + node1.cleanup() + + node.pgbench_init(scale=5) + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1']) + + backup_id = self.backup_node(backup_dir, 'node', node, backup_type='delta') + + node.safe_psql( + 'postgres', + 'reindex index pg_type_oid_index') + + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + # incremental restore into node1 + node.cleanup() + + self.restore_node(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + 'postgres', + 'select 1') + + # Clean after yourself + self.del_test_dir(module_name, fname) diff --git a/tests/incr_restore.py b/tests/incr_restore.py 
index 6246ae197..4595cbb31 100644 --- a/tests/incr_restore.py +++ b/tests/incr_restore.py @@ -2392,7 +2392,9 @@ def test_incremental_partial_restore_exclude_tablespace_checksum(self): self.del_test_dir(module_name, fname, [node2]) def test_incremental_pg_filenode_map(self): - """""" + """ + https://github.com/postgrespro/pg_probackup/issues/320 + """ fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -2436,9 +2438,8 @@ def test_incremental_pg_filenode_map(self): node1.stop() - # incremental restore into node1 - self.restore_node(backup_dir, 'node', node1, options=["-I", "checksum", '--log-level-file=VERBOSE']) + self.restore_node(backup_dir, 'node', node1, options=["-I", "checksum"]) self.set_auto_conf(node1, {'port': node1.port}) node1.slow_start() @@ -2450,55 +2451,5 @@ def test_incremental_pg_filenode_map(self): # Clean after yourself self.del_test_dir(module_name, fname) - - def test_incr_backup_filenode_map(self): - """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), - initdb_params=['--data-checksums']) - node1.cleanup() - - node.pgbench_init(scale=5) - - # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '10', '-c', '1']) - - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='delta') - - node.safe_psql( - 'postgres', - 'reindex index pg_type_oid_index') - - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta', options=['--log-level-file=VERBOSE']) - - # incremental restore into node1 - node.cleanup() - - self.restore_node(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - 'postgres', - 'select 1') - - # Clean after yourself - self.del_test_dir(module_name, fname) - # check that MinRecPoint and BackupStartLsn are correctly used in case of --incrementa-lsn # incremental restore + partial restore. 
diff --git a/tests/merge.py b/tests/merge.py index 44652066a..bf21405bc 100644 --- a/tests/merge.py +++ b/tests/merge.py @@ -2829,5 +2829,57 @@ def test_merge_remote_mode(self): self.del_test_dir(module_name, fname) + def test_merge_pg_filenode_map(self): + """ + https://github.com/postgrespro/pg_probackup/issues/320 + """ + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node1 = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node1'), + initdb_params=['--data-checksums']) + node1.cleanup() + + node.pgbench_init(scale=5) + + # FULL backup + self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1']) + + self.backup_node(backup_dir, 'node', node, backup_type='delta') + + node.safe_psql( + 'postgres', + 'reindex index pg_type_oid_index') + + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + self.merge_backup(backup_dir, 'node', backup_id) + + node.cleanup() + + self.restore_node(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + 'postgres', + 'select 1') + + # Clean after yourself + self.del_test_dir(module_name, fname) + # 1. Need new test with corrupted FULL backup # 2. different compression levels From def4cb4ae27d40881a713eafa7265173f61b525f Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 8 Feb 2021 22:29:21 +0300 Subject: [PATCH 039/525] [Issue #320] minor cleanup in "incr_restore" module --- tests/incr_restore.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/incr_restore.py b/tests/incr_restore.py index 4595cbb31..1d3a52df8 100644 --- a/tests/incr_restore.py +++ b/tests/incr_restore.py @@ -10,7 +10,6 @@ import shutil import json from testgres import QueryException -from distutils.dir_util import copy_tree module_name = 'incr_restore' @@ -2452,4 +2451,3 @@ def test_incremental_pg_filenode_map(self): self.del_test_dir(module_name, fname) # check that MinRecPoint and BackupStartLsn are correctly used in case of --incrementa-lsn -# incremental restore + partial restore. 
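The patch that follows ([Issue #311]) reworks catalog locking around two PID files: an exclusive lock file holding a single owner and a shared lock file listing concurrent readers, both released from an atexit hook. A minimal, generic sketch of that two-file pattern is given below; file names, timeouts and error handling are simplified assumptions and this is not pg_probackup's actual implementation.

```
# Minimal sketch of a two-file PID locking scheme: one file for the single
# exclusive owner, one file listing shared owners, cleaned up at process exit.
# Names and timeouts are assumptions made for illustration only.
import atexit
import os
import time

EXCL_FILE = "backup.pid"       # plays the role of the exclusive lock file
SHARED_FILE = "backup_ro.pid"  # plays the role of the shared lock file

def pid_alive(pid):
    try:
        os.kill(pid, 0)        # signal 0 only probes for process existence
        return True
    except ProcessLookupError:
        return False
    except PermissionError:    # exists, but owned by another user
        return True

def grab_exclusive(timeout=30):
    """Atomically create the exclusive lock file, reclaiming stale locks."""
    deadline = time.time() + timeout
    while True:
        try:
            fd = os.open(EXCL_FILE, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
            os.write(fd, ("%d\n" % os.getpid()).encode())
            os.close(fd)
            return
        except FileExistsError:
            try:
                with open(EXCL_FILE) as f:
                    owner = int(f.read().strip() or "0")
            except (FileNotFoundError, ValueError):
                owner = 0
            if owner and not pid_alive(owner):
                os.unlink(EXCL_FILE)   # stale lock left by a dead process
                continue
            if time.time() > deadline:
                raise TimeoutError("backup is locked by PID %d" % owner)
            time.sleep(1)

def grab_shared():
    """Hold the exclusive lock only briefly while registering our PID."""
    grab_exclusive()
    with open(SHARED_FILE, "a") as f:
        f.write("%d\n" % os.getpid())
    os.unlink(EXCL_FILE)

def release_shared():
    """Drop our PID (and dead PIDs) from the shared list at exit."""
    if not os.path.exists(SHARED_FILE):
        return
    try:
        grab_exclusive(timeout=5)
    except TimeoutError:
        return                 # leave the shared entry rather than block exit
    with open(SHARED_FILE) as f:
        pids = [int(line) for line in f if line.strip()]
    pids = [p for p in pids if p != os.getpid() and pid_alive(p)]
    if pids:
        with open(SHARED_FILE, "w") as f:
            f.writelines("%d\n" % p for p in pids)
    else:
        os.unlink(SHARED_FILE)
    os.unlink(EXCL_FILE)

atexit.register(release_shared)
```

The point of briefly taking the exclusive lock while editing the shared list is that concurrent readers cannot lose each other's PIDs when rewriting the file.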
From 02a3665375f5e578f390b6dc4d60ed824d9bfcb2 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Tue, 9 Feb 2021 17:32:27 +0300 Subject: [PATCH 040/525] [Issue #311] Release shared locks at proc exit --- src/catalog.c | 425 +++++++++++++++++++++++++++++++---------------- tests/locking.py | 59 ++++++- 2 files changed, 336 insertions(+), 148 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 8ac3e5799..e1b5d770f 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -26,12 +26,22 @@ static pgBackup *readBackupControlFile(const char *path); static time_t create_backup_dir(pgBackup *backup, const char *backup_instance_path); static bool backup_lock_exit_hook_registered = false; -static parray *lock_files = NULL; +static parray *locks = NULL; -static int lock_backup_exclusive(pgBackup *backup, bool strict); -static bool lock_backup_internal(pgBackup *backup, bool exclusive); -static bool lock_backup_read_only(pgBackup *backup); -static bool wait_read_only_owners(pgBackup *backup); +static int grab_excl_lock_file(const char *backup_dir, const char *backup_id, bool strict); +static int grab_shared_lock_file(pgBackup *backup); +static int wait_shared_owners(pgBackup *backup); + +static void unlock_backup(const char *backup_dir, const char *backup_id, bool exclusive); +static void release_excl_lock_file(const char *backup_dir); +static void release_shared_lock_file(const char *backup_dir); + +typedef struct LockInfo +{ + char backup_id[10]; + char backup_dir[MAXPGPATH]; + bool exclusive; +} LockInfo; static timelineInfo * timelineInfoNew(TimeLineID tli) @@ -66,28 +76,24 @@ timelineInfoFree(void *tliInfo) pfree(tliInfo); } -/* Iterate over locked backups and delete locks files */ +/* Iterate over locked backups and unlock them */ static void unlink_lock_atexit(void) { - int i; + int i; - if (lock_files == NULL) + if (locks == NULL) return; - for (i = 0; i < parray_num(lock_files); i++) + for (i = 0; i < parray_num(locks); i++) { - char *lock_file = (char *) parray_get(lock_files, i); - int res; - - res = fio_unlink(lock_file, FIO_BACKUP_HOST); - if (res != 0 && errno != ENOENT) - elog(WARNING, "%s: %s", lock_file, strerror(errno)); + LockInfo *lock = (LockInfo *) parray_get(locks, i); + unlock_backup(lock->backup_dir, lock->backup_dir, lock->exclusive); } - parray_walk(lock_files, pfree); - parray_free(lock_files); - lock_files = NULL; + parray_walk(locks, pg_free); + parray_free(locks); + locks = NULL; } /* @@ -147,39 +153,39 @@ write_backup_status(pgBackup *backup, BackupStatus status, } /* - * Lock backup in either exclusive or non-exclusive (read-only) mode. + * Lock backup in either exclusive or shared mode. * "strict" flag allows to ignore "out of space" errors and should be * used only by DELETE command to free disk space on filled up * filesystem. * - * Only read only tasks (validate, restore) are allowed to take non-exclusive locks. + * Only read only tasks (validate, restore) are allowed to take shared locks. * Changing backup metadata must be done with exclusive lock. * * Only one process can hold exclusive lock at any time. * Exlusive lock - PID of process, holding the lock - is placed in * lock file: BACKUP_LOCK_FILE. * - * Multiple proccess are allowed to take non-exclusive locks simultaneously. - * Non-exclusive locks - PIDs of proccesses, holding the lock - are placed in + * Multiple proccess are allowed to take shared locks simultaneously. + * Shared locks - PIDs of proccesses, holding the lock - are placed in * separate lock file: BACKUP_RO_LOCK_FILE. 
- * When taking RO lock, a brief exclusive lock is taken. + * When taking shared lock, a brief exclusive lock is taken. + * + * -> exclusive -> grab exclusive lock file and wait until all shared lockers are gone, return + * -> shared -> grab exclusive lock file, grab shared lock file, release exclusive lock file, return * * TODO: lock-timeout as parameter - * TODO: we must think about more fine grain unlock mechanism - separate unlock_backup() function. - * TODO: more accurate naming - * -> exclusive lock -> acquire HW_LATCH and wait until all LW_LATCH`es are clear - * -> shared lock -> acquire HW_LATCH, acquire LW_LATCH, release HW_LATCH */ bool lock_backup(pgBackup *backup, bool strict, bool exclusive) { - int rc; - char lock_file[MAXPGPATH]; - bool enospc_detected = false; + int rc; + char lock_file[MAXPGPATH]; + bool enospc_detected = false; + LockInfo *lock = NULL; join_path_components(lock_file, backup->root_dir, BACKUP_LOCK_FILE); - rc = lock_backup_exclusive(backup, strict); + rc = grab_excl_lock_file(backup->root_dir, base36enc(backup->start_time), strict); if (rc == 1) return false; @@ -188,54 +194,62 @@ lock_backup(pgBackup *backup, bool strict, bool exclusive) enospc_detected = true; if (strict) return false; + + /* + * If we failed to take exclusive lock due to ENOSPC, + * then in lax mode treat such condition as if lock was taken. + */ } /* * We have exclusive lock, now there are following scenarios: * - * 1. If we are for exlusive lock, then we must open the RO lock file + * 1. If we are for exlusive lock, then we must open the shared lock file * and check if any of the processes listed there are still alive. * If some processes are alive and are not going away in lock_timeout, * then return false. * * 2. If we are here for non-exlusive lock, then write the pid - * into RO lock list and release the exclusive lock. + * into shared lock file and release the exclusive lock. */ - if (lock_backup_internal(backup, exclusive)) + if (exclusive) + rc = wait_shared_owners(backup); + else + rc = grab_shared_lock_file(backup); + + if (rc != 0) { - if (!exclusive) - { - /* release exclusive lock */ - if (fio_unlink(lock_file, FIO_BACKUP_HOST) < 0) - elog(ERROR, "Could not remove exclusive lock file \"%s\": %s", - lock_file, strerror(errno)); + /* + * Failed to grab shared lock or (in case of exclusive mode) shared lock owners + * are not going away in time, release the exclusive lock file and return in shame. + */ + release_excl_lock_file(backup->root_dir); + return false; + } - /* we are done */ - return true; - } + if (!exclusive) + { + /* Shared lock file is grabbed, now we can release exclusive lock file */ + release_excl_lock_file(backup->root_dir); + } - /* When locking backup in lax exclusive mode, - * we should wait until all RO locks owners are gone. + if (exclusive && !strict && enospc_detected) + { + /* We are in lax exclusive mode and EONSPC was encountered: + * once again try to grab exclusive lock file, + * because there is a chance that release of shared lock file in wait_shared_owners may have + * freed some space on filesystem, thanks to unlinking of BACKUP_RO_LOCK_FILE. + * If somebody concurrently acquired exclusive lock file first, then we should give up. */ - if (!strict && enospc_detected) - { - /* We are in lax mode and EONSPC was encountered: once again try to grab exclusive lock, - * because there is a chance that lock_backup_read_only may have freed some space on filesystem, - * thanks to unlinking of BACKUP_RO_LOCK_FILE. 
- * If somebody concurrently acquired exclusive lock first, then we should give up. - */ - if (lock_backup_exclusive(backup, strict) == 1) - return false; + if (grab_excl_lock_file(backup->root_dir, base36enc(backup->start_time), strict) == 1) + return false; - return true; - } + return true; } - else - return false; /* - * Arrange to unlink the lock file(s) at proc_exit. + * Arrange the unlocking at proc_exit. */ if (!backup_lock_exit_hook_registered) { @@ -243,10 +257,16 @@ lock_backup(pgBackup *backup, bool strict, bool exclusive) backup_lock_exit_hook_registered = true; } - /* Use parray so that the lock files are unlinked in a loop */ - if (lock_files == NULL) - lock_files = parray_new(); - parray_append(lock_files, pgut_strdup(lock_file)); + /* save lock metadata for later unlocking */ + lock = pgut_malloc(sizeof(LockInfo)); + snprintf(lock->backup_id, 10, "%s", base36enc(backup->backup_id)); + snprintf(lock->backup_dir, MAXPGPATH, "%s", backup->root_dir); + lock->exclusive = exclusive; + + /* Use parray for lock release */ + if (locks == NULL) + locks = parray_new(); + parray_append(locks, lock); return true; } @@ -258,7 +278,7 @@ lock_backup(pgBackup *backup, bool strict, bool exclusive) * 2 Failed to acquire lock due to ENOSPC */ int -lock_backup_exclusive(pgBackup *backup, bool strict) +grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) { char lock_file[MAXPGPATH]; int fd = 0; @@ -268,7 +288,7 @@ lock_backup_exclusive(pgBackup *backup, bool strict) int len; int encoded_pid; - join_path_components(lock_file, backup->root_dir, BACKUP_LOCK_FILE); + join_path_components(lock_file, root_dir, BACKUP_LOCK_FILE); /* * We need a loop here because of race conditions. But don't loop forever @@ -280,8 +300,7 @@ lock_backup_exclusive(pgBackup *backup, bool strict) FILE *fp_out = NULL; if (interrupted) - elog(ERROR, "Interrupted while locking backup %s", - base36enc(backup->start_time)); + elog(ERROR, "Interrupted while locking backup %s", backup_id); /* * Try to create the lock file --- O_EXCL makes this atomic. 
@@ -344,7 +363,7 @@ lock_backup_exclusive(pgBackup *backup, bool strict) if ((empty_tries % LOG_FREQ) == 0) elog(WARNING, "Waiting %u seconds on empty exclusive lock for backup %s", - empty_tries, base36enc(backup->start_time)); + empty_tries, backup_id); sleep(1); /* @@ -372,34 +391,32 @@ lock_backup_exclusive(pgBackup *backup, bool strict) */ if (encoded_pid == my_pid) return 0; - else + + if (kill(encoded_pid, 0) == 0) { - if (kill(encoded_pid, 0) == 0) + /* complain every fifth interval */ + if ((ntries % LOG_FREQ) == 0) { - /* complain every fifth interval */ - if ((ntries % LOG_FREQ) == 0) - { - elog(WARNING, "Process %d is using backup %s, and is still running", - encoded_pid, base36enc(backup->start_time)); + elog(WARNING, "Process %d is using backup %s, and is still running", + encoded_pid, backup_id); - elog(WARNING, "Waiting %u seconds on exclusive lock for backup %s", - ntries, base36enc(backup->start_time)); - } + elog(WARNING, "Waiting %u seconds on exclusive lock for backup %s", + ntries, backup_id); + } - sleep(1); + sleep(1); - /* try again */ - continue; - } + /* try again */ + continue; + } + else + { + if (errno == ESRCH) + elog(WARNING, "Process %d which used backup %s no longer exists", + encoded_pid, backup_id); else - { - if (errno == ESRCH) - elog(WARNING, "Process %d which used backup %s no longer exists", - encoded_pid, base36enc(backup->start_time)); - else - elog(ERROR, "Failed to send signal 0 to a process %d: %s", - encoded_pid, strerror(errno)); - } + elog(ERROR, "Failed to send signal 0 to a process %d: %s", + encoded_pid, strerror(errno)); } grab_lock: @@ -484,11 +501,14 @@ lock_backup_exclusive(pgBackup *backup, bool strict) return 0; } -/* Wait until all read-only lock owners are gone */ -bool -wait_read_only_owners(pgBackup *backup) +/* Wait until all shared lock owners are gone + * 0 - successs + * 1 - fail + */ +int +wait_shared_owners(pgBackup *backup) { - FILE *fp = NULL; + FILE *fp = NULL; char buffer[256]; pid_t encoded_pid; int ntries = LOCK_TIMEOUT; @@ -500,7 +520,7 @@ wait_read_only_owners(pgBackup *backup) if (fp == NULL && errno != ENOENT) elog(ERROR, "Cannot open lock file \"%s\": %s", lock_file, strerror(errno)); - /* iterate over pids in lock file */ + /* iterate over pids in lock file */ while (fp && fgets(buffer, sizeof(buffer), fp)) { encoded_pid = atoi(buffer); @@ -510,47 +530,42 @@ wait_read_only_owners(pgBackup *backup) continue; } - /* wait until RO lock owners go away */ + /* wait until shared lock owners go away */ do { if (interrupted) elog(ERROR, "Interrupted while locking backup %s", base36enc(backup->start_time)); - if (encoded_pid != my_pid) + if (encoded_pid == my_pid) + break; + + /* check if lock owner is still alive */ + if (kill(encoded_pid, 0) == 0) { - if (kill(encoded_pid, 0) == 0) + /* complain from time to time */ + if ((ntries % LOG_FREQ) == 0) { - if ((ntries % LOG_FREQ) == 0) - { - elog(WARNING, "Process %d is using backup %s in read only mode, and is still running", - encoded_pid, base36enc(backup->start_time)); + elog(WARNING, "Process %d is using backup %s in shared mode, and is still running", + encoded_pid, base36enc(backup->start_time)); - elog(WARNING, "Waiting %u seconds on lock for backup %s", ntries, - base36enc(backup->start_time)); - } + elog(WARNING, "Waiting %u seconds on lock for backup %s", ntries, + base36enc(backup->start_time)); + } - sleep(1); + sleep(1); - /* try again */ - continue; - } - else if (errno != ESRCH) - elog(ERROR, "Failed to send signal 0 to a process %d: %s", - encoded_pid, 
strerror(errno)); + /* try again */ + continue; } + else if (errno != ESRCH) + elog(ERROR, "Failed to send signal 0 to a process %d: %s", + encoded_pid, strerror(errno)); /* locker is dead */ break; } while (ntries--); - - if (ntries <= 0) - { - elog(WARNING, "Cannot to lock backup %s in exclusive mode, because process %u owns read-only lock", - base36enc(backup->start_time), encoded_pid); - return false; - } } if (fp && ferror(fp)) @@ -559,22 +574,26 @@ wait_read_only_owners(pgBackup *backup) if (fp) fclose(fp); - /* unlink RO lock list */ - fio_unlink(lock_file, FIO_BACKUP_HOST); - return true; -} + /* some shared owners are still alive */ + if (ntries <= 0) + { + elog(WARNING, "Cannot to lock backup %s in exclusive mode, because process %u owns shared lock", + base36enc(backup->start_time), encoded_pid); + return 1; + } -bool -lock_backup_internal(pgBackup *backup, bool exclusive) -{ - if (exclusive) - return wait_read_only_owners(backup); - else - return lock_backup_read_only(backup); + /* unlink shared lock file */ + fio_unlink(lock_file, FIO_BACKUP_HOST); + return 0; } -bool -lock_backup_read_only(pgBackup *backup) +/* + * Lock backup in shared mode + * 0 - successs + * 1 - fail + */ +int +grab_shared_lock_file(pgBackup *backup) { FILE *fp_in = NULL; FILE *fp_out = NULL; @@ -604,20 +623,20 @@ lock_backup_read_only(pgBackup *backup) continue; } - if (encoded_pid != my_pid) + if (encoded_pid == my_pid) + continue; + + if (kill(encoded_pid, 0) == 0) { - if (kill(encoded_pid, 0) == 0) - { - /* - * Somebody is still using this backup in RO mode, - * copy this pid into a new file. - */ - buffer_len += snprintf(buffer+buffer_len, 4096, "%u\n", encoded_pid); - } - else if (errno != ESRCH) - elog(ERROR, "Failed to send signal 0 to a process %d: %s", - encoded_pid, strerror(errno)); + /* + * Somebody is still using this backup in shared mode, + * copy this pid into a new file. + */ + buffer_len += snprintf(buffer+buffer_len, 4096, "%u\n", encoded_pid); } + else if (errno != ESRCH) + elog(ERROR, "Failed to send signal 0 to a process %d: %s", + encoded_pid, strerror(errno)); } if (fp_in) @@ -647,7 +666,123 @@ lock_backup_read_only(pgBackup *backup) elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", lock_file_tmp, lock_file, strerror(errno)); - return true; + return 0; +} + +void +unlock_backup(const char *backup_dir, const char *backup_id, bool exclusive) +{ + if (exclusive) + { + release_excl_lock_file(backup_dir); + return; + } + + /* To remove shared lock, we must briefly obtain exclusive lock, ... */ + if (grab_excl_lock_file(backup_dir, backup_id, false) != 0) + /* ... if it's not possible then leave shared lock */ + return; + + release_shared_lock_file(backup_dir); + release_excl_lock_file(backup_dir); +} + +void +release_excl_lock_file(const char *backup_dir) +{ + char lock_file[MAXPGPATH]; + + join_path_components(lock_file, backup_dir, BACKUP_LOCK_FILE); + + /* TODO Sanity check: maybe we should check, that pid in lock file is my_pid */ + + /* unlink pid file */ + fio_unlink(lock_file, FIO_BACKUP_HOST); +} + +void +release_shared_lock_file(const char *backup_dir) +{ + FILE *fp_in = NULL; + FILE *fp_out = NULL; + char buf_in[256]; + pid_t encoded_pid; + char lock_file[MAXPGPATH]; + + char buffer[8192]; /*TODO: should be enough, but maybe malloc+realloc is better ? 
*/ + char lock_file_tmp[MAXPGPATH]; + int buffer_len = 0; + + join_path_components(lock_file, backup_dir, BACKUP_RO_LOCK_FILE); + snprintf(lock_file_tmp, MAXPGPATH, "%s%s", lock_file, "tmp"); + + /* open lock file */ + fp_in = fopen(lock_file, "r"); + if (fp_in == NULL) + { + if (errno == ENOENT) + return; + else + elog(ERROR, "Cannot open lock file \"%s\": %s", lock_file, strerror(errno)); + } + + /* read PIDs of owners */ + while (fgets(buf_in, sizeof(buf_in), fp_in)) + { + encoded_pid = atoi(buf_in); + + if (encoded_pid <= 0) + { + elog(WARNING, "Bogus data in lock file \"%s\": \"%s\"", lock_file, buf_in); + continue; + } + + /* remove my pid */ + if (encoded_pid == my_pid) + continue; + + if (kill(encoded_pid, 0) == 0) + { + /* + * Somebody is still using this backup in shared mode, + * copy this pid into a new file. + */ + buffer_len += snprintf(buffer+buffer_len, 4096, "%u\n", encoded_pid); + } + else if (errno != ESRCH) + elog(ERROR, "Failed to send signal 0 to a process %d: %s", + encoded_pid, strerror(errno)); + } + + if (ferror(fp_in)) + elog(ERROR, "Cannot read from lock file: \"%s\"", lock_file); + fclose(fp_in); + + /* if there is no active pid left, then there is nothing to do */ + if (buffer_len == 0) + { + fio_unlink(lock_file, FIO_BACKUP_HOST); + return; + } + + fp_out = fopen(lock_file_tmp, "w"); + if (fp_out == NULL) + elog(ERROR, "Cannot open temp lock file \"%s\": %s", lock_file_tmp, strerror(errno)); + + /* write out the collected PIDs to temp lock file */ + fwrite(buffer, 1, buffer_len, fp_out); + + if (ferror(fp_out)) + elog(ERROR, "Cannot write to lock file: \"%s\"", lock_file_tmp); + + if (fclose(fp_out) != 0) + elog(ERROR, "Cannot close temp lock file \"%s\": %s", lock_file_tmp, strerror(errno)); + + if (rename(lock_file_tmp, lock_file) < 0) + elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", + lock_file_tmp, lock_file, strerror(errno)); + + return; } /* diff --git a/tests/locking.py b/tests/locking.py index 540007838..ef7aa1f25 100644 --- a/tests/locking.py +++ b/tests/locking.py @@ -581,6 +581,59 @@ def test_empty_lock_file(self): # Clean after yourself self.del_test_dir(module_name, fname) -# TODO: -# test that concurrent validation and restore are not locking each other -# check that quick exclusive lock, when taking RO-lock, is really quick + def test_shared_lock(self): + """ + Make sure that shared lock leaves no files with pids + """ + fname = self.id().split('.')[3] + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Fill with data + node.pgbench_init(scale=1) + + # FULL + backup_id = self.backup_node(backup_dir, 'node', node) + + lockfile_excl = os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.pid') + lockfile_shr = os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup_ro.pid') + + self.validate_pb(backup_dir, 'node', backup_id) + + self.assertFalse( + os.path.exists(lockfile_excl), + "File should not exist: {0}".format(lockfile_excl)) + + self.assertFalse( + os.path.exists(lockfile_shr), + "File should not exist: {0}".format(lockfile_shr)) + + gdb = self.validate_pb(backup_dir, 'node', backup_id, gdb=True) + + gdb.set_breakpoint('validate_one_page') + gdb.run_until_break() + gdb.kill() + + self.assertTrue( + os.path.exists(lockfile_shr), + 
"File should exist: {0}".format(lockfile_shr)) + + self.validate_pb(backup_dir, 'node', backup_id) + + self.assertFalse( + os.path.exists(lockfile_excl), + "File should not exist: {0}".format(lockfile_excl)) + + self.assertFalse( + os.path.exists(lockfile_shr), + "File should not exist: {0}".format(lockfile_shr)) + + # Clean after yourself + self.del_test_dir(module_name, fname) From c30628cd1da24f85da8f9fc9316bd8115348efd2 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Wed, 10 Feb 2021 14:11:57 +0300 Subject: [PATCH 041/525] [Issue #237] Ignore EROFS when locking backup in shared mode --- src/catalog.c | 64 ++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 46 insertions(+), 18 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index e1b5d770f..b9d384b69 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -36,6 +36,11 @@ static void unlock_backup(const char *backup_dir, const char *backup_id, bool ex static void release_excl_lock_file(const char *backup_dir); static void release_shared_lock_file(const char *backup_dir); +#define LOCK_OK 0 +#define LOCK_FAIL_TIMEOUT 1 +#define LOCK_FAIL_ENOSPC 2 +#define LOCK_FAIL_EROFS 3 + typedef struct LockInfo { char backup_id[10]; @@ -187,18 +192,26 @@ lock_backup(pgBackup *backup, bool strict, bool exclusive) rc = grab_excl_lock_file(backup->root_dir, base36enc(backup->start_time), strict); - if (rc == 1) + if (rc == LOCK_FAIL_TIMEOUT) return false; - else if (rc == 2) + else if (rc == LOCK_FAIL_ENOSPC) { + /* + * If we failed to take exclusive lock due to ENOSPC, + * then in lax mode treat such condition as if lock was taken. + */ + enospc_detected = true; if (strict) return false; - + } + else if (rc == LOCK_FAIL_EROFS) + { /* - * If we failed to take exclusive lock due to ENOSPC, - * then in lax mode treat such condition as if lock was taken. + * If we failed to take exclusive lock due to EROFS, + * then in shared mode treat such condition as if lock was taken. */ + return !exclusive; } /* @@ -242,7 +255,7 @@ lock_backup(pgBackup *backup, bool strict, bool exclusive) * freed some space on filesystem, thanks to unlinking of BACKUP_RO_LOCK_FILE. * If somebody concurrently acquired exclusive lock file first, then we should give up. 
*/ - if (grab_excl_lock_file(backup->root_dir, base36enc(backup->start_time), strict) == 1) + if (grab_excl_lock_file(backup->root_dir, base36enc(backup->start_time), strict) == LOCK_FAIL_TIMEOUT) return false; return true; @@ -271,18 +284,20 @@ lock_backup(pgBackup *backup, bool strict, bool exclusive) return true; } -/* Lock backup in exclusive mode +/* + * Lock backup in exclusive mode * Result codes: - * 0 Success - * 1 Failed to acquire lock in lock_timeout time - * 2 Failed to acquire lock due to ENOSPC + * LOCK_OK Success + * LOCK_FAIL_TIMEOUT Failed to acquire lock in lock_timeout time + * LOCK_FAIL_ENOSPC Failed to acquire lock due to ENOSPC + * LOCK_FAIL_EROFS Failed to acquire lock due to EROFS */ int grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) { char lock_file[MAXPGPATH]; int fd = 0; - char buffer[MAXPGPATH * 2 + 256]; + char buffer[256]; int ntries = LOCK_TIMEOUT; int empty_tries = LOCK_STALE_TIMEOUT; int len; @@ -312,6 +327,14 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) if (fd >= 0) break; /* Success; exit the retry loop */ + /* read-only fs is a special case */ + if (errno == EROFS) + { + elog(WARNING, "Could not create lock file \"%s\": %s", + lock_file, strerror(errno)); + return LOCK_FAIL_EROFS; + } + /* * Couldn't create the pid file. Probably it already exists. * If file already exists or we have some permission problem (???), @@ -390,7 +413,7 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) * exist. */ if (encoded_pid == my_pid) - return 0; + return LOCK_OK; if (kill(encoded_pid, 0) == 0) { @@ -437,7 +460,7 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) /* Failed to acquire exclusive lock in time */ if (fd <= 0) - return 1; + return LOCK_FAIL_TIMEOUT; /* * Successfully created the file, now fill it. @@ -457,7 +480,7 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) * Only delete command should be run in lax mode. */ if (!strict && save_errno == ENOSPC) - return 2; + return LOCK_FAIL_ENOSPC; else elog(ERROR, "Could not write lock file \"%s\": %s", lock_file, strerror(save_errno)); @@ -475,7 +498,7 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) * Only delete command should be run in lax mode. 
*/ if (!strict && save_errno == ENOSPC) - return 2; + return LOCK_FAIL_ENOSPC; else elog(ERROR, "Could not flush lock file \"%s\": %s", lock_file, strerror(save_errno)); @@ -488,7 +511,7 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) fio_unlink(lock_file, FIO_BACKUP_HOST); if (!strict && errno == ENOSPC) - return 2; + return LOCK_FAIL_ENOSPC; else elog(ERROR, "Could not close lock file \"%s\": %s", lock_file, strerror(save_errno)); @@ -498,7 +521,7 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) // base36enc(backup->start_time), // LOCK_TIMEOUT - ntries + LOCK_STALE_TIMEOUT - empty_tries); - return 0; + return LOCK_OK; } /* Wait until all shared lock owners are gone @@ -648,7 +671,12 @@ grab_shared_lock_file(pgBackup *backup) fp_out = fopen(lock_file_tmp, "w"); if (fp_out == NULL) + { + if (errno == EROFS) + return 0; + elog(ERROR, "Cannot open temp lock file \"%s\": %s", lock_file_tmp, strerror(errno)); + } /* add my own pid */ buffer_len += snprintf(buffer+buffer_len, sizeof(buffer), "%u\n", my_pid); @@ -679,7 +707,7 @@ unlock_backup(const char *backup_dir, const char *backup_id, bool exclusive) } /* To remove shared lock, we must briefly obtain exclusive lock, ... */ - if (grab_excl_lock_file(backup_dir, backup_id, false) != 0) + if (grab_excl_lock_file(backup_dir, backup_id, false) != LOCK_OK) /* ... if it's not possible then leave shared lock */ return; From 4a4af6270fc3a06195e604064cccc359080526a4 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Wed, 10 Feb 2021 14:25:13 +0300 Subject: [PATCH 042/525] bugfix: for PG>=12 instance with missing postgresql.auto.conf restore now correctly recreates postgresql.auto.conf --- src/restore.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/restore.c b/src/restore.c index b3b9965ea..adc20bb6c 100644 --- a/src/restore.c +++ b/src/restore.c @@ -1543,6 +1543,7 @@ update_recovery_options(pgBackup *backup, if (errno != ENOENT) elog(ERROR, "cannot stat file \"%s\": %s", postgres_auto_path, strerror(errno)); + st.st_size = 0; } /* Kludge for 0-sized postgresql.auto.conf file. 
TODO: make something more intelligent */ From 63d79e2b4fac043e29ea4d54589fbb8788a18ffd Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Wed, 10 Feb 2021 15:10:15 +0300 Subject: [PATCH 043/525] [Issue #323] stable remote agent API --- src/pg_probackup.h | 3 +++ src/utils/remote.c | 5 +++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index a766e705e..c15e6efd8 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -307,7 +307,10 @@ typedef enum ShowFormat #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) #define PROGRAM_VERSION "2.4.9" + +/* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20409 +#define AGENT_PROTOCOL_VERSION_STR "2.4.9" /* update only when changing storage format */ #define STORAGE_FORMAT_VERSION "2.4.4" diff --git a/src/utils/remote.c b/src/utils/remote.c index 88b1ce7a6..2bfd24d1e 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -247,8 +247,9 @@ bool launch_agent(void) (agent_version / 100) % 100, agent_version % 100); - elog(ERROR, "Remote agent version %s does not match local program version %s", - agent_version_str, PROGRAM_VERSION); + elog(ERROR, "Remote agent protocol version %s does not match local program protocol version %s, " + "consider to upgrade pg_probackup binary", + agent_version_str, AGENT_PROTOCOL_VERSION_STR); } return true; From 6dcf64a90705411e2764800ff8dd2ccb5fc1a4d3 Mon Sep 17 00:00:00 2001 From: anastasia Date: Wed, 10 Feb 2021 16:22:40 +0300 Subject: [PATCH 044/525] Tests. Fix cfs_backup tests compatibility with ptrack 2.1 --- tests/cfs_backup.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/cfs_backup.py b/tests/cfs_backup.py index 5a3665518..3d2fa8de7 100644 --- a/tests/cfs_backup.py +++ b/tests/cfs_backup.py @@ -20,9 +20,9 @@ def setUp(self): self.node = self.make_simple_node( base_dir="{0}/{1}/node".format(module_name, self.fname), set_replication=True, + ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ - 'ptrack_enable': 'on', 'cfs_encryption': 'off', 'max_wal_senders': '2', 'shared_buffers': '200MB' @@ -35,6 +35,11 @@ def setUp(self): self.node.slow_start() + if self.node.major_version >= 12: + self.node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + self.create_tblspace_in_node(self.node, tblspace_name, cfs=True) tblspace = self.node.safe_psql( From 2305d1fdda20fc16af053a06805416de0d2e69de Mon Sep 17 00:00:00 2001 From: anastasia Date: Wed, 10 Feb 2021 19:44:38 +0300 Subject: [PATCH 045/525] Tests. 
Expect failure in test_validate_target_lsn, because it requires multi-timeline WAL validation, which is not implemented yet --- tests/validate.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/validate.py b/tests/validate.py index 6777b92a1..cfec234d0 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -3451,7 +3451,8 @@ def test_validate_corrupt_tablespace_map(self): # Clean after yourself self.del_test_dir(module_name, fname) - # @unittest.expectedFailure + #TODO fix the test + @unittest.expectedFailure # @unittest.skip("skip") def test_validate_target_lsn(self): """ From bdcc28d36cd227635d8a6dba8e2498d91069d27b Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 11 Feb 2021 09:51:38 +0300 Subject: [PATCH 046/525] tests: minor fixes for backup and merge modules --- tests/backup.py | 1 + tests/merge.py | 1 + 2 files changed, 2 insertions(+) diff --git a/tests/backup.py b/tests/backup.py index 2ee5b9ace..70c4dc13f 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -5,6 +5,7 @@ import shutil from distutils.dir_util import copy_tree from testgres import ProcessType +import subprocess module_name = 'backup' diff --git a/tests/merge.py b/tests/merge.py index bf21405bc..29d60433c 100644 --- a/tests/merge.py +++ b/tests/merge.py @@ -7,6 +7,7 @@ import shutil from datetime import datetime, timedelta import time +import subprocess module_name = "merge" From 5207900fb843572398513998b6ec158d74473d23 Mon Sep 17 00:00:00 2001 From: anastasia Date: Thu, 11 Feb 2021 12:01:13 +0300 Subject: [PATCH 047/525] Update tests/Readme.md. Add note about python3. --- tests/Readme.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/Readme.md b/tests/Readme.md index 3adf0c019..adcf5380e 100644 --- a/tests/Readme.md +++ b/tests/Readme.md @@ -1,7 +1,11 @@ -[см wiki](https://confluence.postgrespro.ru/display/DEV/pg_probackup) +[see wiki](https://confluence.postgrespro.ru/display/DEV/pg_probackup) ``` -Note: For now these are works on Linux and "kinda" works on Windows +Note: For now these tests work on Linux and "kinda" work on Windows +``` + +``` +Note: tests require python3 to work properly. 
``` ``` From 71661abc3f4930d5d960f85dda0deae71a31fc63 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 11 Feb 2021 19:28:42 +0300 Subject: [PATCH 048/525] tests: fix "cfs_backup" module --- tests/cfs_backup.py | 76 ++++++++++++++++++++++----------------------- 1 file changed, 37 insertions(+), 39 deletions(-) diff --git a/tests/cfs_backup.py b/tests/cfs_backup.py index 3d2fa8de7..b7ebff0fe 100644 --- a/tests/cfs_backup.py +++ b/tests/cfs_backup.py @@ -45,13 +45,17 @@ def setUp(self): tblspace = self.node.safe_psql( "postgres", "SELECT * FROM pg_tablespace WHERE spcname='{0}'".format( - tblspace_name) - ) - self.assertTrue( - tblspace_name in tblspace and "compression=true" in tblspace, + tblspace_name)) + + self.assertIn( + tblspace_name, str(tblspace), "ERROR: The tablespace not created " - "or it create without compressions" - ) + "or it create without compressions") + + self.assertIn( + "compression=true", str(tblspace), + "ERROR: The tablespace not created " + "or it create without compressions") self.assertTrue( find_by_name( @@ -743,12 +747,14 @@ def test_multiple_segments(self): # CHECK FULL BACKUP self.node.stop() self.node.cleanup() - shutil.rmtree( - self.get_tblspace_path(self.node, tblspace_name), - ignore_errors=True) + shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) self.restore_node( - self.backup_dir, 'node', self.node, - backup_id=backup_id_full, options=["-j", "4"]) + self.backup_dir, 'node', self.node, backup_id=backup_id_full, + options=[ + "-j", "4", + "--recovery-target=immediate", + "--recovery-target-action=promote"]) + self.node.slow_start() self.assertEqual( full_result, @@ -762,8 +768,12 @@ def test_multiple_segments(self): self.get_tblspace_path(self.node, tblspace_name), ignore_errors=True) self.restore_node( - self.backup_dir, 'node', self.node, - backup_id=backup_id_page, options=["-j", "4"]) + self.backup_dir, 'node', self.node, backup_id=backup_id_page, + options=[ + "-j", "4", + "--recovery-target=immediate", + "--recovery-target-action=promote"]) + self.node.slow_start() self.assertEqual( page_result, @@ -791,8 +801,7 @@ def test_multiple_segments_in_multiple_tablespaces(self): "AS SELECT i AS id, MD5(i::text) AS text, " "MD5(repeat(i::text,10))::tsvector AS tsvector " "FROM generate_series(0,1005000) i".format( - 't_heap_1', tblspace_name_1) - ) + 't_heap_1', tblspace_name_1)) self.node.safe_psql( "postgres", @@ -800,8 +809,7 @@ def test_multiple_segments_in_multiple_tablespaces(self): "AS SELECT i AS id, MD5(i::text) AS text, " "MD5(repeat(i::text,10))::tsvector AS tsvector " "FROM generate_series(0,1005000) i".format( - 't_heap_2', tblspace_name_2) - ) + 't_heap_2', tblspace_name_2)) full_result_1 = self.node.safe_psql( "postgres", "SELECT * FROM t_heap_1") @@ -869,21 +877,16 @@ def test_multiple_segments_in_multiple_tablespaces(self): # CHECK FULL BACKUP self.node.stop() - self.node.cleanup() - shutil.rmtree( - self.get_tblspace_path(self.node, tblspace_name), - ignore_errors=True) - shutil.rmtree( - self.get_tblspace_path(self.node, tblspace_name_1), - ignore_errors=True) - shutil.rmtree( - self.get_tblspace_path(self.node, tblspace_name_2), - ignore_errors=True) self.restore_node( self.backup_dir, 'node', self.node, - backup_id=backup_id_full, options=["-j", "4"]) + backup_id=backup_id_full, + options=[ + "-j", "4", "--incremental-mode=checksum", + "--recovery-target=immediate", + "--recovery-target-action=promote"]) self.node.slow_start() + self.assertEqual( full_result_1, self.node.safe_psql("postgres", "SELECT * FROM 
t_heap_1"), @@ -895,21 +898,16 @@ def test_multiple_segments_in_multiple_tablespaces(self): # CHECK PAGE BACKUP self.node.stop() - self.node.cleanup() - shutil.rmtree( - self.get_tblspace_path(self.node, tblspace_name), - ignore_errors=True) - shutil.rmtree( - self.get_tblspace_path(self.node, tblspace_name_1), - ignore_errors=True) - shutil.rmtree( - self.get_tblspace_path(self.node, tblspace_name_2), - ignore_errors=True) self.restore_node( self.backup_dir, 'node', self.node, - backup_id=backup_id_page, options=["-j", "4"]) + backup_id=backup_id_page, + options=[ + "-j", "4", "--incremental-mode=checksum", + "--recovery-target=immediate", + "--recovery-target-action=promote"]) self.node.slow_start() + self.assertEqual( page_result_1, self.node.safe_psql("postgres", "SELECT * FROM t_heap_1"), From f324b081809b2f27c28f66b1cf2f878f55131c25 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 11 Feb 2021 20:27:47 +0300 Subject: [PATCH 049/525] tests: more fixes for "cfs_backup" module --- tests/cfs_backup.py | 8 ++++---- tests/helpers/cfs_helpers.py | 4 +++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/tests/cfs_backup.py b/tests/cfs_backup.py index b7ebff0fe..2e686d46c 100644 --- a/tests/cfs_backup.py +++ b/tests/cfs_backup.py @@ -482,7 +482,7 @@ def test_fullbackup_empty_tablespace_page_after_create_table_stream(self): ) # --- Section: Incremental from fill tablespace --- # - @unittest.expectedFailure + # @unittest.expectedFailure # @unittest.skip("skip") @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_fullbackup_after_create_table_ptrack_after_create_table(self): @@ -546,7 +546,7 @@ def test_fullbackup_after_create_table_ptrack_after_create_table(self): ) ) - @unittest.expectedFailure + # @unittest.expectedFailure # @unittest.skip("skip") @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_fullbackup_after_create_table_ptrack_after_create_table_stream(self): @@ -612,7 +612,7 @@ def test_fullbackup_after_create_table_ptrack_after_create_table_stream(self): ) ) - @unittest.expectedFailure + # @unittest.expectedFailure # @unittest.skip("skip") @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_fullbackup_after_create_table_page_after_create_table(self): @@ -917,7 +917,7 @@ def test_multiple_segments_in_multiple_tablespaces(self): self.node.safe_psql("postgres", "SELECT * FROM t_heap_2"), 'Lost data after restore') - @unittest.expectedFailure + # @unittest.expectedFailure # @unittest.skip("skip") @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_fullbackup_after_create_table_page_after_create_table_stream(self): diff --git a/tests/helpers/cfs_helpers.py b/tests/helpers/cfs_helpers.py index 67e2b331b..31af76f2e 100644 --- a/tests/helpers/cfs_helpers.py +++ b/tests/helpers/cfs_helpers.py @@ -88,4 +88,6 @@ def corrupt_file(filename): def random_string(n): a = string.ascii_letters + string.digits - return ''.join([random.choice(a) for i in range(int(n)+1)]) \ No newline at end of file + random_str = ''.join([random.choice(a) for i in range(int(n)+1)]) + return str.encode(random_str) +# return ''.join([random.choice(a) for i in range(int(n)+1)]) From 31a8ea3ff04c226a12ed93db3aa82b4fd959367b Mon Sep 17 00:00:00 2001 From: anastasia Date: Fri, 12 Feb 2021 13:08:12 +0300 Subject: [PATCH 050/525] tests: Skip test_incr_lsn_long_xact_1, when testing with PostgresPro Enterprise --- tests/incr_restore.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/incr_restore.py b/tests/incr_restore.py index 1d3a52df8..a228bdd79 
100644 --- a/tests/incr_restore.py +++ b/tests/incr_restore.py @@ -1711,6 +1711,9 @@ def test_incr_checksum_long_xact(self): # @unittest.skip("skip") # @unittest.expectedFailure + # This test will pass with Enterprise + # because it has checksums enabled by default + @unittest.skipIf(ProbackupTest.enterprise, 'skip') def test_incr_lsn_long_xact_1(self): """ """ From 8691e08e220804248dd1c241279a98fb7f050d50 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Fri, 12 Feb 2021 15:32:51 +0300 Subject: [PATCH 051/525] tests: some minor fixes for "page" module --- tests/page.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/tests/page.py b/tests/page.py index 323c0a6de..c0dbb69f2 100644 --- a/tests/page.py +++ b/tests/page.py @@ -757,8 +757,6 @@ def test_page_backup_with_lost_wal_segment(self): self.output, self.cmd)) except ProbackupException as e: self.assertTrue( - 'INFO: Wait for WAL segment' in e.message and - 'to be archived' in e.message and 'Could not read WAL record at' in e.message and 'is absent' in e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( @@ -782,8 +780,6 @@ def test_page_backup_with_lost_wal_segment(self): self.output, self.cmd)) except ProbackupException as e: self.assertTrue( - 'INFO: Wait for WAL segment' in e.message and - 'to be archived' in e.message and 'Could not read WAL record at' in e.message and 'is absent' in e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( @@ -872,8 +868,6 @@ def test_page_backup_with_corrupted_wal_segment(self): self.output, self.cmd)) except ProbackupException as e: self.assertTrue( - 'INFO: Wait for WAL segment' in e.message and - 'to be archived' in e.message and 'Could not read WAL record at' in e.message and 'Possible WAL corruption. Error has occured during reading WAL segment' in e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( @@ -896,8 +890,6 @@ def test_page_backup_with_corrupted_wal_segment(self): self.output, self.cmd)) except ProbackupException as e: self.assertTrue( - 'INFO: Wait for WAL segment' in e.message and - 'to be archived' in e.message and 'Could not read WAL record at' in e.message and 'Possible WAL corruption. Error has occured during reading WAL segment "{0}"'.format( file) in e.message, @@ -997,8 +989,6 @@ def test_page_backup_with_alien_wal_segment(self): self.output, self.cmd)) except ProbackupException as e: self.assertTrue( - 'INFO: Wait for WAL segment' in e.message and - 'to be archived' in e.message and 'Could not read WAL record at' in e.message and 'Possible WAL corruption. 
Error has occured during reading WAL segment' in e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( @@ -1020,8 +1010,6 @@ def test_page_backup_with_alien_wal_segment(self): "Output: {0} \n CMD: {1}".format( self.output, self.cmd)) except ProbackupException as e: - self.assertIn('INFO: Wait for WAL segment', e.message) - self.assertIn('to be archived', e.message) self.assertIn('Could not read WAL record at', e.message) self.assertIn('WAL file is from different database system: ' 'WAL file database system identifier is', e.message) From 212fad4f931e1269d0eb290e33c5a57435ff9582 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Fri, 12 Feb 2021 16:48:51 +0300 Subject: [PATCH 052/525] Version 2.4.10 --- src/pg_probackup.h | 2 +- tests/expected/option_version.out | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index c15e6efd8..1e001bd05 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -306,7 +306,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.4.9" +#define PROGRAM_VERSION "2.4.10" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20409 diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index 1481e32db..b5204e46e 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.4.9 \ No newline at end of file +pg_probackup 2.4.10 \ No newline at end of file From 8e930b79abc2f2a0440eb5e1528fe562f2a971c9 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sat, 13 Feb 2021 04:03:04 +0300 Subject: [PATCH 053/525] make compiler happy --- src/catalog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/catalog.c b/src/catalog.c index b9d384b69..423e83f7b 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -533,7 +533,7 @@ wait_shared_owners(pgBackup *backup) { FILE *fp = NULL; char buffer[256]; - pid_t encoded_pid; + pid_t encoded_pid = 0; int ntries = LOCK_TIMEOUT; char lock_file[MAXPGPATH]; From b11bd8eb74b829509dc963aa674fedf4148a3fe9 Mon Sep 17 00:00:00 2001 From: "a.kozhemyakin" Date: Sun, 14 Feb 2021 10:34:49 +0300 Subject: [PATCH 054/525] fix: clean node data after test --- tests/helpers/ptrack_helpers.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 833e95a36..9af7cbc1c 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -145,6 +145,7 @@ def slow_start(self, replica=False): class ProbackupTest(object): # Class attributes enterprise = is_enterprise() + nodes = [] def __init__(self, *args, **kwargs): super(ProbackupTest, self).__init__(*args, **kwargs) @@ -402,7 +403,7 @@ def make_simple_node( if node.major_version >= 13: self.set_auto_conf( node, {}, 'postgresql.conf', ['wal_keep_segments']) - + self.nodes.append(node) return node def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False): @@ -1521,8 +1522,17 @@ def del_test_dir(self, module_name, fname, nodes=[]): except: pass - for node in nodes: - node.stop() + try: + if not nodes: + for node in list(self.nodes): + node.stop() + self.nodes.remove(node) + else: + for node in list(nodes): + node.stop() + self.nodes.remove(node) + except: + pass 
shutil.rmtree( os.path.join( @@ -1533,7 +1543,7 @@ def del_test_dir(self, module_name, fname, nodes=[]): ignore_errors=True ) try: - os.rmdir(os.path.join(self.tmp_path, module_name)) + shutil.rmtree(os.path.join(self.tmp_path, module_name), ignore_errors=True) except: pass From 6ff3633d1686a0e5a87d2989a756cdaba12644d9 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 14 Feb 2021 23:45:54 +0300 Subject: [PATCH 055/525] bump year in license --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index dc4e8b8d5..0ba831507 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2015-2019, Postgres Professional +Copyright (c) 2015-2020, Postgres Professional Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group From 86b71f934d9a951a2a3d14842705307dd01528b7 Mon Sep 17 00:00:00 2001 From: "a.kozhemyakin" Date: Mon, 15 Feb 2021 06:32:28 +0300 Subject: [PATCH 056/525] clear data nodes after run test --- tests/helpers/ptrack_helpers.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 833e95a36..623eac708 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -145,6 +145,7 @@ def slow_start(self, replica=False): class ProbackupTest(object): # Class attributes enterprise = is_enterprise() + nodes = [] def __init__(self, *args, **kwargs): super(ProbackupTest, self).__init__(*args, **kwargs) @@ -402,7 +403,7 @@ def make_simple_node( if node.major_version >= 13: self.set_auto_conf( node, {}, 'postgresql.conf', ['wal_keep_segments']) - + self.nodes.append(node) return node def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False): @@ -1521,8 +1522,16 @@ def del_test_dir(self, module_name, fname, nodes=[]): except: pass - for node in nodes: - node.stop() + if not nodes: + nodes = self.nodes + for node in list(nodes): + try: + if node.status() == 0: + node.stop() + except: + pass + if node in self.nodes: + self.nodes.remove(node) shutil.rmtree( os.path.join( From fb5debda8ab7a9d51feb30b226e1c9f43f431f7b Mon Sep 17 00:00:00 2001 From: "a.kozhemyakin" Date: Mon, 15 Feb 2021 10:24:04 +0300 Subject: [PATCH 057/525] fix for test test_pgpro434_4 (not zero code exit) --- tests/helpers/ptrack_helpers.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 623eac708..e8374c981 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1528,23 +1528,20 @@ def del_test_dir(self, module_name, fname, nodes=[]): try: if node.status() == 0: node.stop() - except: - pass - if node in self.nodes: - self.nodes.remove(node) + except Exception as e: + print("Error stop node ", e) + raise + finally: + if node in self.nodes: + self.nodes.remove(node) shutil.rmtree( os.path.join( self.tmp_path, - module_name, - fname + module_name ), ignore_errors=True ) - try: - os.rmdir(os.path.join(self.tmp_path, module_name)) - except: - pass def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): """ return dict with directory content. 
" From 5ea3615711e526102f67586cc150c1eb361e9a9a Mon Sep 17 00:00:00 2001 From: "a.kozhemyakin" Date: Mon, 15 Feb 2021 10:32:32 +0300 Subject: [PATCH 058/525] fix --- tests/helpers/ptrack_helpers.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index e8374c981..0f8cd2130 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1528,8 +1528,7 @@ def del_test_dir(self, module_name, fname, nodes=[]): try: if node.status() == 0: node.stop() - except Exception as e: - print("Error stop node ", e) + except: raise finally: if node in self.nodes: From 4261d5eba46648943172bb273544618b286dcaa6 Mon Sep 17 00:00:00 2001 From: "a.kozhemyakin" Date: Mon, 15 Feb 2021 10:49:16 +0300 Subject: [PATCH 059/525] fix --- tests/helpers/ptrack_helpers.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index b03467c02..0f8cd2130 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1541,13 +1541,6 @@ def del_test_dir(self, module_name, fname, nodes=[]): ), ignore_errors=True ) -<<<<<<< HEAD -======= - try: - shutil.rmtree(os.path.join(self.tmp_path, module_name), ignore_errors=True) - except: - pass ->>>>>>> 7f1d12b457f54ddebc0b6f65a1124e7f75dd50ae def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): """ return dict with directory content. " From 294e7dc2056cf4bfc188b8604d678f1295a352a7 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 15 Feb 2021 14:53:52 +0300 Subject: [PATCH 060/525] [Issue #326] added tests.archive.ArchiveTest.test_archive_empty_history_file --- tests/archive.py | 90 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) diff --git a/tests/archive.py b/tests/archive.py index 05675065a..e651ee77b 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -2474,6 +2474,96 @@ def test_archive_show_partial_files_handling(self): # Clean after yourself self.del_test_dir(module_name, fname) + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_archive_empty_history_file(self): + """ + https://github.com/postgrespro/pg_probackup/issues/326 + """ + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'archive_timeout': '30s', + 'checkpoint_timeout': '30s', + 'autovacuum': 'off'}) + + if self.get_version(node) < self.version_to_num('9.6.0'): + self.del_test_dir(module_name, fname) + return unittest.skip( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + + node.slow_start() + + node.pgbench_init(scale=5) + + # FULL + self.backup_node(backup_dir, 'node', node) + + node.pgbench_init(scale=5) + node.cleanup() + + self.restore_node( + backup_dir, 'node', node, + options=[ + '--recovery-target=latest', + '--recovery-target-action=promote']) + + # '--recovery-target-timeline=2', + # Node in timeline 2 + node.slow_start() + + node.pgbench_init(scale=5) + node.cleanup() + + self.restore_node( + backup_dir, 'node', node, + options=[ + '--recovery-target=latest', + '--recovery-target-timeline=2', + '--recovery-target-action=promote']) + + # Node in timeline 
3 + node.slow_start() + + node.pgbench_init(scale=5) + node.cleanup() + + self.restore_node( + backup_dir, 'node', node, + options=[ + '--recovery-target=latest', + '--recovery-target-timeline=3', + '--recovery-target-action=promote']) + + # Node in timeline 4 + node.slow_start() + node.pgbench_init(scale=1) + + # Truncate history files + for tli in range(2, 4): + file = os.path.join( + backup_dir, 'wal', 'node', '0000000{0}.history'.format(tli)) + with open(file, "w+") as f: + f.truncate() + + show = self.show_archive(backup_dir, 'node') + + timelines = show['timelines'] + + # check that all timelines are ok + for timeline in replica_timelines: + print(timeline) + + self.del_test_dir(module_name, fname) + # TODO test with multiple not archived segments. # TODO corrupted file in archive. From 74cd9c54dfc79179f95350c14fcbe6b1f268e478 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 15 Feb 2021 15:49:30 +0300 Subject: [PATCH 061/525] [Issue #326] Handle empty history files correctly --- src/catalog.c | 4 ++++ src/restore.c | 11 +++++++++++ 2 files changed, 15 insertions(+) diff --git a/src/catalog.c b/src/catalog.c index 423e83f7b..15386d426 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1581,6 +1581,10 @@ catalog_get_timelines(InstanceConfig *instance) sscanf(file->name, "%08X.history", &tli); timelines = read_timeline_history(arclog_path, tli, true); + /* History file is empty or corrupted, disregard it */ + if (!timelines) + continue; + if (!tlinfo || tlinfo->tli != tli) { tlinfo = timelineInfoNew(tli); diff --git a/src/restore.c b/src/restore.c index adc20bb6c..f28265ccd 100644 --- a/src/restore.c +++ b/src/restore.c @@ -289,6 +289,9 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt, /* Read timeline history files from archives */ timelines = read_timeline_history(arclog_path, rt->target_tli, true); + if (!timelines) + elog(WARNING, "Failed to get history file for target timeline %i", rt->target_tli); + if (!satisfy_timeline(timelines, current_backup)) { if (target_backup_id != INVALID_BACKUP_ID) @@ -1778,6 +1781,14 @@ read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict if (last_timeline && targetTLI <= last_timeline->tli) elog(ERROR, "Timeline IDs must be less than child timeline's ID."); + /* History file is empty or corrupted */ + if (parray_num(result) != 1) + { + elog(WARNING, "History file is corrupted: \"%s\"", path); + pg_free(result); + return NULL; + } + /* append target timeline */ entry = pgut_new(TimeLineHistoryEntry); entry->tli = targetTLI; From 4ab117bd3c0caacef09b68105ec769fb577614a8 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 15 Feb 2021 16:19:55 +0300 Subject: [PATCH 062/525] [Issue #326] some minor fixes --- src/restore.c | 4 ++-- tests/archive.py | 40 +++++++++++++++++++++------------------- 2 files changed, 23 insertions(+), 21 deletions(-) diff --git a/src/restore.c b/src/restore.c index f28265ccd..1a22a9a28 100644 --- a/src/restore.c +++ b/src/restore.c @@ -290,7 +290,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt, timelines = read_timeline_history(arclog_path, rt->target_tli, true); if (!timelines) - elog(WARNING, "Failed to get history file for target timeline %i", rt->target_tli); + elog(ERROR, "Failed to get history file for target timeline %i", rt->target_tli); if (!satisfy_timeline(timelines, current_backup)) { @@ -1782,7 +1782,7 @@ read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict elog(ERROR, "Timeline IDs must be less than 
child timeline's ID."); /* History file is empty or corrupted */ - if (parray_num(result) != 1) + if (parray_num(result) == 0) { elog(WARNING, "History file is corrupted: \"%s\"", path); pg_free(result); diff --git a/tests/archive.py b/tests/archive.py index e651ee77b..0cf2f703f 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -2485,23 +2485,13 @@ def test_archive_empty_history_file(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'archive_timeout': '30s', - 'checkpoint_timeout': '30s', - 'autovacuum': 'off'}) - - if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) node.slow_start() - node.pgbench_init(scale=5) # FULL @@ -2516,7 +2506,6 @@ def test_archive_empty_history_file(self): '--recovery-target=latest', '--recovery-target-action=promote']) - # '--recovery-target-timeline=2', # Node in timeline 2 node.slow_start() @@ -2545,22 +2534,35 @@ def test_archive_empty_history_file(self): # Node in timeline 4 node.slow_start() - node.pgbench_init(scale=1) + node.pgbench_init(scale=5) # Truncate history files - for tli in range(2, 4): + for tli in range(2, 5): file = os.path.join( backup_dir, 'wal', 'node', '0000000{0}.history'.format(tli)) with open(file, "w+") as f: f.truncate() - show = self.show_archive(backup_dir, 'node') + timelines = self.show_archive(backup_dir, 'node', options=['--log-level-file=INFO']) - timelines = show['timelines'] + # check that all timelines has zero switchpoint + for timeline in timelines: + self.assertEqual(timeline['switchpoint'], '0/0') - # check that all timelines are ok - for timeline in replica_timelines: - print(timeline) + log_file = os.path.join(backup_dir, 'log', 'pg_probackup.log') + with open(log_file, 'r') as f: + log_content = f.read() + wal_dir = os.path.join(backup_dir, 'wal', 'node') + + self.assertIn( + 'WARNING: History file is corrupted: "{0}"'.format(os.path.join(wal_dir, '00000002.history')), + log_content) + self.assertIn( + 'WARNING: History file is corrupted: "{0}"'.format(os.path.join(wal_dir, '00000003.history')), + log_content) + self.assertIn( + 'WARNING: History file is corrupted: "{0}"'.format(os.path.join(wal_dir, '00000004.history')), + log_content) self.del_test_dir(module_name, fname) From 92dfb3a31313a37e6e5e355bdbf51c58d10de914 Mon Sep 17 00:00:00 2001 From: kav23alex Date: Tue, 16 Feb 2021 09:34:01 +0700 Subject: [PATCH 063/525] clean data after run test --- tests/helpers/ptrack_helpers.py | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 0f8cd2130..dbd0d7feb 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1,5 +1,6 @@ # you need os for unittest to work import os +import gc from sys import exit, argv, version_info import subprocess import shutil @@ -1515,29 +1516,24 @@ def get_version(self, node): def get_bin_path(self, binary): return testgres.get_bin_path(binary) + def clean_all(self): + for o in gc.get_referrers(testgres.PostgresNode): + if o.__class__ is testgres.PostgresNode: + o.cleanup() + def del_test_dir(self, module_name, fname, nodes=[]): """ Del testdir 
and optimistically try to del module dir""" try: - testgres.clean_all() - except: - pass - - if not nodes: - nodes = self.nodes - for node in list(nodes): - try: - if node.status() == 0: - node.stop() - except: - raise - finally: - if node in self.nodes: - self.nodes.remove(node) + self.clean_all() + except Exception as e: + raise e shutil.rmtree( os.path.join( self.tmp_path, - module_name + module_name, + fname, + "backup" ), ignore_errors=True ) From f69d4a1d1878ed419f7152b0c1bd9d0223c488c9 Mon Sep 17 00:00:00 2001 From: kav23alex Date: Tue, 16 Feb 2021 10:12:46 +0700 Subject: [PATCH 064/525] merge From f29ab895b386ad23ccb624fc4f673642b4650323 Mon Sep 17 00:00:00 2001 From: kav23alex Date: Tue, 16 Feb 2021 10:15:27 +0700 Subject: [PATCH 065/525] remove self.nodes --- tests/helpers/ptrack_helpers.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index dbd0d7feb..028e9cd16 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -146,7 +146,6 @@ def slow_start(self, replica=False): class ProbackupTest(object): # Class attributes enterprise = is_enterprise() - nodes = [] def __init__(self, *args, **kwargs): super(ProbackupTest, self).__init__(*args, **kwargs) @@ -404,7 +403,6 @@ def make_simple_node( if node.major_version >= 13: self.set_auto_conf( node, {}, 'postgresql.conf', ['wal_keep_segments']) - self.nodes.append(node) return node def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False): From a59631646e9edb1c9a6a8e64f254a216119073be Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Tue, 16 Feb 2021 12:03:16 +0300 Subject: [PATCH 066/525] [Issue #310] Detect timeline switch via repprotocol --- src/backup.c | 6 +- src/catalog.c | 5 +- src/pg_probackup.h | 3 + src/stream.c | 203 ++++++++++++++++++++++++++++++++++++++++++++- src/utils/pgut.c | 5 +- src/utils/pgut.h | 4 +- 6 files changed, 217 insertions(+), 9 deletions(-) diff --git a/src/backup.c b/src/backup.c index ca8baa777..62bc2bbab 100644 --- a/src/backup.c +++ b/src/backup.c @@ -165,8 +165,10 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool "trying to look up on previous timelines", current.tli); - /* TODO: use read_timeline_history */ - tli_list = catalog_get_timelines(&instance_config); + tli_list = get_history_streaming(&instance_config.conn_opt, current.tli, backup_list); + if (!tli_list) + /* fallback to using archive */ + tli_list = catalog_get_timelines(&instance_config); if (parray_num(tli_list) == 0) elog(WARNING, "Cannot find valid backup on previous timelines, " diff --git a/src/catalog.c b/src/catalog.c index 15386d426..b0051f699 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -48,7 +48,7 @@ typedef struct LockInfo bool exclusive; } LockInfo; -static timelineInfo * +timelineInfo * timelineInfoNew(TimeLineID tli) { timelineInfo *tlinfo = (timelineInfo *) pgut_malloc(sizeof(timelineInfo)); @@ -74,7 +74,8 @@ timelineInfoFree(void *tliInfo) if (tli->backups) { - parray_walk(tli->backups, pgBackupFree); + /* backups themselves should freed separately */ +// parray_walk(tli->backups, pgBackupFree); parray_free(tli->backups); } diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 1e001bd05..e5f05338b 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -891,6 +891,8 @@ extern int validate_one_page(Page page, BlockNumber absolute_blkno, uint32 checksum_version); extern bool validate_tablespace_map(pgBackup *backup); +extern parray* 
get_history_streaming(ConnectionOptions *conn_opt, TimeLineID tli, parray *backup_list); + /* return codes for validate_one_page */ /* TODO: use enum */ #define PAGE_IS_VALID (-1) @@ -920,6 +922,7 @@ extern pgBackup *catalog_get_last_data_backup(parray *backup_list, extern pgBackup *get_multi_timeline_parent(parray *backup_list, parray *tli_list, TimeLineID current_tli, time_t current_start_time, InstanceConfig *instance); +extern timelineInfo *timelineInfoNew(TimeLineID tli); extern void timelineInfoFree(void *tliInfo); extern parray *catalog_get_timelines(InstanceConfig *instance); extern void do_set_backup(const char *instance_name, time_t backup_id, diff --git a/src/stream.c b/src/stream.c index 825aa0e7d..21204ae2c 100644 --- a/src/stream.c +++ b/src/stream.c @@ -10,6 +10,7 @@ #include "pg_probackup.h" #include "receivelog.h" #include "streamutil.h" +#include "access/timeline.h" #include #include @@ -69,6 +70,7 @@ static void add_walsegment_to_filelist(parray *filelist, uint32 timeline, uint32 xlog_seg_size); static void add_history_file_to_filelist(parray *filelist, uint32 timeline, char *basedir); +static parray* parse_tli_history_buffer(char *history, TimeLineID tli); /* * Run IDENTIFY_SYSTEM through a given connection and @@ -353,6 +355,204 @@ stop_streaming(XLogRecPtr xlogpos, uint32 timeline, bool segment_finished) /* --- External API --- */ +/* + * Maybe add a StreamOptions struct ? + * Backup conn only needed to calculate stream_stop_timeout. Think about refactoring it. + */ +parray* +get_history_streaming(ConnectionOptions *conn_opt, TimeLineID tli, parray *backup_list) +{ + PGresult *res; + PGconn *conn; + char *history; + char query[128]; + parray *result = NULL; + parray *tli_list = NULL; + timelineInfo *tlinfo = NULL; + int i,j; + + snprintf(query, sizeof(query), "TIMELINE_HISTORY %u", tli); + + /* + * Connect in replication mode to the server. 
+ */ + conn = pgut_connect_replication(conn_opt->pghost, + conn_opt->pgport, + conn_opt->pgdatabase, + conn_opt->pguser, + false); + + if (!conn) + return NULL; + + res = PQexec(conn, query); + PQfinish(conn); + + if (PQresultStatus(res) != PGRES_TUPLES_OK) + { + elog(WARNING, "Could not send replication command \"%s\": %s", + query, PQresultErrorMessage(res)); + PQclear(res); + return NULL; + } + + /* + * The response to TIMELINE_HISTORY is a single row result set + * with two fields: filename and content + */ + + if (PQnfields(res) != 2 || PQntuples(res) != 1) + { + elog(WARNING, "Unexpected response to TIMELINE_HISTORY command: " + "got %d rows and %d fields, expected %d rows and %d fields", + PQntuples(res), PQnfields(res), 1, 2); + PQclear(res); + return NULL; + } + + history = pgut_strdup(PQgetvalue(res, 0, 1)); + result = parse_tli_history_buffer(history, tli); + + /* some cleanup */ + pg_free(history); + PQclear(res); + + if (result) + tlinfo = timelineInfoNew(tli); + else + return NULL; + + /* transform TimeLineHistoryEntry into timelineInfo */ + for (i = parray_num(result) -1; i >= 0; i--) + { + TimeLineHistoryEntry *tln = (TimeLineHistoryEntry *) parray_get(result, i); + + tlinfo->parent_tli = tln->tli; + tlinfo->switchpoint = tln->end; + + if (!tli_list) + tli_list = parray_new(); + + parray_append(tli_list, tlinfo); + + /* Next tli */ + tlinfo = timelineInfoNew(tln->tli); + + /* oldest tli */ + if (i == 0) + { + tlinfo->tli = tln->tli; + tlinfo->parent_tli = 0; + tlinfo->switchpoint = 0; + parray_append(tli_list, tlinfo); + } + } + + /* link parent to child */ + for (i = 0; i < parray_num(tli_list); i++) + { + timelineInfo *tlinfo = (timelineInfo *) parray_get(tli_list, i); + + for (j = 0; j < parray_num(tli_list); j++) + { + timelineInfo *tlinfo_parent = (timelineInfo *) parray_get(tli_list, j); + + if (tlinfo->parent_tli == tlinfo_parent->tli) + { + tlinfo->parent_link = tlinfo_parent; + break; + } + } + } + + /* add backups to each timeline info */ + for (i = 0; i < parray_num(tli_list); i++) + { + timelineInfo *tlinfo = parray_get(tli_list, i); + for (j = 0; j < parray_num(backup_list); j++) + { + pgBackup *backup = parray_get(backup_list, j); + if (tlinfo->tli == backup->tli) + { + if (tlinfo->backups == NULL) + tlinfo->backups = parray_new(); + parray_append(tlinfo->backups, backup); + } + } + } + + /* cleanup */ + parray_walk(result, pg_free); + pg_free(result); + + return tli_list; +} + +parray* +parse_tli_history_buffer(char *history, TimeLineID tli) +{ + char *curLine = history; + TimeLineHistoryEntry *entry; + TimeLineHistoryEntry *last_timeline = NULL; + parray *result = NULL; + + /* Parse timeline history buffer string by string */ + while (curLine) + { + char tempStr[1024]; + char *nextLine = strchr(curLine, '\n'); + int curLineLen = nextLine ? (nextLine-curLine) : strlen(curLine); + + memcpy(tempStr, curLine, curLineLen); + tempStr[curLineLen] = '\0'; // NUL-terminate! + curLine = nextLine ? (nextLine+1) : NULL; + + if (curLineLen > 0) + { + char *ptr; + TimeLineID tli; + uint32 switchpoint_hi; + uint32 switchpoint_lo; + int nfields; + + for (ptr = tempStr; *ptr; ptr++) + { + if (!isspace((unsigned char) *ptr)) + break; + } + if (*ptr == '\0' || *ptr == '#') + continue; + + nfields = sscanf(tempStr, "%u\t%X/%X", &tli, &switchpoint_hi, &switchpoint_lo); + + if (nfields < 1) + { + /* expect a numeric timeline ID as first field of line */ + elog(ERROR, "Syntax error in timeline history: \"%s\". 
Expected a numeric timeline ID.", tempStr); + } + if (nfields != 3) + elog(ERROR, "Syntax error in timeline history: \"%s\". Expected a transaction log switchpoint location.", tempStr); + + if (last_timeline && tli <= last_timeline->tli) + elog(ERROR, "Timeline IDs must be in increasing sequence: \"%s\"", tempStr); + + entry = pgut_new(TimeLineHistoryEntry); + entry->tli = tli; + entry->end = ((uint64) switchpoint_hi << 32) | switchpoint_lo; + + last_timeline = entry; + /* Build list with newest item first */ + if (!result) + result = parray_new(); + parray_append(result, entry); + + /* we ignore the remainder of each line */ + } + } + + return result; +} + /* * Maybe add a StreamOptions struct ? * Backup conn only needed to calculate stream_stop_timeout. Think about refactoring it. @@ -374,7 +574,8 @@ start_WAL_streaming(PGconn *backup_conn, char *stream_dst_path, ConnectionOption stream_thread_arg.conn = pgut_connect_replication(conn_opt->pghost, conn_opt->pgport, conn_opt->pgdatabase, - conn_opt->pguser); + conn_opt->pguser, + true); /* sanity check*/ IdentifySystem(&stream_thread_arg); diff --git a/src/utils/pgut.c b/src/utils/pgut.c index ef3472e26..a1631b106 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -270,7 +270,8 @@ pgut_connect(const char *host, const char *port, PGconn * pgut_connect_replication(const char *host, const char *port, - const char *dbname, const char *username) + const char *dbname, const char *username, + bool strict) { PGconn *tmpconn; int argcount = 7; /* dbname, replication, fallback_app_name, @@ -356,7 +357,7 @@ pgut_connect_replication(const char *host, const char *port, continue; } - elog(ERROR, "could not connect to database %s: %s", + elog(strict ? ERROR : WARNING, "could not connect to database %s: %s", dbname, PQerrorMessage(tmpconn)); PQfinish(tmpconn); free(values); diff --git a/src/utils/pgut.h b/src/utils/pgut.h index d196aad3d..e6ccbf211 100644 --- a/src/utils/pgut.h +++ b/src/utils/pgut.h @@ -40,8 +40,8 @@ extern char *pgut_get_conninfo_string(PGconn *conn); extern PGconn *pgut_connect(const char *host, const char *port, const char *dbname, const char *username); extern PGconn *pgut_connect_replication(const char *host, const char *port, - const char *dbname, - const char *username); + const char *dbname, const char *username, + bool strict); extern void pgut_disconnect(PGconn *conn); extern void pgut_disconnect_callback(bool fatal, void *userdata); extern PGresult *pgut_execute(PGconn* conn, const char *query, int nParams, From 04bab3e92702f7e7dc8a0cbfd2aea3a1564ffbe7 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Wed, 17 Feb 2021 12:50:38 +0300 Subject: [PATCH 067/525] [Issue #328] added tests.retention.RetentionTest.test_concurrent_running_backup --- tests/retention.py | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/tests/retention.py b/tests/retention.py index 6dc8536ca..0122913d3 100644 --- a/tests/retention.py +++ b/tests/retention.py @@ -2536,3 +2536,42 @@ def test_basic_wal_depth(self): self.validate_pb(backup_dir, 'node') self.del_test_dir(module_name, fname, [node]) + + def test_concurrent_running_full_backup(self): + """ + https://github.com/postgrespro/pg_probackup/issues/328 + """ + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + 
self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL + self.backup_node(backup_dir, 'node', node) + + gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + gdb.set_breakpoint('backup_data_file') + gdb.run_until_break() + gdb.kill() + + self.assertTrue( + self.show_pb(backup_dir, 'node')[0]['status'], + 'RUNNING') + + print(self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--retention-redundancy=2', '--delete-expired', '--log-level-console=VERBOSE'], + return_id=False)) + + self.assertTrue( + self.show_pb(backup_dir, 'node')[1]['status'], + 'RUNNING') + + self.del_test_dir(module_name, fname, [node]) From 316f0d267bd2e79d88303b1f6a14d82a22793193 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Wed, 17 Feb 2021 12:52:35 +0300 Subject: [PATCH 068/525] [Issue #328] Do not delete invalid full backups within retention redundancy range --- src/delete.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/src/delete.c b/src/delete.c index c2b935d67..ec51374b0 100644 --- a/src/delete.c +++ b/src/delete.c @@ -227,13 +227,14 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg { pgBackup *backup = (pgBackup *) parray_get(backup_list, i); - /* Consider only valid FULL backups for Redundancy */ - if (instance_config.retention_redundancy > 0 && - backup->backup_mode == BACKUP_MODE_FULL && - (backup->status == BACKUP_STATUS_OK || - backup->status == BACKUP_STATUS_DONE)) + if (backup->backup_mode == BACKUP_MODE_FULL) { - n_full_backups++; + /* Consider only valid FULL backups for Redundancy fulfillment */ + if (backup->status == BACKUP_STATUS_OK || + backup->status == BACKUP_STATUS_DONE) + { + n_full_backups++; + } /* Add every FULL backup that satisfy Redundancy policy to separate list */ if (n_full_backups <= instance_config.retention_redundancy) @@ -413,7 +414,10 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg pinning_window ? pinning_window : instance_config.retention_window, action); - if (backup->backup_mode == BACKUP_MODE_FULL) + /* Only valid full backups are count to something */ + if (backup->backup_mode == BACKUP_MODE_FULL && + (backup->status == BACKUP_STATUS_OK || + backup->status == BACKUP_STATUS_DONE)) cur_full_backup_num++; } } From 00ce713c021eea4ed8c5f74563429555ef42d028 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Wed, 17 Feb 2021 12:55:18 +0300 Subject: [PATCH 069/525] Do no report meaningless timestamp when deleting backup without valid "recovery-time" attribute. 
Reported by Roman Zharkov --- src/delete.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/delete.c b/src/delete.c index ec51374b0..625d92ab7 100644 --- a/src/delete.c +++ b/src/delete.c @@ -745,7 +745,10 @@ delete_backup_files(pgBackup *backup) return; } - time2iso(timestamp, lengthof(timestamp), backup->recovery_time, false); + if (backup->recovery_time) + time2iso(timestamp, lengthof(timestamp), backup->recovery_time, false); + else + time2iso(timestamp, lengthof(timestamp), backup->start_time, false); elog(INFO, "Delete: %s %s", base36enc(backup->start_time), timestamp); From be4528caf1a33377050a349037f332fb82e2da2b Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Wed, 17 Feb 2021 12:56:23 +0300 Subject: [PATCH 070/525] [Issue #326] add special handling for Timeline 1 --- src/restore.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/restore.c b/src/restore.c index 1a22a9a28..8d573286a 100644 --- a/src/restore.c +++ b/src/restore.c @@ -1782,9 +1782,9 @@ read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict elog(ERROR, "Timeline IDs must be less than child timeline's ID."); /* History file is empty or corrupted */ - if (parray_num(result) == 0) + if (parray_num(result) == 0 && targetTLI != 1) { - elog(WARNING, "History file is corrupted: \"%s\"", path); + elog(WARNING, "History file is corrupted or missing: \"%s\"", path); pg_free(result); return NULL; } From 42aa2f39ff0c7b14a2f4649d2607fb4b55681b2d Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Wed, 17 Feb 2021 14:50:34 +0300 Subject: [PATCH 071/525] [Issue #328] minor refactoring --- src/delete.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/delete.c b/src/delete.c index 625d92ab7..7458f0100 100644 --- a/src/delete.c +++ b/src/delete.c @@ -229,21 +229,21 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg if (backup->backup_mode == BACKUP_MODE_FULL) { - /* Consider only valid FULL backups for Redundancy fulfillment */ - if (backup->status == BACKUP_STATUS_OK || - backup->status == BACKUP_STATUS_DONE) - { - n_full_backups++; - } - /* Add every FULL backup that satisfy Redundancy policy to separate list */ - if (n_full_backups <= instance_config.retention_redundancy) + if (n_full_backups < instance_config.retention_redundancy) { if (!redundancy_full_backup_list) redundancy_full_backup_list = parray_new(); parray_append(redundancy_full_backup_list, backup); } + + /* Consider only valid FULL backups for Redundancy fulfillment */ + if (backup->status == BACKUP_STATUS_OK || + backup->status == BACKUP_STATUS_DONE) + { + n_full_backups++; + } } } /* Sort list of full backups to keep */ From be3be870b938b79cba63743f5519d7ae1abac48d Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Wed, 17 Feb 2021 14:50:49 +0300 Subject: [PATCH 072/525] [Issue #328] improve test coverage --- tests/retention.py | 42 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/tests/retention.py b/tests/retention.py index 0122913d3..bd1d9b796 100644 --- a/tests/retention.py +++ b/tests/retention.py @@ -2567,11 +2567,51 @@ def test_concurrent_running_full_backup(self): print(self.backup_node( backup_dir, 'node', node, backup_type='delta', - options=['--retention-redundancy=2', '--delete-expired', '--log-level-console=VERBOSE'], + options=['--retention-redundancy=2', '--delete-expired'], return_id=False)) self.assertTrue( self.show_pb(backup_dir, 
'node')[1]['status'], 'RUNNING') + self.backup_node(backup_dir, 'node', node) + + gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + gdb.set_breakpoint('backup_data_file') + gdb.run_until_break() + gdb.kill() + + gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + gdb.set_breakpoint('backup_data_file') + gdb.run_until_break() + gdb.kill() + + self.backup_node(backup_dir, 'node', node) + + gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + gdb.set_breakpoint('backup_data_file') + gdb.run_until_break() + gdb.kill() + + out = self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--retention-redundancy=2', '--delete-expired'], + return_id=False) + + self.assertTrue( + self.show_pb(backup_dir, 'node')[0]['status'], + 'OK') + + self.assertTrue( + self.show_pb(backup_dir, 'node')[1]['status'], + 'RUNNING') + + self.assertTrue( + self.show_pb(backup_dir, 'node')[2]['status'], + 'OK') + + self.assertEqual( + len(self.show_pb(backup_dir, 'node')), + 6) + self.del_test_dir(module_name, fname, [node]) From 7b2f46cbc07dac2b3ec0e06f8bb2aa6e8b7bf6f2 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Wed, 17 Feb 2021 14:58:11 +0300 Subject: [PATCH 073/525] tests: minor fixes --- tests/retention.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/retention.py b/tests/retention.py index bd1d9b796..18023751a 100644 --- a/tests/retention.py +++ b/tests/retention.py @@ -2565,10 +2565,9 @@ def test_concurrent_running_full_backup(self): self.show_pb(backup_dir, 'node')[0]['status'], 'RUNNING') - print(self.backup_node( + self.backup_node( backup_dir, 'node', node, backup_type='delta', - options=['--retention-redundancy=2', '--delete-expired'], - return_id=False)) + options=['--retention-redundancy=2', '--delete-expired']) self.assertTrue( self.show_pb(backup_dir, 'node')[1]['status'], @@ -2593,7 +2592,7 @@ def test_concurrent_running_full_backup(self): gdb.run_until_break() gdb.kill() - out = self.backup_node( + self.backup_node( backup_dir, 'node', node, backup_type='delta', options=['--retention-redundancy=2', '--delete-expired'], return_id=False) From 8a3dfd5e67e7f704e9311fb2d9c03f4141e1a35e Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Wed, 17 Feb 2021 16:07:25 +0300 Subject: [PATCH 074/525] merge: honor the --no-fsync and --no-validate flags --- src/pg_probackup.c | 9 +++++++++ src/pg_probackup.h | 4 ++++ src/utils/file.c | 3 +++ src/validate.c | 5 +++++ 4 files changed, 21 insertions(+) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 90202fa84..ac927965c 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -128,6 +128,10 @@ bool compress_shortcut = false; /* other options */ char *instance_name; +/* TODO: quick hack */ +bool merge_no_validate = false; +bool merge_no_sync = false; + /* archive push options */ int batch_size = 1; static char *wal_file_path; @@ -830,6 +834,9 @@ main(int argc, char *argv[]) case SHOW_CMD: return do_show(instance_name, current.backup_id, show_archive); case DELETE_CMD: + merge_no_validate = no_validate; + merge_no_sync = no_sync; + if (delete_expired && backup_id_string) elog(ERROR, "You cannot specify --delete-expired and (-i, --backup-id) options together"); if (merge_expired && backup_id_string) @@ -850,6 +857,8 @@ main(int argc, char *argv[]) do_delete(current.backup_id); break; case MERGE_CMD: + merge_no_validate = no_validate; + merge_no_sync = no_sync; do_merge(current.backup_id); break; case SHOW_CONFIG_CMD: diff --git a/src/pg_probackup.h b/src/pg_probackup.h 
index 1e001bd05..d9c0f3c24 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -784,6 +784,10 @@ extern bool compress_shortcut; /* other options */ extern char *instance_name; +/* temp merge options */ +extern bool merge_no_validate; +extern bool merge_no_sync; + /* show options */ extern ShowFormat show_format; diff --git a/src/utils/file.c b/src/utils/file.c index 15a7085ec..26892fedd 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1169,6 +1169,9 @@ int fio_sync(char const* path, fio_location location) { int fd; + if (merge_no_sync) + return 0; + fd = open(path, O_WRONLY | PG_BINARY, FILE_PERMISSIONS); if (fd < 0) return -1; diff --git a/src/validate.c b/src/validate.c index 21900c8e4..23819f6b4 100644 --- a/src/validate.c +++ b/src/validate.c @@ -129,6 +129,9 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) // dbOid_exclude_list = get_dbOid_exclude_list(backup, files, params->partial_db_list, // params->partial_restore_type); + if (merge_no_validate) + goto skip_validation; + /* setup threads */ for (i = 0; i < parray_num(files); i++) { @@ -180,6 +183,8 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) pfree(threads); pfree(threads_args); +skip_validation: + /* cleanup */ parray_walk(files, pgFileFree); parray_free(files); From 2193cd76fc4b4cdbfe8383a9c3201dc7e08c8e9c Mon Sep 17 00:00:00 2001 From: "a.kozhemyakin" Date: Thu, 18 Feb 2021 03:01:28 +0000 Subject: [PATCH 075/525] clear pgdata and stop node after tests --- tests/helpers/ptrack_helpers.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 028e9cd16..76eac6920 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1521,17 +1521,14 @@ def clean_all(self): def del_test_dir(self, module_name, fname, nodes=[]): """ Del testdir and optimistically try to del module dir""" - try: - self.clean_all() - except Exception as e: - raise e + + self.clean_all() shutil.rmtree( os.path.join( self.tmp_path, module_name, - fname, - "backup" + fname ), ignore_errors=True ) From dd8c23c9b12e7831ec3669b2b52e2f4582d76a62 Mon Sep 17 00:00:00 2001 From: "a.kozhemyakin" Date: Thu, 18 Feb 2021 05:13:11 +0000 Subject: [PATCH 076/525] delete nodes in del_test_dir --- tests/archive.py | 5 ++--- tests/backup.py | 10 +++++----- tests/checkdb.py | 2 +- tests/compression.py | 2 +- tests/delta.py | 2 +- tests/helpers/ptrack_helpers.py | 2 +- tests/incr_restore.py | 22 +++++++++++----------- tests/merge.py | 14 +++++++------- tests/page.py | 2 +- tests/ptrack.py | 2 +- tests/replica.py | 2 +- tests/retention.py | 6 +++--- tests/validate.py | 8 ++++---- 13 files changed, 39 insertions(+), 40 deletions(-) diff --git a/tests/archive.py b/tests/archive.py index 05675065a..e76ca5d3f 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -981,7 +981,7 @@ def test_basic_master_and_replica_concurrent_archiving(self): self.backup_node(backup_dir, 'master', replica) # Clean after yourself - self.del_test_dir(module_name, fname, nodes=[master, replica]) + self.del_test_dir(module_name, fname) # @unittest.expectedFailure # @unittest.skip("skip") @@ -2016,8 +2016,7 @@ def test_archive_pg_receivexlog_partial_handling(self): # Clean after yourself pg_receivexlog.kill() - self.del_test_dir( - module_name, fname, [node, replica, node_restored]) + self.del_test_dir(module_name, fname) @unittest.skip("skip") def test_multi_timeline_recovery_prefetching(self): diff --git a/tests/backup.py b/tests/backup.py index 
70c4dc13f..65851d0f3 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -1065,7 +1065,7 @@ def test_basic_tablespace_handling(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname, nodes=[node]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_tablespace_handling_1(self): @@ -1599,7 +1599,7 @@ def test_basic_temp_slot_for_stream_backup(self): options=['--stream', '--slot=slot_1', '--temp-slot']) # Clean after yourself - self.del_test_dir(module_name, fname, [node]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_backup_concurrent_drop_table(self): @@ -1645,7 +1645,7 @@ def test_backup_concurrent_drop_table(self): self.assertEqual(show_backup['status'], "OK") # Clean after yourself - self.del_test_dir(module_name, fname, nodes=[node]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_pg_11_adjusted_wal_segment_size(self): @@ -1930,7 +1930,7 @@ def test_basic_missing_file_permissions(self): os.chmod(full_path, 700) # Clean after yourself - self.del_test_dir(module_name, fname, [node]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_basic_missing_dir_permissions(self): @@ -1973,7 +1973,7 @@ def test_basic_missing_dir_permissions(self): os.rmdir(full_path) # Clean after yourself - self.del_test_dir(module_name, fname, [node]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_backup_with_least_privileges_role(self): diff --git a/tests/checkdb.py b/tests/checkdb.py index 3349ad2ef..5b7a156cc 100644 --- a/tests/checkdb.py +++ b/tests/checkdb.py @@ -349,7 +349,7 @@ def test_basic_checkdb_amcheck_only_sanity(self): log_file_content) # Clean after yourself - self.del_test_dir(module_name, fname, [node]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_checkdb_block_validation_sanity(self): diff --git a/tests/compression.py b/tests/compression.py index 321461d6e..c10a59489 100644 --- a/tests/compression.py +++ b/tests/compression.py @@ -117,7 +117,7 @@ def test_basic_compression_stream_zlib(self): self.assertEqual(delta_result, delta_result_new) # Clean after yourself - self.del_test_dir(module_name, fname, [node]) + self.del_test_dir(module_name, fname) def test_compression_archive_zlib(self): """ diff --git a/tests/delta.py b/tests/delta.py index daa423d49..e18b8fb63 100644 --- a/tests/delta.py +++ b/tests/delta.py @@ -80,7 +80,7 @@ def test_basic_delta_vacuum_truncate(self): node_restored.slow_start() # Clean after yourself - self.del_test_dir(module_name, fname, [node, node_restored]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_delta_vacuum_truncate_1(self): diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 76eac6920..a84caba4d 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1519,7 +1519,7 @@ def clean_all(self): if o.__class__ is testgres.PostgresNode: o.cleanup() - def del_test_dir(self, module_name, fname, nodes=[]): + def del_test_dir(self, module_name, fname): """ Del testdir and optimistically try to del module dir""" self.clean_all() diff --git a/tests/incr_restore.py b/tests/incr_restore.py index a228bdd79..885a88c2e 100644 --- a/tests/incr_restore.py +++ b/tests/incr_restore.py @@ -720,7 +720,7 @@ def test_basic_incr_restore_sanity(self): repr(e.message), self.cmd)) # Clean after yourself - self.del_test_dir(module_name, fname, [node]) + self.del_test_dir(module_name, fname) # 
@unittest.skip("skip") def test_incr_checksum_restore(self): @@ -809,7 +809,7 @@ def test_incr_checksum_restore(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname, [node, node_1]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") @@ -898,7 +898,7 @@ def test_incr_lsn_restore(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname, [node, node_1]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_incr_lsn_sanity(self): @@ -967,7 +967,7 @@ def test_incr_lsn_sanity(self): repr(e.message), self.cmd)) # Clean after yourself - self.del_test_dir(module_name, fname, [node_1]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_incr_checksum_sanity(self): @@ -1026,7 +1026,7 @@ def test_incr_checksum_sanity(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname, [node_1]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") @@ -1561,7 +1561,7 @@ def test_make_replica_via_incr_checksum_restore(self): pgbench.wait() # Clean after yourself - self.del_test_dir(module_name, fname, [new_master, old_master]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_make_replica_via_incr_lsn_restore(self): @@ -1634,7 +1634,7 @@ def test_make_replica_via_incr_lsn_restore(self): pgbench.wait() # Clean after yourself - self.del_test_dir(module_name, fname, [new_master, old_master]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") # @unittest.expectedFailure @@ -1878,7 +1878,7 @@ def test_incr_lsn_long_xact_2(self): '1') # Clean after yourself - self.del_test_dir(module_name, fname, [node]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") # @unittest.expectedFailure @@ -2137,7 +2137,7 @@ def test_incremental_partial_restore_exclude_checksum(self): self.assertNotIn('PANIC', output) # Clean after yourself - self.del_test_dir(module_name, fname, [node, node2]) + self.del_test_dir(module_name, fname) def test_incremental_partial_restore_exclude_lsn(self): """""" @@ -2247,7 +2247,7 @@ def test_incremental_partial_restore_exclude_lsn(self): self.assertNotIn('PANIC', output) # Clean after yourself - self.del_test_dir(module_name, fname, [node2]) + self.del_test_dir(module_name, fname) def test_incremental_partial_restore_exclude_tablespace_checksum(self): """""" @@ -2391,7 +2391,7 @@ def test_incremental_partial_restore_exclude_tablespace_checksum(self): self.assertNotIn('PANIC', output) # Clean after yourself - self.del_test_dir(module_name, fname, [node2]) + self.del_test_dir(module_name, fname) def test_incremental_pg_filenode_map(self): """ diff --git a/tests/merge.py b/tests/merge.py index 29d60433c..186b2f203 100644 --- a/tests/merge.py +++ b/tests/merge.py @@ -101,7 +101,7 @@ def test_basic_merge_full_page(self): self.assertEqual(count1, count2) # Clean after yourself - self.del_test_dir(module_name, fname, [node]) + self.del_test_dir(module_name, fname) def test_merge_compressed_backups(self): """ @@ -2245,7 +2245,7 @@ def test_merge_multiple_descendants(self): repr(e.message), self.cmd)) # Clean after yourself - self.del_test_dir(module_name, fname, [node]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_smart_merge(self): @@ -2305,7 +2305,7 @@ def test_smart_merge(self): logfile_content = f.read() # Clean after yourself - self.del_test_dir(module_name, fname, [node]) + 
self.del_test_dir(module_name, fname) def test_idempotent_merge(self): """ @@ -2380,7 +2380,7 @@ def test_idempotent_merge(self): self.assertEqual( page_id_2, self.show_pb(backup_dir, 'node')[0]['id']) - self.del_test_dir(module_name, fname, [node]) + self.del_test_dir(module_name, fname) def test_merge_correct_inheritance(self): """ @@ -2435,7 +2435,7 @@ def test_merge_correct_inheritance(self): page_meta['expire-time'], self.show_pb(backup_dir, 'node', page_id)['expire-time']) - self.del_test_dir(module_name, fname, [node]) + self.del_test_dir(module_name, fname) def test_merge_correct_inheritance_1(self): """ @@ -2485,7 +2485,7 @@ def test_merge_correct_inheritance_1(self): 'expire-time', self.show_pb(backup_dir, 'node', page_id)) - self.del_test_dir(module_name, fname, [node]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") # @unittest.expectedFailure @@ -2604,7 +2604,7 @@ def test_multi_timeline_merge(self): '-d', 'postgres', '-p', str(node_restored.port)]) # Clean after yourself - self.del_test_dir(module_name, fname, [node, node_restored]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") # @unittest.expectedFailure diff --git a/tests/page.py b/tests/page.py index c0dbb69f2..8208e8319 100644 --- a/tests/page.py +++ b/tests/page.py @@ -100,7 +100,7 @@ def test_basic_page_vacuum_truncate(self): self.assertEqual(result1, result2) # Clean after yourself - self.del_test_dir(module_name, fname, [node, node_restored]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_page_vacuum_truncate_1(self): diff --git a/tests/ptrack.py b/tests/ptrack.py index c45ecd6ec..de76d1d36 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -3148,7 +3148,7 @@ def test_basic_ptrack_truncate_replica(self): 'select 1') # Clean after yourself - self.del_test_dir(module_name, fname, [master, replica, node]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") # @unittest.expectedFailure diff --git a/tests/replica.py b/tests/replica.py index f664ca886..345f8a7dc 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -418,7 +418,7 @@ def test_basic_make_replica_via_restore(self): options=['--archive-timeout=30s', '--stream']) # Clean after yourself - self.del_test_dir(module_name, fname, [master, replica]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_take_backup_from_delayed_replica(self): diff --git a/tests/retention.py b/tests/retention.py index 6dc8536ca..24884574a 100644 --- a/tests/retention.py +++ b/tests/retention.py @@ -1008,7 +1008,7 @@ def test_basic_window_merge_multiple_descendants(self): 'FULL') # Clean after yourself - self.del_test_dir(module_name, fname, [node]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_basic_window_merge_multiple_descendants_1(self): @@ -1275,7 +1275,7 @@ def test_basic_window_merge_multiple_descendants_1(self): '--delete-expired', '--log-level-console=log']) # Clean after yourself - self.del_test_dir(module_name, fname, [node]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_window_chains(self): @@ -2535,4 +2535,4 @@ def test_basic_wal_depth(self): self.validate_pb(backup_dir, 'node') - self.del_test_dir(module_name, fname, [node]) + self.del_test_dir(module_name, fname) diff --git a/tests/validate.py b/tests/validate.py index cfec234d0..62116be89 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -298,7 +298,7 @@ def test_basic_validate_corrupted_intermediate_backup(self): 'Backup STATUS should be "ORPHAN"') # Clean 
after yourself - self.del_test_dir(module_name, fname, [node]) + self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_validate_corrupted_intermediate_backups(self): @@ -3843,7 +3843,7 @@ def test_validate_corrupt_page_header_map(self): self.assertIn("WARNING: Some backups are not valid", e.message) # Clean after yourself - self.del_test_dir(module_name, fname, [node]) + self.del_test_dir(module_name, fname) # @unittest.expectedFailure # @unittest.skip("skip") @@ -3906,7 +3906,7 @@ def test_validate_truncated_page_header_map(self): self.assertIn("WARNING: Some backups are not valid", e.message) # Clean after yourself - self.del_test_dir(module_name, fname, [node]) + self.del_test_dir(module_name, fname) # @unittest.expectedFailure # @unittest.skip("skip") @@ -3966,7 +3966,7 @@ def test_validate_missing_page_header_map(self): self.assertIn("WARNING: Some backups are not valid", e.message) # Clean after yourself - self.del_test_dir(module_name, fname, [node]) + self.del_test_dir(module_name, fname) # validate empty backup list # page from future during validate From 2cf0de22573411d36fc6a0f83e0b4bd812d4e096 Mon Sep 17 00:00:00 2001 From: Elena Indrupskaya Date: Thu, 18 Feb 2021 12:40:51 +0300 Subject: [PATCH 077/525] [DOC] Added/removed tags surrounding 'PostgreSQL' for correct import into PostgresPro documentation --- doc/pgprobackup.xml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index b0a0f6763..3ac4fdcd3 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -155,7 +155,7 @@ doc/src/sgml/pgprobackup.sgml recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure. - pg_probackup supports PostgreSQL 9.5 or higher. + pg_probackup supports PostgreSQL 9.5 or higher. @@ -389,7 +389,7 @@ doc/src/sgml/pgprobackup.sgml - pg_probackup only supports PostgreSQL 9.5 and higher. + pg_probackup only supports PostgreSQL 9.5 and higher. @@ -410,7 +410,7 @@ doc/src/sgml/pgprobackup.sgml - For PostgreSQL 9.5, functions + For PostgreSQL 9.5, functions pg_create_restore_point(text) and pg_switch_xlog() can be executed only if the backup role is a superuser, so backup of a @@ -599,7 +599,7 @@ pg_probackup add-instance -B backup_dir -D to the PostgreSQL server: - For PostgreSQL 9.5: + For PostgreSQL 9.5: BEGIN; @@ -1711,7 +1711,7 @@ pg_probackup restore -B backup_dir --instance The speed of restore from backup can be significantly improved by replacing only invalid and changed pages in already - existing PostgreSQL data directory using + existing PostgreSQL data directory using incremental restore options with the command. @@ -1874,11 +1874,11 @@ pg_probackup restore -B backup_dir --instance - Due to recovery specifics of PostgreSQL versions earlier than 12, + Due to recovery specifics of PostgreSQL versions earlier than 12, it is advisable that you set the hot_standby parameter to off when running partial - restore of a PostgreSQL cluster of version earlier than 12. + restore of a PostgreSQL cluster of version earlier than 12. Otherwise the recovery may fail. 
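
For reference, the partial-restore scenario described in the documentation hunk above boils down to an invocation along the following lines. This is only a sketch: the instance name, target data directory, and excluded database are hypothetical placeholders rather than values taken from these patches, while the options (-B, --instance, -D, --db-exclude) follow the restore options documented elsewhere in this manual.

    pg_probackup restore -B backup_dir --instance node -D /var/lib/pgsql/restored_data --db-exclude=db_to_skip
    # For clusters of PostgreSQL versions earlier than 12, disable hot_standby before
    # starting recovery, as advised above, e.g. by appending to postgresql.auto.conf:
    #     hot_standby = off
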
From 3c0cc47b6d435e3de95c0c8efd4f1ba5a0b84583 Mon Sep 17 00:00:00 2001 From: Elena Indrupskaya Date: Fri, 19 Feb 2021 16:30:07 +0300 Subject: [PATCH 078/525] [DOC] Recommendation on setting ptrack.map_size updated with ptrack documentation --- doc/pgprobackup.xml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 3ac4fdcd3..e0a733bde 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -1202,8 +1202,9 @@ CREATE EXTENSION ptrack; together, which leads to false-positive results when tracking changed blocks and increases the incremental backup size as unchanged blocks can also be copied into the incremental backup. - Setting ptrack.map_size to a higher value - does not affect PTRACK operation. The maximum allowed value is 1024. + Setting ptrack.map_size to a higher value does not + affect PTRACK operation, but it is not recommended to set this parameter + to a value higher than 1024. From 1fd1c8d35c8a02ef6f3d443080cb46f0bbf640fb Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 28 Feb 2021 23:20:03 +0300 Subject: [PATCH 079/525] [Issue #327] add test coverage --- tests/archive.py | 77 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/tests/archive.py b/tests/archive.py index a70893958..ad2320a3c 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -983,6 +983,83 @@ def test_basic_master_and_replica_concurrent_archiving(self): # Clean after yourself self.del_test_dir(module_name, fname) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_concurrent_archiving(self): + """ + Concurrent archiving from master, replica and cascade replica + https://github.com/postgrespro/pg_probackup/issues/327 + + For PG >= 11 it is expected to pass this test + """ + + if self.pg_config_version < self.version_to_num('11.0'): + return unittest.skip('You need PostgreSQL >= 11 for this test') + + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={'autovacuum': 'off'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', master) + self.set_archiving(backup_dir, 'node', master, replica=True) + master.slow_start() + + master.pgbench_init(scale=10) + + # TAKE FULL ARCHIVE BACKUP FROM MASTER + self.backup_node(backup_dir, 'node', master) + + # Settings for Replica + replica = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'replica')) + replica.cleanup() + self.restore_node(backup_dir, 'node', replica) + + self.set_replica(master, replica, synchronous=True) + self.set_archiving(backup_dir, 'node', replica, replica=True) + self.set_auto_conf(replica, {'port': replica.port}) + replica.slow_start(replica=True) + + # create cascade replicas + replica1 = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'replica1')) + replica1.cleanup() + + # Settings for casaced replica + self.restore_node(backup_dir, 'node', replica1) + self.set_replica(replica, replica1, synchronous=False) + self.set_auto_conf(replica1, {'port': replica1.port}) + replica1.slow_start(replica=True) + + # Take full backup from master + self.backup_node(backup_dir, 'node', master) + + pgbench = master.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '30', '-c', '1']) + + # Take several incremental 
backups from master + self.backup_node(backup_dir, 'node', master, backup_type='page', options=['--no-validate']) + + self.backup_node(backup_dir, 'node', master, backup_type='page', options=['--no-validate']) + + pgbench.wait() + pgbench.stdout.close() + + with open(os.path.join(replica1.logs_dir, 'postgresql.log'), 'r') as f: + log_content = f.read() + + self.assertNotIn('different checksum', log_content) + + # Clean after yourself + self.del_test_dir(module_name, fname) + # @unittest.expectedFailure # @unittest.skip("skip") def test_archive_pg_receivexlog(self): From 2974c03e7df0b5279d94eb24903a2e1b2679211e Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 28 Feb 2021 23:22:35 +0300 Subject: [PATCH 080/525] tests: minor fix --- tests/archive.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/archive.py b/tests/archive.py index ad2320a3c..ac1b2a0d4 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -1052,9 +1052,16 @@ def test_concurrent_archiving(self): pgbench.wait() pgbench.stdout.close() - with open(os.path.join(replica1.logs_dir, 'postgresql.log'), 'r') as f: + with open(os.path.join(master.logs_dir, 'postgresql.log'), 'r') as f: + log_content = f.read() + self.assertNotIn('different checksum', log_content) + + with open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f: log_content = f.read() + self.assertNotIn('different checksum', log_content) + with open(os.path.join(replica1.logs_dir, 'postgresql.log'), 'r') as f: + log_content = f.read() self.assertNotIn('different checksum', log_content) # Clean after yourself From e0dfc9c89e26e9b9e18826dbc0ac36cac7d33359 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Fri, 5 Mar 2021 17:01:15 +0300 Subject: [PATCH 081/525] [Issue #342] check error when writing to pg_probackup.conf and sync it to disk before rename --- src/configure.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/src/configure.c b/src/configure.c index 1aae3df13..cf172242a 100644 --- a/src/configure.c +++ b/src/configure.c @@ -299,6 +299,7 @@ do_set_config(bool missing_ok) for (i = 0; instance_options[i].type; i++) { + int rc = 0; ConfigOption *opt = &instance_options[i]; char *value; @@ -319,13 +320,25 @@ do_set_config(bool missing_ok) } if (strchr(value, ' ')) - fprintf(fp, "%s = '%s'\n", opt->lname, value); + rc = fprintf(fp, "%s = '%s'\n", opt->lname, value); else - fprintf(fp, "%s = %s\n", opt->lname, value); + rc = fprintf(fp, "%s = %s\n", opt->lname, value); + + if (rc < 0) + elog(ERROR, "Cannot write to configuration file: \"%s\"", path_temp); + pfree(value); } - fclose(fp); + if (ferror(fp) || fflush(fp)) + elog(ERROR, "Cannot write to configuration file: \"%s\"", path_temp); + + if (fclose(fp)) + elog(ERROR, "Cannot close configuration file: \"%s\"", path_temp); + + if (fio_sync(path_temp, FIO_LOCAL_HOST) != 0) + elog(ERROR, "Failed to sync temp configuration file \"%s\": %s", + path_temp, strerror(errno)); if (rename(path_temp, path) < 0) { From 916ffb6d91b162b018cf29ce755fa5235241e8aa Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 7 Mar 2021 18:34:45 +0300 Subject: [PATCH 082/525] [Issue #343] redundant errno check when reading "/backup_dir/backups/instance_name" directory --- src/catalog.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 15386d426..995dd8fad 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -972,17 +972,11 @@ catalog_get_backup_list(const char *instance_name, time_t 
requested_backup_id) continue; } parray_append(backups, backup); - - if (errno && errno != ENOENT) - { - elog(WARNING, "cannot read data directory \"%s\": %s", - data_ent->d_name, strerror(errno)); - goto err_proc; - } } + if (errno) { - elog(WARNING, "cannot read backup root directory \"%s\": %s", + elog(WARNING, "Cannot read backup root directory \"%s\": %s", backup_instance_path, strerror(errno)); goto err_proc; } From 79a98911a40ea7bfcade730d152e1903e53659c3 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sat, 20 Mar 2021 22:23:28 +0300 Subject: [PATCH 083/525] fix achive_timeout=0 due to old bug from 2.0.x version --- src/backup.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/backup.c b/src/backup.c index ca8baa777..be0147e91 100644 --- a/src/backup.c +++ b/src/backup.c @@ -1572,8 +1572,13 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, */ if (pg_stop_backup_is_sent && !in_cleanup) { + int timeout = ARCHIVE_TIMEOUT_DEFAULT; res = NULL; + /* kludge against some old bug in archive_timeout. TODO: remove in 3.0.0 */ + if (instance_config.archive_timeout > 0) + timeout = instance_config.archive_timeout; + while (1) { if (!PQconsumeInput(conn)) @@ -1598,11 +1603,10 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, * If postgres haven't answered in archive_timeout seconds, * send an interrupt. */ - if (pg_stop_backup_timeout > instance_config.archive_timeout) + if (pg_stop_backup_timeout > timeout) { pgut_cancel(conn); - elog(ERROR, "pg_stop_backup doesn't answer in %d seconds, cancel it", - instance_config.archive_timeout); + elog(ERROR, "pg_stop_backup doesn't answer in %d seconds, cancel it", timeout); } } else From 69754925b2a952c65f743eaa6fbd41ff69beca11 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sat, 20 Mar 2021 23:26:19 +0300 Subject: [PATCH 084/525] [Issue #348] added tests.compatibility.CompatibilityTest.test_compatibility_tablespace --- tests/compatibility.py | 82 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git a/tests/compatibility.py b/tests/compatibility.py index 18f601506..da9d72f83 100644 --- a/tests/compatibility.py +++ b/tests/compatibility.py @@ -1419,3 +1419,85 @@ def test_hidden_files(self): # Clean after yourself self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_compatibility_tablespace(self): + """ + https://github.com/postgrespro/pg_probackup/issues/348 + """ + fname = self.id().split('.')[3] + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node, old_binary=True) + node.slow_start() + + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=["-j", "4", "--stream"], old_binary=True) + + tblspace_old_path = self.get_tblspace_path(node, 'tblspace_old') + + self.create_tblspace_in_node( + node, 'tblspace', + tblspc_path=tblspace_old_path) + + node.safe_psql( + "postgres", + "create table t_heap_lame tablespace tblspace " + "as select 1 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1000) i") + + tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new') + + node_restored = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node_restored')) + 
node_restored.cleanup() + + try: + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + tblspace_old_path, tblspace_new_path)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because tablespace mapping is incorrect" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Backup {0} has no tablespaceses, ' + 'nothing to remap'.format(backup_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=["-j", "4", "--stream"], old_binary=True) + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + tblspace_old_path, tblspace_new_path)]) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + self.del_test_dir(module_name, fname) From 58d02b27071980023bf7f2157b577b31e62d96c0 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sat, 20 Mar 2021 23:27:34 +0300 Subject: [PATCH 085/525] [Issue #248] Use crc32 when validating backup of version "2.0.21 < version <= 2.0.25" --- src/validate.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/validate.c b/src/validate.c index 21900c8e4..a91fe817f 100644 --- a/src/validate.c +++ b/src/validate.c @@ -716,6 +716,8 @@ validate_tablespace_map(pgBackup *backup) pgFile **tablespace_map = NULL; pg_crc32 crc; parray *files = get_backup_filelist(backup, true); + bool use_crc32c = parse_program_version(backup->program_version) <= 20021 || + parse_program_version(backup->program_version) >= 20025; parray_qsort(files, pgFileCompareRelPathWithExternal); join_path_components(map_path, backup->database_dir, PG_TABLESPACE_MAP_FILE); @@ -738,7 +740,7 @@ validate_tablespace_map(pgBackup *backup) map_path, base36enc(backup->backup_id)); /* check tablespace map checksumms */ - crc = pgFileGetCRC(map_path, true, false); + crc = pgFileGetCRC(map_path, use_crc32c, false); if ((*tablespace_map)->crc != crc) elog(ERROR, "Invalid CRC of tablespace map file \"%s\" : %X. 
Expected %X, " From f4ab4b9d2b46f337b601af9b6ee640d5f03abf47 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 21 Mar 2021 01:33:36 +0300 Subject: [PATCH 086/525] Version 2.4.11 --- src/pg_probackup.h | 2 +- tests/expected/option_version.out | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 1e001bd05..f28f96e2e 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -306,7 +306,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.4.10" +#define PROGRAM_VERSION "2.4.11" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20409 diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index b5204e46e..35f065a13 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.4.10 \ No newline at end of file +pg_probackup 2.4.11 \ No newline at end of file From 0876dd61900a0ef5a733f3025c7a732dc6e208b9 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 22 Mar 2021 20:01:53 +0300 Subject: [PATCH 087/525] [Issue #346] set "interrupted" in elog, wait for streamed segments --- src/backup.c | 32 +++++++++++++++++++------------- src/utils/logger.c | 1 + 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/src/backup.c b/src/backup.c index be0147e91..4ec790644 100644 --- a/src/backup.c +++ b/src/backup.c @@ -56,7 +56,7 @@ static void pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, PGNode static XLogRecPtr wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn, TimeLineID tli, bool in_prev_segment, bool segment_only, - int timeout_elevel, bool in_stream_dir); + int timeout_elevel, bool in_stream_dir, pgBackup *backup); static void check_external_for_tablespaces(parray *external_list, PGconn *backup_conn); @@ -268,7 +268,7 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool * Because WAL streaming will start after pg_start_backup() in stream * mode. 
*/ - wait_wal_lsn(current.start_lsn, true, current.tli, false, true, ERROR, false); + wait_wal_lsn(current.start_lsn, true, current.tli, false, true, ERROR, false, ¤t); } /* start stream replication */ @@ -279,6 +279,12 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool start_WAL_streaming(backup_conn, dst_backup_path, &instance_config.conn_opt, current.start_lsn, current.tli); + + /* Make sure that WAL streaming is working + * PAGE backup in stream mode is waited twice, first for + * segment in WAL archive and then for streamed segment + */ + wait_wal_lsn(current.start_lsn, true, current.tli, false, true, ERROR, true, ¤t); } /* initialize backup's file list */ @@ -1262,7 +1268,7 @@ pg_is_superuser(PGconn *conn) static XLogRecPtr wait_wal_lsn(XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli, bool in_prev_segment, bool segment_only, - int timeout_elevel, bool in_stream_dir) + int timeout_elevel, bool in_stream_dir, pgBackup *backup) { XLogSegNo targetSegNo; char pg_wal_dir[MAXPGPATH]; @@ -1294,15 +1300,14 @@ wait_wal_lsn(XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli, */ if (in_stream_dir) { - pgBackupGetPath2(¤t, pg_wal_dir, lengthof(pg_wal_dir), - DATABASE_DIR, PG_XLOG_DIR); + join_path_components(pg_wal_dir, backup->database_dir, PG_XLOG_DIR); join_path_components(wal_segment_path, pg_wal_dir, wal_segment); wal_segment_dir = pg_wal_dir; } else { join_path_components(wal_segment_path, arclog_path, wal_segment); - wal_segment_dir = arclog_path; + wal_segment_dir = arclog_path; /* global var */ } /* TODO: remove this in 3.0 (it is a cludge against some old bug with archive_timeout) */ @@ -1394,7 +1399,7 @@ wait_wal_lsn(XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli, sleep(1); if (interrupted) - elog(ERROR, "Interrupted during waiting for WAL archiving"); + elog(ERROR, "Interrupted during waiting for WAL %s", in_stream_dir ? "streaming" : "archiving"); try_count++; /* Inform user if WAL segment is absent in first attempt */ @@ -1418,9 +1423,10 @@ wait_wal_lsn(XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli, { if (file_exists) elog(timeout_elevel, "WAL segment %s was %s, " - "but target LSN %X/%X could not be archived in %d seconds", + "but target LSN %X/%X could not be %s in %d seconds", wal_segment, wal_delivery_str, - (uint32) (target_lsn >> 32), (uint32) target_lsn, timeout); + (uint32) (target_lsn >> 32), (uint32) target_lsn, + wal_delivery_str, timeout); /* If WAL segment doesn't exist or we wait for previous segment */ else elog(timeout_elevel, @@ -1705,7 +1711,7 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, { /* Wait for segment with current stop_lsn, it is ok for it to never arrive */ wait_wal_lsn(stop_backup_lsn_tmp, false, backup->tli, - false, true, WARNING, stream_wal); + false, true, WARNING, stream_wal, backup); /* Get the first record in segment with current stop_lsn */ lsn_tmp = get_first_record_lsn(xlog_path, segno, backup->tli, @@ -1733,7 +1739,7 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, * because previous record can be the contrecord. 
*/ lsn_tmp = wait_wal_lsn(stop_backup_lsn_tmp, false, backup->tli, - true, false, ERROR, stream_wal); + true, false, ERROR, stream_wal, backup); /* sanity */ if (!XRecOffIsValid(lsn_tmp) || XLogRecPtrIsInvalid(lsn_tmp)) @@ -1747,7 +1753,7 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, { /* Wait for segment with current stop_lsn */ wait_wal_lsn(stop_backup_lsn_tmp, false, backup->tli, - false, true, ERROR, stream_wal); + false, true, ERROR, stream_wal, backup); /* Get the next closest record in segment with current stop_lsn */ lsn_tmp = get_next_record_lsn(xlog_path, segno, backup->tli, @@ -1876,7 +1882,7 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, */ if (!stop_lsn_exists) stop_backup_lsn = wait_wal_lsn(stop_backup_lsn_tmp, false, backup->tli, - false, false, ERROR, stream_wal); + false, false, ERROR, stream_wal, backup); if (stream_wal) { diff --git a/src/utils/logger.c b/src/utils/logger.c index f039d4a5d..584b937e7 100644 --- a/src/utils/logger.c +++ b/src/utils/logger.c @@ -169,6 +169,7 @@ exit_if_necessary(int elevel) { /* Interrupt other possible routines */ thread_interrupted = true; + interrupted = true; #ifdef WIN32 ExitThread(elevel); #else From 9fcf99034dc46749f260bcbd30bd30a0075ff65e Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 22 Mar 2021 21:24:14 +0300 Subject: [PATCH 088/525] tests: store gdb output internally --- tests/helpers/ptrack_helpers.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index a84caba4d..b0da8abbb 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1757,6 +1757,7 @@ def __str__(self): class GDBobj(ProbackupTest): def __init__(self, cmd, verbose, attach=False): self.verbose = verbose + self.output = '' # Check gdb presense try: @@ -1801,7 +1802,8 @@ def __init__(self, cmd, verbose, attach=False): # discard data from pipe, # is there a way to do it a less derpy way? 
while True: - line = self.proc.stdout.readline() +# line = self.proc.stdout.readline() + line = self.get_line() if 'No such process' in line: raise GdbException(line) @@ -1811,6 +1813,12 @@ def __init__(self, cmd, verbose, attach=False): else: break + def get_line(self): + line = self.proc.stdout.readline() +# self.output += repr(line) + '\n' + self.output += line + return line + def kill(self): self.proc.kill() self.proc.wait() @@ -1932,10 +1940,8 @@ def continue_execution_until_break(self, ignore_count=0): 'Failed to continue execution until break.\n') def stopped_in_breakpoint(self): - output = [] while True: - line = self.proc.stdout.readline() - output += [line] + line = self.get_line() if self.verbose: print(line) if line.startswith('*stopped,reason="breakpoint-hit"'): @@ -1952,7 +1958,7 @@ def _execute(self, cmd, running=True): # look for command we just send while True: - line = self.proc.stdout.readline() + line = self.get_line() if self.verbose: print(repr(line)) @@ -1962,7 +1968,7 @@ def _execute(self, cmd, running=True): break while True: - line = self.proc.stdout.readline() + line = self.get_line() output += [line] if self.verbose: print(repr(line)) From c36ba06c350d7baab3113aa937a0b9e3da8ba80b Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 22 Mar 2021 21:26:32 +0300 Subject: [PATCH 089/525] [Issue #346] tests: added tests.backup.BackupTest.test_missing_wal_segment --- tests/backup.py | 79 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 78 insertions(+), 1 deletion(-) diff --git a/tests/backup.py b/tests/backup.py index 65851d0f3..f14e61745 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -2365,7 +2365,7 @@ def test_parent_choosing_2(self): # Clean after yourself self.del_test_dir(module_name, fname) - @unittest.skip("skip") + # @unittest.skip("skip") def test_backup_with_less_privileges_role(self): """ check permissions correctness from documentation: @@ -3079,3 +3079,80 @@ def test_incr_backup_filenode_map(self): # Clean after yourself self.del_test_dir(module_name, fname) + + + + # @unittest.skip("skip") + def test_missing_wal_segment(self): + """""" + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums'], + pg_options={'archive_timeout': '30s'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=10) + + node.safe_psql( + 'postgres', + 'CREATE DATABASE backupdb') + + # get segments in pg_wal, sort then and remove all but the latest + pg_wal_dir = os.path.join(node.data_dir, 'pg_wal') + + if node.major_version >= 10: + pg_wal_dir = os.path.join(node.data_dir, 'pg_wal') + else: + pg_wal_dir = os.path.join(node.data_dir, 'pg_xlog') + + # Full backup in streaming mode + gdb = self.backup_node( + backup_dir, 'node', node, datname='backupdb', + options=['--stream', '--log-level-file=INFO'], gdb=True) + + # break at streaming start + gdb.set_breakpoint('start_WAL_streaming') + gdb.run_until_break() + + # generate some more data + node.pgbench_init(scale=3) + + # remove redundant WAL segments in pg_wal + files = os.listdir(pg_wal_dir) + files.sort(reverse=True) + + # leave first two files in list + del files[:2] + for filename in files: + os.remove(os.path.join(pg_wal_dir, filename)) + + 
gdb.continue_execution_until_exit() + + self.assertIn( + 'unexpected termination of replication stream: ERROR: requested WAL segment', + gdb.output) + + self.assertIn( + 'has already been removed', + gdb.output) + + self.assertIn( + 'ERROR: Interrupted during waiting for WAL streaming', + gdb.output) + + self.assertIn( + 'WARNING: backup in progress, stop backup', + gdb.output) + + # TODO: check the same for PAGE backup + + # Clean after yourself + self.del_test_dir(module_name, fname) From afcb00a04e466e547145f6a7f2707eed0a9d2eda Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 22 Mar 2021 21:32:38 +0300 Subject: [PATCH 090/525] tests: remove some old comments --- tests/helpers/ptrack_helpers.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index b0da8abbb..5b4adedcc 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1799,10 +1799,7 @@ def __init__(self, cmd, verbose, attach=False): ) self.gdb_pid = self.proc.pid - # discard data from pipe, - # is there a way to do it a less derpy way? while True: -# line = self.proc.stdout.readline() line = self.get_line() if 'No such process' in line: @@ -1815,7 +1812,6 @@ def __init__(self, cmd, verbose, attach=False): def get_line(self): line = self.proc.stdout.readline() -# self.output += repr(line) + '\n' self.output += line return line From 59dc864483917ec4bd620bb63d3c30488ae8b990 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 22 Mar 2021 21:32:52 +0300 Subject: [PATCH 091/525] open header file in append mode --- src/data.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/data.c b/src/data.c index f7032fb55..4db674611 100644 --- a/src/data.c +++ b/src/data.c @@ -2268,7 +2268,7 @@ write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map, { elog(LOG, "Creating page header map \"%s\"", map_path); - hdr_map->fp = fopen(map_path, PG_BINARY_W); + hdr_map->fp = fopen(map_path, 'a'); if (hdr_map->fp == NULL) elog(ERROR, "Cannot open header file \"%s\": %s", map_path, strerror(errno)); From 1647d1145698792c8583aaee8e6743233cb15167 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 25 Mar 2021 16:45:24 +0300 Subject: [PATCH 092/525] [Issue #351] fix data type for header offset --- src/catalog.c | 2 +- src/data.c | 10 +++++----- src/pg_probackup.h | 16 +++++++++------- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 995dd8fad..25e08e6ff 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -2456,7 +2456,7 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, { len += sprintf(line+len, ",\"n_headers\":\"%i\"", file->n_headers); len += sprintf(line+len, ",\"hdr_crc\":\"%u\"", file->hdr_crc); - len += sprintf(line+len, ",\"hdr_off\":\"%li\"", file->hdr_off); + len += sprintf(line+len, ",\"hdr_off\":\"%llu\"", file->hdr_off); len += sprintf(line+len, ",\"hdr_size\":\"%i\"", file->hdr_size); } diff --git a/src/data.c b/src/data.c index 4db674611..544497aca 100644 --- a/src/data.c +++ b/src/data.c @@ -2160,7 +2160,7 @@ get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, b if (fseek(in, file->hdr_off, SEEK_SET)) { - elog(strict ? ERROR : WARNING, "Cannot seek to position %lu in page header map \"%s\": %s", + elog(strict ? 
ERROR : WARNING, "Cannot seek to position %llu in page header map \"%s\": %s", file->hdr_off, hdr_map->path, strerror(errno)); goto cleanup; } @@ -2177,7 +2177,7 @@ get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, b if (fread(zheaders, 1, file->hdr_size, in) != file->hdr_size) { - elog(strict ? ERROR : WARNING, "Cannot read header file at offset: %li len: %i \"%s\": %s", + elog(strict ? ERROR : WARNING, "Cannot read header file at offset: %llu len: %i \"%s\": %s", file->hdr_off, file->hdr_size, hdr_map->path, strerror(errno)); goto cleanup; } @@ -2208,7 +2208,7 @@ get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, b if (hdr_crc != file->hdr_crc) { elog(strict ? ERROR : WARNING, "Header map for file \"%s\" crc mismatch \"%s\" " - "offset: %lu, len: %lu, current: %u, expected: %u", + "offset: %llu, len: %lu, current: %u, expected: %u", file->rel_path, hdr_map->path, file->hdr_off, read_len, hdr_crc, file->hdr_crc); goto cleanup; } @@ -2268,7 +2268,7 @@ write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map, { elog(LOG, "Creating page header map \"%s\"", map_path); - hdr_map->fp = fopen(map_path, 'a'); + hdr_map->fp = fopen(map_path, "a"); if (hdr_map->fp == NULL) elog(ERROR, "Cannot open header file \"%s\": %s", map_path, strerror(errno)); @@ -2297,7 +2297,7 @@ write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map, file->rel_path, z_len); } - elog(VERBOSE, "Writing headers for file \"%s\" offset: %li, len: %i, crc: %u", + elog(VERBOSE, "Writing headers for file \"%s\" offset: %llu, len: %i, crc: %u", file->rel_path, file->hdr_off, z_len, file->hdr_crc); if (fwrite(zheaders, 1, z_len, hdr_map->fp) != z_len) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index f28f96e2e..4cde5e180 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -208,6 +208,8 @@ do { \ FIN_TRADITIONAL_CRC32(crc); \ } while (0) +#define pg_off_t unsigned long long + /* Information about single file (or dir) in backup */ typedef struct pgFile @@ -249,8 +251,8 @@ typedef struct pgFile /* Coordinates in header map */ int n_headers; /* number of blocks in the data file in backup */ pg_crc32 hdr_crc; /* CRC value of header file: name_hdr */ - off_t hdr_off; /* offset in header map */ - int hdr_size; /* offset in header map */ + pg_off_t hdr_off; /* offset in header map */ + int hdr_size; /* length of headers */ } pgFile; typedef struct page_map_entry @@ -406,11 +408,11 @@ typedef struct PGNodeInfo /* structure used for access to block header map */ typedef struct HeaderMap { - char path[MAXPGPATH]; - char path_tmp[MAXPGPATH]; /* used only in merge */ - FILE *fp; /* used only for writing */ - char *buf; /* buffer */ - off_t offset; /* current position in fp */ + char path[MAXPGPATH]; + char path_tmp[MAXPGPATH]; /* used only in merge */ + FILE *fp; /* used only for writing */ + char *buf; /* buffer */ + pg_off_t offset; /* current position in fp */ pthread_mutex_t mutex; } HeaderMap; From 731d19089d7b97c35556e0b4588006db11ea1e91 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Fri, 26 Mar 2021 00:30:29 +0300 Subject: [PATCH 093/525] [Issue #310] test coverage --- tests/backup.py | 261 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 259 insertions(+), 2 deletions(-) diff --git a/tests/backup.py b/tests/backup.py index f14e61745..ef3928d5c 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -3080,8 +3080,6 @@ def test_incr_backup_filenode_map(self): # Clean after yourself 
self.del_test_dir(module_name, fname) - - # @unittest.skip("skip") def test_missing_wal_segment(self): """""" @@ -3156,3 +3154,262 @@ def test_missing_wal_segment(self): # Clean after yourself self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_missing_replication_permission(self): + """""" + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) +# self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + # Create replica + replica = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'replica')) + replica.cleanup() + self.restore_node(backup_dir, 'node', replica) + + # Settings for Replica + self.set_replica(node, replica) + replica.slow_start(replica=True) + + node.safe_psql( + 'postgres', + 'CREATE DATABASE backupdb') + + # PG 9.5 + if self.get_version(node) < 90600: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + # PG 9.6 + elif self.get_version(node) > 90600 and self.get_version(node) < 100000: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_switch_xlog() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + # >= 10 + else: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + + if ProbackupTest.enterprise: + node.safe_psql( + "backupdb", + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup") + + node.safe_psql( + "backupdb", + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") + + sleep(2) + replica.promote() + + # Delta backup + try: + self.backup_node( + backup_dir, 'node', replica, backup_type='delta', + data_dir=replica.data_dir, datname='backupdb', options=['--stream', '-U', 'backup']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because incremental backup should not be possible " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "FATAL: must be superuser or replication role to start walsender", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_missing_replication_permission_1(self): + """""" + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + # Create replica + replica = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'replica')) + replica.cleanup() + self.restore_node(backup_dir, 'node', replica) + + # Settings for Replica + self.set_replica(node, replica) + replica.slow_start(replica=True) + + node.safe_psql( + 'postgres', + 'CREATE DATABASE backupdb') + + # PG 
9.5 + if self.get_version(node) < 90600: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + # PG 9.6 + elif self.get_version(node) > 90600 and self.get_version(node) < 100000: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + # >= 10 + else: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + + if ProbackupTest.enterprise: + node.safe_psql( + "backupdb", + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup") + + node.safe_psql( + "backupdb", + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") + + replica.promote() + + # PAGE + output = self.backup_node( + backup_dir, 'node', replica, backup_type='page', + data_dir=replica.data_dir, datname='backupdb', options=['-U', 'backup'], + return_id=False) + + self.assertIn( + 'WARNING: Valid backup on current timeline 2 is not found, trying to look up on previous timelines', + output) + + self.assertIn( + 'WARNING: could not connect to database backupdb: FATAL: must be superuser or replication role to start walsender', + output) + + # Clean after yourself + self.del_test_dir(module_name, fname) From 7329256b95a2549a8364d746d5aebc8a7b45fb00 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sat, 27 Mar 2021 16:22:40 +0300 Subject: [PATCH 094/525] [Issue #324] "--no-validate" and "--no-sync" flags for merge and delete commands --- src/backup.c | 2 +- src/delete.c | 15 +++++------ src/help.c | 11 ++++++-- src/merge.c | 63 +++++++++++++++++++++++++++------------------- src/pg_probackup.c | 12 ++------- src/pg_probackup.h | 12 +++------ src/utils/file.c | 3 --- src/validate.c | 5 ---- 8 files changed, 61 insertions(+), 62 deletions(-) diff --git a/src/backup.c b/src/backup.c index ac4f1013f..de5c0cee1 100644 --- a/src/backup.c +++ b/src/backup.c @@ -887,7 +887,7 @@ do_backup(pgSetBackupParams *set_backup_params, * which are expired according to retention policies */ if (delete_expired || merge_expired || delete_wal) - do_retention(); + do_retention(no_validate, no_sync); return 0; } diff --git a/src/delete.c b/src/delete.c index 7458f0100..d1afa2874 100644 --- a/src/delete.c +++ b/src/delete.c @@ -19,7 +19,7 @@ static void delete_walfiles_in_tli(XLogRecPtr keep_lsn, timelineInfo *tli, static void do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purge_list); static void do_retention_merge(parray *backup_list, parray *to_keep_list, - parray *to_purge_list); + parray *to_purge_list, bool no_validate, bool no_sync); static void do_retention_purge(parray *to_keep_list, parray *to_purge_list); static void do_retention_wal(bool dry_run); @@ -123,7 +123,7 @@ do_delete(time_t backup_id) * which FULL backup should be keeped for redundancy obligation(only valid do), * but if invalid backup is not guarded by retention - it is removed */ -void do_retention(void) +void do_retention(bool no_validate, bool no_sync) { parray *backup_list = NULL; parray *to_keep_list = parray_new(); @@ -172,7 +172,7 @@ void do_retention(void) do_retention_internal(backup_list, to_keep_list, to_purge_list); if (merge_expired && !dry_run && !backup_list_is_empty) - do_retention_merge(backup_list, to_keep_list, to_purge_list); + do_retention_merge(backup_list, to_keep_list, to_purge_list, no_validate, no_sync); if (delete_expired && !dry_run && !backup_list_is_empty) do_retention_purge(to_keep_list, to_purge_list); @@ -424,7 +424,8 @@ do_retention_internal(parray 
*backup_list, parray *to_keep_list, parray *to_purg /* Merge partially expired incremental chains */ static void -do_retention_merge(parray *backup_list, parray *to_keep_list, parray *to_purge_list) +do_retention_merge(parray *backup_list, parray *to_keep_list, parray *to_purge_list, + bool no_validate, bool no_sync) { int i; int j; @@ -543,7 +544,7 @@ do_retention_merge(parray *backup_list, parray *to_keep_list, parray *to_purge_l */ keep_backup = parray_get(merge_list, 0); - merge_chain(merge_list, full_backup, keep_backup); + merge_chain(merge_list, full_backup, keep_backup, no_validate, no_sync); backup_merged = true; for (j = parray_num(merge_list) - 2; j >= 0; j--) @@ -554,8 +555,8 @@ do_retention_merge(parray *backup_list, parray *to_keep_list, parray *to_purge_l parray_rm(to_purge_list, tmp_backup, pgBackupCompareId); parray_set(to_keep_list, i, NULL); } - - pgBackupValidate(full_backup, NULL); + if (!no_validate) + pgBackupValidate(full_backup, NULL); if (full_backup->status == BACKUP_STATUS_CORRUPT) elog(ERROR, "Merging of backup %s failed", base36enc(full_backup->start_time)); diff --git a/src/help.c b/src/help.c index 2b5bcd06e..f72dc90dc 100644 --- a/src/help.c +++ b/src/help.c @@ -197,11 +197,12 @@ help_pg_probackup(void) printf(_(" [--wal-depth=wal-depth]\n")); printf(_(" [-i backup-id | --delete-expired | --merge-expired | --status=backup_status]\n")); printf(_(" [--delete-wal]\n")); - printf(_(" [--dry-run]\n")); + printf(_(" [--dry-run] [--no-validate] [--no-sync]\n")); printf(_(" [--help]\n")); printf(_("\n %s merge -B backup-path --instance=instance_name\n"), PROGRAM_NAME); printf(_(" -i backup-id [--progress] [-j num-threads]\n")); + printf(_(" [--no-validate] [--no-sync]\n")); printf(_(" [--help]\n")); printf(_("\n %s add-instance -B backup-path -D pgdata-path\n"), PROGRAM_NAME); @@ -631,13 +632,16 @@ help_delete(void) printf(_(" [-j num-threads] [--progress]\n")); printf(_(" [--retention-redundancy=retention-redundancy]\n")); printf(_(" [--retention-window=retention-window]\n")); - printf(_(" [--wal-depth=wal-depth]\n\n")); + printf(_(" [--wal-depth=wal-depth]\n")); + printf(_(" [--no-validate] [--no-sync]\n\n")); printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" --instance=instance_name name of the instance\n")); printf(_(" -i, --backup-id=backup-id backup to delete\n")); printf(_(" -j, --threads=NUM number of parallel threads\n")); printf(_(" --progress show progress\n")); + printf(_(" --no-validate disable validation during retention merge\n")); + printf(_(" --no-sync do not sync merged files to disk\n")); printf(_("\n Retention options:\n")); printf(_(" --delete-expired delete backups expired according to current\n")); @@ -681,6 +685,7 @@ help_merge(void) { printf(_("\n%s merge -B backup-path --instance=instance_name\n"), PROGRAM_NAME); printf(_(" -i backup-id [-j num-threads] [--progress]\n")); + printf(_(" [--no-validate] [--no-sync]\n")); printf(_(" [--log-level-console=log-level-console]\n")); printf(_(" [--log-level-file=log-level-file]\n")); printf(_(" [--log-filename=log-filename]\n")); @@ -695,6 +700,8 @@ help_merge(void) printf(_(" -j, --threads=NUM number of parallel threads\n")); printf(_(" --progress show progress\n")); + printf(_(" --no-validate disable validation during retention merge\n")); + printf(_(" --no-sync do not sync merged files to disk\n")); printf(_("\n Logging options:\n")); printf(_(" --log-level-console=log-level-console\n")); diff --git a/src/merge.c b/src/merge.c index 3c51a1fae..3fd5b13ae 
100644 --- a/src/merge.c +++ b/src/merge.c @@ -30,6 +30,7 @@ typedef struct bool program_version_match; bool use_bitmap; bool is_retry; + bool no_sync; /* * Return value from the thread. @@ -50,13 +51,13 @@ static void merge_data_file(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup, pgFile *dest_file, pgFile *tmp_file, const char *to_root, bool use_bitmap, - bool is_retry); + bool is_retry, bool no_sync); static void merge_non_data_file(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup, pgFile *dest_file, pgFile *tmp_file, const char *full_database_dir, - const char *full_external_prefix); + const char *full_external_prefix, bool no_sync); static bool is_forward_compatible(parray *parent_chain); @@ -68,7 +69,7 @@ static bool is_forward_compatible(parray *parent_chain); * - Remove unnecessary files, which doesn't exist in the target backup anymore */ void -do_merge(time_t backup_id) +do_merge(time_t backup_id, bool no_validate, bool no_sync) { parray *backups; parray *merge_list = parray_new(); @@ -405,9 +406,10 @@ do_merge(time_t backup_id) catalog_lock_backup_list(merge_list, parray_num(merge_list) - 1, 0, true, true); /* do actual merge */ - merge_chain(merge_list, full_backup, dest_backup); + merge_chain(merge_list, full_backup, dest_backup, no_validate, no_sync); - pgBackupValidate(full_backup, NULL); + if (!no_validate) + pgBackupValidate(full_backup, NULL); if (full_backup->status == BACKUP_STATUS_CORRUPT) elog(ERROR, "Merging of backup %s failed", base36enc(backup_id)); @@ -434,7 +436,8 @@ do_merge(time_t backup_id) * that chain is ok. */ void -merge_chain(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup) +merge_chain(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup, + bool no_validate, bool no_sync) { int i; char *dest_backup_id; @@ -554,25 +557,28 @@ merge_chain(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup) * with sole exception of FULL backup. If it has MERGING status * then it isn't valid backup until merging is finished. 
*/ - elog(INFO, "Validate parent chain for backup %s", - base36enc(dest_backup->start_time)); - - for (i = parray_num(parent_chain) - 1; i >= 0; i--) + if (!no_validate) { - pgBackup *backup = (pgBackup *) parray_get(parent_chain, i); + elog(INFO, "Validate parent chain for backup %s", + base36enc(dest_backup->start_time)); - /* FULL backup is not to be validated if its status is MERGING */ - if (backup->backup_mode == BACKUP_MODE_FULL && - backup->status == BACKUP_STATUS_MERGING) + for (i = parray_num(parent_chain) - 1; i >= 0; i--) { - continue; - } + pgBackup *backup = (pgBackup *) parray_get(parent_chain, i); - pgBackupValidate(backup, NULL); + /* FULL backup is not to be validated if its status is MERGING */ + if (backup->backup_mode == BACKUP_MODE_FULL && + backup->status == BACKUP_STATUS_MERGING) + { + continue; + } - if (backup->status != BACKUP_STATUS_OK) - elog(ERROR, "Backup %s has status %s, merge is aborted", - base36enc(backup->start_time), status2str(backup->status)); + pgBackupValidate(backup, NULL); + + if (backup->status != BACKUP_STATUS_OK) + elog(ERROR, "Backup %s has status %s, merge is aborted", + base36enc(backup->start_time), status2str(backup->status)); + } } /* @@ -665,6 +671,7 @@ merge_chain(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup) arg->program_version_match = program_version_match; arg->use_bitmap = use_bitmap; arg->is_retry = is_retry; + arg->no_sync = no_sync; /* By default there are some error */ arg->ret = 1; @@ -1102,14 +1109,16 @@ merge_files(void *arg) dest_file, tmp_file, arguments->full_database_dir, arguments->use_bitmap, - arguments->is_retry); + arguments->is_retry, + arguments->no_sync); else merge_non_data_file(arguments->parent_chain, arguments->full_backup, arguments->dest_backup, dest_file, tmp_file, arguments->full_database_dir, - arguments->full_external_prefix); + arguments->full_external_prefix, + arguments->no_sync); done: parray_append(arguments->merge_filelist, tmp_file); @@ -1202,7 +1211,8 @@ reorder_external_dirs(pgBackup *to_backup, parray *to_external, void merge_data_file(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup, pgFile *dest_file, pgFile *tmp_file, - const char *full_database_dir, bool use_bitmap, bool is_retry) + const char *full_database_dir, bool use_bitmap, bool is_retry, + bool no_sync) { FILE *out = NULL; char *buffer = pgut_malloc(STDIO_BUFSIZE); @@ -1273,7 +1283,7 @@ merge_data_file(parray *parent_chain, pgBackup *full_backup, return; /* sync second temp file to disk */ - if (fio_sync(to_fullpath_tmp2, FIO_BACKUP_HOST) != 0) + if (!no_sync && fio_sync(to_fullpath_tmp2, FIO_BACKUP_HOST) != 0) elog(ERROR, "Cannot sync merge temp file \"%s\": %s", to_fullpath_tmp2, strerror(errno)); @@ -1294,7 +1304,8 @@ merge_data_file(parray *parent_chain, pgBackup *full_backup, void merge_non_data_file(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup, pgFile *dest_file, pgFile *tmp_file, - const char *full_database_dir, const char *to_external_prefix) + const char *full_database_dir, const char *to_external_prefix, + bool no_sync) { int i; char to_fullpath[MAXPGPATH]; @@ -1378,7 +1389,7 @@ merge_non_data_file(parray *parent_chain, pgBackup *full_backup, to_fullpath_tmp, BACKUP_MODE_FULL, 0, false); /* sync temp file to disk */ - if (fio_sync(to_fullpath_tmp, FIO_BACKUP_HOST) != 0) + if (!no_sync && fio_sync(to_fullpath_tmp, FIO_BACKUP_HOST) != 0) elog(ERROR, "Cannot sync merge temp file \"%s\": %s", to_fullpath_tmp, strerror(errno)); diff --git a/src/pg_probackup.c 
b/src/pg_probackup.c index ac927965c..854493bdc 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -128,10 +128,6 @@ bool compress_shortcut = false; /* other options */ char *instance_name; -/* TODO: quick hack */ -bool merge_no_validate = false; -bool merge_no_sync = false; - /* archive push options */ int batch_size = 1; static char *wal_file_path; @@ -834,8 +830,6 @@ main(int argc, char *argv[]) case SHOW_CMD: return do_show(instance_name, current.backup_id, show_archive); case DELETE_CMD: - merge_no_validate = no_validate; - merge_no_sync = no_sync; if (delete_expired && backup_id_string) elog(ERROR, "You cannot specify --delete-expired and (-i, --backup-id) options together"); @@ -851,15 +845,13 @@ main(int argc, char *argv[]) if (delete_status) do_delete_status(&instance_config, delete_status); else - do_retention(); + do_retention(no_validate, no_sync); } else do_delete(current.backup_id); break; case MERGE_CMD: - merge_no_validate = no_validate; - merge_no_sync = no_sync; - do_merge(current.backup_id); + do_merge(current.backup_id, no_validate, no_sync); break; case SHOW_CONFIG_CMD: do_show_config(); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 31fb66235..6df6d0f0e 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -786,10 +786,6 @@ extern bool compress_shortcut; /* other options */ extern char *instance_name; -/* temp merge options */ -extern bool merge_no_validate; -extern bool merge_no_sync; - /* show options */ extern ShowFormat show_format; @@ -843,10 +839,10 @@ extern parray *read_timeline_history(const char *arclog_path, TimeLineID targetT extern bool tliIsPartOfHistory(const parray *timelines, TimeLineID tli); /* in merge.c */ -extern void do_merge(time_t backup_id); +extern void do_merge(time_t backup_id, bool no_validate, bool no_sync); extern void merge_backups(pgBackup *backup, pgBackup *next_backup); -extern void merge_chain(parray *parent_chain, - pgBackup *full_backup, pgBackup *dest_backup); +extern void merge_chain(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup, + bool no_validate, bool no_sync); extern parray *read_database_map(pgBackup *backup); @@ -873,7 +869,7 @@ extern int do_show(const char *instance_name, time_t requested_backup_id, bool s /* in delete.c */ extern void do_delete(time_t backup_id); extern void delete_backup_files(pgBackup *backup); -extern void do_retention(void); +extern void do_retention(bool no_validate, bool no_sync); extern int do_delete_instance(void); extern void do_delete_status(InstanceConfig *instance_config, const char *status); diff --git a/src/utils/file.c b/src/utils/file.c index 26892fedd..15a7085ec 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1169,9 +1169,6 @@ int fio_sync(char const* path, fio_location location) { int fd; - if (merge_no_sync) - return 0; - fd = open(path, O_WRONLY | PG_BINARY, FILE_PERMISSIONS); if (fd < 0) return -1; diff --git a/src/validate.c b/src/validate.c index fc35bf2cb..a91fe817f 100644 --- a/src/validate.c +++ b/src/validate.c @@ -129,9 +129,6 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) // dbOid_exclude_list = get_dbOid_exclude_list(backup, files, params->partial_db_list, // params->partial_restore_type); - if (merge_no_validate) - goto skip_validation; - /* setup threads */ for (i = 0; i < parray_num(files); i++) { @@ -183,8 +180,6 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) pfree(threads); pfree(threads_args); -skip_validation: - /* cleanup */ parray_walk(files, pgFileFree); parray_free(files); 
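Note on usage: the new flags are passed straight through from the merge and delete commands into merge_chain() and do_retention(). A minimal test-style sketch of how they are expected to be driven (the backup id is hypothetical, and it is assumed that the merge_backup()/delete_expired() wrappers of this suite forward extra options to the pg_probackup command line):

    # merge an incremental chain, skipping the pre- and post-merge validation
    # and the fsync of merged files
    self.merge_backup(
        backup_dir, 'node', page_id,
        options=['--no-validate', '--no-sync'])

    # retention purge that merges partially expired chains with the same shortcuts
    self.delete_expired(
        backup_dir, 'node',
        options=[
            '--retention-window=1', '--delete-expired', '--merge-expired',
            '--no-validate', '--no-sync'])

The equivalent direct invocations follow the synopsis added to help.c above, e.g. "pg_probackup merge -B backup-path --instance=instance_name -i backup-id --no-validate --no-sync".
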
From 0751b89626f618a49ef1e63186213890cdbbac12 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sat, 27 Mar 2021 16:45:07 +0300 Subject: [PATCH 095/525] [Issue #324] update documentation --- doc/pgprobackup.xml | 53 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 51 insertions(+), 2 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index e0a733bde..ae9e5f8bb 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -4108,7 +4108,7 @@ pg_probackup validate -B backup_dir merge pg_probackup merge -B backup_dir --instance instance_name -i backup_id -[--help] [-j num_threads] [--progress] +[--help] [-j num_threads] [--progress] [--no-validate] [--no-sync] [logging_options] @@ -4119,6 +4119,30 @@ pg_probackup merge -B backup_dir --instance + + + + + + + + Skips automatic validation before and after merge. + + + + + + + + Do not sync merged files to disk. You can use this flag to speed + up the merge process. Using this flag can result in data + corruption in case of operating system or hardware crash. + + + + + + For details, see the section Merging Backups. @@ -4131,13 +4155,38 @@ pg_probackup delete -B backup_dir --instance num_threads] [--progress] [--retention-redundancy=redundancy][--retention-window=window][--wal-depth=wal_depth] [--delete-wal] {-i backup_id | --delete-expired [--merge-expired] | --merge-expired | --status=backup_status} -[--dry-run] [logging_options] +[--dry-run] [--no-validate] [--no-sync] [logging_options] Deletes backup with specified backup_id or launches the retention purge of backups and archived WAL that do not satisfy the current retention policies. + + + + + + + + Skips automatic validation before and after retention merge. + + + + + + + + + Do not sync merged files to disk. You can use this flag to speed + up the retention merge process. Using this flag can result in data + corruption in case of operating system or hardware crash. 
+ + + + + + For details, see the sections Deleting Backups, From 8dbc90a2eb2cf6898c3b644ef678410b18409de8 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 28 Mar 2021 17:57:09 +0300 Subject: [PATCH 096/525] Version 2.4.12 --- src/pg_probackup.h | 2 +- tests/expected/option_version.out | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 6df6d0f0e..d072ab715 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -308,7 +308,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.4.11" +#define PROGRAM_VERSION "2.4.12" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20409 diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index 35f065a13..7813360e8 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.4.11 \ No newline at end of file +pg_probackup 2.4.12 \ No newline at end of file From 3813726bbdb20d16ae6d09834ba41144607bd438 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 29 Mar 2021 13:06:09 +0300 Subject: [PATCH 097/525] tests: some fixes --- tests/archive.py | 6 +++--- tests/backup.py | 52 ++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 53 insertions(+), 5 deletions(-) diff --git a/tests/archive.py b/tests/archive.py index ac1b2a0d4..2d0bb5037 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -2638,13 +2638,13 @@ def test_archive_empty_history_file(self): wal_dir = os.path.join(backup_dir, 'wal', 'node') self.assertIn( - 'WARNING: History file is corrupted: "{0}"'.format(os.path.join(wal_dir, '00000002.history')), + 'WARNING: History file is corrupted or missing: "{0}"'.format(os.path.join(wal_dir, '00000002.history')), log_content) self.assertIn( - 'WARNING: History file is corrupted: "{0}"'.format(os.path.join(wal_dir, '00000003.history')), + 'WARNING: History file is corrupted or missing: "{0}"'.format(os.path.join(wal_dir, '00000003.history')), log_content) self.assertIn( - 'WARNING: History file is corrupted: "{0}"'.format(os.path.join(wal_dir, '00000004.history')), + 'WARNING: History file is corrupted or missing: "{0}"'.format(os.path.join(wal_dir, '00000004.history')), log_content) self.del_test_dir(module_name, fname) diff --git a/tests/backup.py b/tests/backup.py index ef3928d5c..ec11bdc85 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -4,7 +4,7 @@ from .helpers.ptrack_helpers import ProbackupTest, ProbackupException import shutil from distutils.dir_util import copy_tree -from testgres import ProcessType +from testgres import ProcessType, QueryException import subprocess @@ -1576,7 +1576,7 @@ def test_basic_temp_slot_for_stream_backup(self): set_replication=True, initdb_params=['--data-checksums'], pg_options={ - 'max_wal_size': '40MB'}) + 'max_wal_size': '40MB', 'default_transaction_read_only': 'on'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -3413,3 +3413,51 @@ def test_missing_replication_permission_1(self): # Clean after yourself self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_basic_backup_default_transaction_read_only(self): + """""" + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( 
+ base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={'default_transaction_read_only': 'on'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + try: + node.safe_psql( + 'postgres', + 'create temp table t1()') + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because incremental backup should not be possible " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except QueryException as e: + self.assertIn( + "cannot execute CREATE TABLE in a read-only transaction", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + # FULL backup + self.backup_node( + backup_dir, 'node', node, + options=['--stream', '--temp-slot']) + + # DELTA backup + self.backup_node( + backup_dir, 'node', node, backup_type='delta', options=['--stream']) + + # PAGE backup + self.backup_node(backup_dir, 'node', node, backup_type='page') + + # Clean after yourself + self.del_test_dir(module_name, fname) From 15c90460fd3e2fdad89c0b022ca9a6a4778759e1 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 29 Mar 2021 14:02:13 +0300 Subject: [PATCH 098/525] use 0 instead of BUFSIZ --- src/data.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/data.c b/src/data.c index 544497aca..a4fea7782 100644 --- a/src/data.c +++ b/src/data.c @@ -2156,7 +2156,7 @@ get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, b return NULL; } /* disable buffering for header file */ - setvbuf(in, NULL, _IONBF, BUFSIZ); + setvbuf(in, NULL, _IONBF, 0); if (fseek(in, file->hdr_off, SEEK_SET)) { From b794fbc723b73fc3409f0a9adc715b250e09c132 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 29 Mar 2021 15:11:36 +0300 Subject: [PATCH 099/525] use PG_BINARY_A flag when opening file with map headers --- src/data.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/data.c b/src/data.c index a4fea7782..9ff54ed4b 100644 --- a/src/data.c +++ b/src/data.c @@ -2268,7 +2268,7 @@ write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map, { elog(LOG, "Creating page header map \"%s\"", map_path); - hdr_map->fp = fopen(map_path, "a"); + hdr_map->fp = fopen(map_path, PG_BINARY_A); if (hdr_map->fp == NULL) elog(ERROR, "Cannot open header file \"%s\": %s", map_path, strerror(errno)); From 5d348641cdec2b231aada3fe76b5e4d863732596 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 1 Apr 2021 12:02:37 +0300 Subject: [PATCH 100/525] use fseeko instead of fseek when accessing the map of page headers --- src/data.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/data.c b/src/data.c index 9ff54ed4b..999de867d 100644 --- a/src/data.c +++ b/src/data.c @@ -2158,7 +2158,7 @@ get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, b /* disable buffering for header file */ setvbuf(in, NULL, _IONBF, 0); - if (fseek(in, file->hdr_off, SEEK_SET)) + if (fseeko(in, file->hdr_off, SEEK_SET)) { elog(strict ? 
ERROR : WARNING, "Cannot seek to position %llu in page header map \"%s\": %s", file->hdr_off, hdr_map->path, strerror(errno)); From 1d4d293947de69a2cff1159d472d250b9f4b59bf Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 1 Apr 2021 14:25:30 +0300 Subject: [PATCH 101/525] Version 2.4.13 --- src/pg_probackup.h | 2 +- tests/expected/option_version.out | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index d072ab715..950c71343 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -308,7 +308,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.4.12" +#define PROGRAM_VERSION "2.4.13" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20409 diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index 7813360e8..752e96a8c 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.4.12 \ No newline at end of file +pg_probackup 2.4.13 \ No newline at end of file From 91da77b16f864f57bf57777ae556e4094216686b Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sat, 10 Apr 2021 15:08:35 +0300 Subject: [PATCH 102/525] add elog message about waiting for pg_start_backup() execution --- src/backup.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/backup.c b/src/backup.c index de5c0cee1..112d8b365 100644 --- a/src/backup.c +++ b/src/backup.c @@ -1044,6 +1044,8 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup, params[0] = label; + elog(INFO, "wait for pg_start_backup()"); + /* 2nd argument is 'fast'*/ params[1] = smooth ? "false" : "true"; if (!exclusive_backup) From 0e6e9a45e9c49bc15cb76687f5098a92288ccb3e Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 15 Apr 2021 13:20:54 +0300 Subject: [PATCH 103/525] [Issue #364] honor the "--no-validate" flag when working with tablespace_map --- src/dir.c | 6 ++++-- src/pg_probackup.h | 4 ++-- src/restore.c | 9 ++++++++- src/validate.c | 15 +++++++++------ 4 files changed, 23 insertions(+), 11 deletions(-) diff --git a/src/dir.c b/src/dir.c index d07a4d2f5..e1d9e580a 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1116,6 +1116,8 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba elog(VERBOSE, "Create directory \"%s\"", dir->rel_path); join_path_components(to_path, data_dir, dir->rel_path); + + // TODO check exit code fio_mkdir(to_path, dir->mode, location); } @@ -1191,7 +1193,7 @@ read_tablespace_map(parray *links, const char *backup_dir) * 3. 
backup has tablespaces and some of them are not empty */ int -check_tablespace_mapping(pgBackup *backup, bool incremental, bool force, bool pgdata_is_empty) +check_tablespace_mapping(pgBackup *backup, bool incremental, bool force, bool pgdata_is_empty, bool no_validate) { parray *links = parray_new(); size_t i; @@ -1205,7 +1207,7 @@ check_tablespace_mapping(pgBackup *backup, bool incremental, bool force, bool pg /* validate tablespace map, * if there are no tablespaces, then there is nothing left to do */ - if (!validate_tablespace_map(backup)) + if (!validate_tablespace_map(backup, no_validate)) { /* * Sanity check diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 950c71343..f237250b9 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -891,7 +891,7 @@ extern int do_validate_all(void); extern int validate_one_page(Page page, BlockNumber absolute_blkno, XLogRecPtr stop_lsn, PageState *page_st, uint32 checksum_version); -extern bool validate_tablespace_map(pgBackup *backup); +extern bool validate_tablespace_map(pgBackup *backup, bool no_validate); extern parray* get_history_streaming(ConnectionOptions *conn_opt, TimeLineID tli, parray *backup_list); @@ -987,7 +987,7 @@ extern void create_data_directories(parray *dest_files, extern void read_tablespace_map(parray *links, const char *backup_dir); extern void opt_tablespace_map(ConfigOption *opt, const char *arg); extern void opt_externaldir_map(ConfigOption *opt, const char *arg); -extern int check_tablespace_mapping(pgBackup *backup, bool incremental, bool force, bool pgdata_is_empty); +extern int check_tablespace_mapping(pgBackup *backup, bool incremental, bool force, bool pgdata_is_empty, bool no_validate); extern void check_external_dir_mapping(pgBackup *backup, bool incremental); extern char *get_external_remap(char *current_dir); diff --git a/src/restore.c b/src/restore.c index 8d573286a..f088ca36c 100644 --- a/src/restore.c +++ b/src/restore.c @@ -410,7 +410,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt, { int rc = check_tablespace_mapping(dest_backup, params->incremental_mode != INCR_NONE, params->force, - pgdata_is_empty); + pgdata_is_empty, params->no_validate); /* backup contain no tablespaces */ if (rc == NoTblspc) @@ -2173,6 +2173,8 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, postmaster_is_up = true; } + /* check that PG_VERSION is the same */ + /* slurp pg_control and check that system ID is the same * check that instance is not running * if lsn_based, check that there is no backup_label files is around AND @@ -2182,6 +2184,7 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, * data files content, because based on pg_control information we will * choose a backup suitable for lsn based incremental restore. 
*/ + elog(INFO, "Trying to read pg_control file in destination direstory"); system_id_pgdata = get_system_identifier(pgdata); @@ -2214,6 +2217,10 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, if (postmaster_is_up) return POSTMASTER_IS_RUNNING; + /* PG_CONTROL MISSING */ + + /* PG_CONTROL unreadable */ + if (!system_id_match) return SYSTEM_ID_MISMATCH; diff --git a/src/validate.c b/src/validate.c index a91fe817f..6bedd7269 100644 --- a/src/validate.c +++ b/src/validate.c @@ -709,7 +709,7 @@ do_validate_instance(void) * already filled pgBackup.files */ bool -validate_tablespace_map(pgBackup *backup) +validate_tablespace_map(pgBackup *backup, bool no_validate) { char map_path[MAXPGPATH]; pgFile *dummy = NULL; @@ -740,12 +740,15 @@ validate_tablespace_map(pgBackup *backup) map_path, base36enc(backup->backup_id)); /* check tablespace map checksumms */ - crc = pgFileGetCRC(map_path, use_crc32c, false); + if (!no_validate) + { + crc = pgFileGetCRC(map_path, use_crc32c, false); - if ((*tablespace_map)->crc != crc) - elog(ERROR, "Invalid CRC of tablespace map file \"%s\" : %X. Expected %X, " - "probably backup %s is corrupt, validate it", - map_path, crc, (*tablespace_map)->crc, base36enc(backup->backup_id)); + if ((*tablespace_map)->crc != crc) + elog(ERROR, "Invalid CRC of tablespace map file \"%s\" : %X. Expected %X, " + "probably backup %s is corrupt, validate it", + map_path, crc, (*tablespace_map)->crc, base36enc(backup->backup_id)); + } pgFileFree(dummy); parray_walk(files, pgFileFree); From 6e2e78c494f58bfe87c4b0ece48f1744b6413fd0 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 15 Apr 2021 15:12:25 +0300 Subject: [PATCH 104/525] parse tablespace_map correctly --- src/dir.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/dir.c b/src/dir.c index e1d9e580a..f150039e1 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1153,7 +1153,7 @@ read_tablespace_map(parray *links, const char *backup_dir) path[MAXPGPATH]; pgFile *file; - if (sscanf(buf, "%1023s %1023s", link_name, path) != 2) + if (sscanf(buf, "%s %n", link_name, path) != 2) elog(ERROR, "invalid format found in \"%s\"", map_path); file = pgut_new(pgFile); @@ -1279,13 +1279,19 @@ check_tablespace_mapping(pgBackup *backup, bool incremental, bool force, bool pg TablespaceListCell *cell; bool remapped = false; + elog(INFO, "Linked path: \"%s\"", linked_path); + for (cell = tablespace_dirs.head; cell; cell = cell->next) + { + elog(INFO, "Remap dir: \"%s\"", cell->old_dir); + if (strcmp(link->linked, cell->old_dir) == 0) { linked_path = cell->new_dir; remapped = true; break; } + } if (!is_absolute_path(linked_path)) elog(ERROR, "Tablespace directory path must be an absolute path: %s\n", From ac1e9a7da95bbfe75fdb452d5bbf729d4cab070a Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 15 Apr 2021 15:33:33 +0300 Subject: [PATCH 105/525] fix tablespace_map parsing --- src/dir.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/dir.c b/src/dir.c index f150039e1..ef23f6842 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1149,11 +1149,11 @@ read_tablespace_map(parray *links, const char *backup_dir) while (fgets(buf, lengthof(buf), fp)) { - char link_name[MAXPGPATH], - path[MAXPGPATH]; - pgFile *file; + char link_name[MAXPGPATH]; + char path[MAXPGPATH]; + pgFile *file; - if (sscanf(buf, "%s %n", link_name, path) != 2) + if (sscanf(buf, "%s %s", link_name, path) != 2) elog(ERROR, "invalid format found in \"%s\"", map_path); file = pgut_new(pgFile); From 
7a464232ed5c03a2b560d1bd53c487c23151b634 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 15 Apr 2021 16:01:17 +0300 Subject: [PATCH 106/525] another fix for tablespace_map parsing --- src/dir.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/dir.c b/src/dir.c index ef23f6842..bd91c108d 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1150,12 +1150,22 @@ read_tablespace_map(parray *links, const char *backup_dir) while (fgets(buf, lengthof(buf), fp)) { char link_name[MAXPGPATH]; - char path[MAXPGPATH]; + char *path; + int n = 0; pgFile *file; + int i = 0; - if (sscanf(buf, "%s %s", link_name, path) != 2) + if (sscanf(buf, "%s %n", link_name, &n) != 1) elog(ERROR, "invalid format found in \"%s\"", map_path); + path = buf + n; + + /* remove '\n' */ + i = strlen(path) - 1; + path[i] = '\0'; + + elog(INFO, "STR: '%s'", path); + file = pgut_new(pgFile); memset(file, 0, sizeof(pgFile)); From b7b3bb728dd6210e0316452b352309dbc90f7232 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 15 Apr 2021 16:34:31 +0300 Subject: [PATCH 107/525] minor cleanup --- src/dir.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/dir.c b/src/dir.c index bd91c108d..e7fe9ffa8 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1160,12 +1160,10 @@ read_tablespace_map(parray *links, const char *backup_dir) path = buf + n; - /* remove '\n' */ + /* Remove newline character at the end of string */ i = strlen(path) - 1; path[i] = '\0'; - elog(INFO, "STR: '%s'", path); - file = pgut_new(pgFile); memset(file, 0, sizeof(pgFile)); @@ -1289,12 +1287,8 @@ check_tablespace_mapping(pgBackup *backup, bool incremental, bool force, bool pg TablespaceListCell *cell; bool remapped = false; - elog(INFO, "Linked path: \"%s\"", linked_path); - for (cell = tablespace_dirs.head; cell; cell = cell->next) { - elog(INFO, "Remap dir: \"%s\"", cell->old_dir); - if (strcmp(link->linked, cell->old_dir) == 0) { linked_path = cell->new_dir; @@ -1303,6 +1297,11 @@ check_tablespace_mapping(pgBackup *backup, bool incremental, bool force, bool pg } } + if (remapped) + elog(INFO, "Tablespace %s will be remapped from \"%s\" to \"%s\"", link->name, cell->old_dir, cell->new_dir); + else + elog(INFO, "Tablespace %s will be restored using old path \"%s\"", link->name, linked_path); + if (!is_absolute_path(linked_path)) elog(ERROR, "Tablespace directory path must be an absolute path: %s\n", linked_path); From 2a3c90a65ffe18ca75fd22fff1da60db28a7a750 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 15 Apr 2021 16:39:15 +0300 Subject: [PATCH 108/525] Version 2.4.14 --- src/pg_probackup.h | 2 +- tests/expected/option_version.out | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index f237250b9..bbd516e4d 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -308,7 +308,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.4.13" +#define PROGRAM_VERSION "2.4.14" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20409 diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index 752e96a8c..c2748505e 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.4.13 \ No newline at end of file +pg_probackup 
2.4.14 \ No newline at end of file From a6fabdb1585e1d937d5c44b85bbf641265d843d6 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Fri, 16 Apr 2021 01:40:44 +0300 Subject: [PATCH 109/525] [Issue #366] escape paths in restore_command with double quotes when running restore --- src/restore.c | 2 +- tests/archive.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/restore.c b/src/restore.c index f088ca36c..9594ef0b0 100644 --- a/src/restore.c +++ b/src/restore.c @@ -1375,7 +1375,7 @@ print_recovery_settings(FILE *fp, pgBackup *backup, else { /* default cmdline, ok for local restore */ - sprintf(restore_command_guc, "%s archive-get -B %s --instance %s " + sprintf(restore_command_guc, "\"%s\" archive-get -B \"%s\" --instance \"%s\" " "--wal-file-path=%%p --wal-file-name=%%f", PROGRAM_FULL_PATH ? PROGRAM_FULL_PATH : PROGRAM_NAME, backup_path, instance_name); diff --git a/tests/archive.py b/tests/archive.py index 2d0bb5037..70a86393a 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -1715,7 +1715,7 @@ def test_archive_options(self): recovery_content = f.read() self.assertIn( - "restore_command = '{0} archive-get -B {1} --instance {2} " + "restore_command = '\"{0}\" archive-get -B \"{1}\" --instance \"{2}\" " "--wal-file-path=%p --wal-file-name=%f --remote-host=localhost " "--remote-port=22 --remote-user={3}'".format( self.probackup_path, backup_dir, 'node', self.user), @@ -1782,7 +1782,7 @@ def test_archive_options_1(self): self.restore_node( backup_dir, 'node', node, options=[ - '--restore-command=none'.format(wal_dir), + '--restore-command=none', '--archive-host=localhost1', '--archive-port=23', '--archive-user={0}'.format(self.user) @@ -1792,7 +1792,7 @@ def test_archive_options_1(self): recovery_content = f.read() self.assertIn( - "restore_command = '{0} archive-get -B {1} --instance {2} " + "restore_command = '\"{0}\" archive-get -B \"{1}\" --instance \"{2}\" " "--wal-file-path=%p --wal-file-name=%f --remote-host=localhost1 " "--remote-port=23 --remote-user={3}'".format( self.probackup_path, backup_dir, 'node', self.user), From 34741de0e1a17f6d8c5021ecc474055dcdebd663 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 22 Apr 2021 00:52:07 +0300 Subject: [PATCH 110/525] [Issue #364] added tests.validate.ValidateTest.test_no_validate_tablespace_map --- tests/validate.py | 65 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/tests/validate.py b/tests/validate.py index 62116be89..b5e7b685a 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -3968,6 +3968,71 @@ def test_validate_missing_page_header_map(self): # Clean after yourself self.del_test_dir(module_name, fname) + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_no_validate_tablespace_map(self): + """ + Check that --no-validate is propagated to tablespace_map + """ + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'external_dir') + + node.safe_psql( + 'postgres', + 'CREATE TABLE t_heap(a int) TABLESPACE "external_dir"') + + tblspace_new = self.get_tblspace_path(node, 'external_dir_new') + + oid = node.safe_psql( + 'postgres', + "select oid from pg_tablespace where spcname = 
'external_dir'").decode('utf-8').rstrip() + + # FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + pgdata = self.pgdata_content(node.data_dir) + + tablespace_map = os.path.join( + backup_dir, 'backups', 'node', + backup_id, 'database', 'tablespace_map') + + # overwrite tablespace_map file + with open(tablespace_map, "w") as f: + f.write("{0} {1}".format(oid, tblspace_new)) + f.close + + node.cleanup() + + self.restore_node(backup_dir, 'node', node, options=['--no-validate']) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # check that tablespace restore as symlink + tablespace_link = os.path.join(node.data_dir, 'pg_tblspc', oid) + self.assertTrue( + os.path.islink(tablespace_link), + "'%s' is not a symlink" % tablespace_link) + + self.assertEqual( + os.readlink(tablespace_link), + tblspace_new, + "Symlink '{0}' do not points to '{1}'".format(tablespace_link, tblspace_new)) + + # Clean after yourself + self.del_test_dir(module_name, fname) + # validate empty backup list # page from future during validate # page from future during backup From b1442f46dc66d29a2f929c3902164f4c65e0872d Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 22 Apr 2021 02:02:40 +0300 Subject: [PATCH 111/525] [Issue #364] correctly set null-termination char during tablespace_map parsing --- src/dir.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/dir.c b/src/dir.c index e7fe9ffa8..368d40832 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1160,9 +1160,10 @@ read_tablespace_map(parray *links, const char *backup_dir) path = buf + n; - /* Remove newline character at the end of string */ - i = strlen(path) - 1; - path[i] = '\0'; + /* Remove newline character at the end of string if any */ + i = strcspn(path, "\n"); + if (strlen(path) > i) + path[i] = '\0'; file = pgut_new(pgFile); memset(file, 0, sizeof(pgFile)); From 36f21a963ec1b4d33f7bb8fd14694af161582bab Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 22 Apr 2021 17:35:26 +0300 Subject: [PATCH 112/525] [Issue #369] add elog message about failure of obtaining history file via replication protocol --- src/backup.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/backup.c b/src/backup.c index 112d8b365..ce8e51228 100644 --- a/src/backup.c +++ b/src/backup.c @@ -161,14 +161,17 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool if (prev_backup == NULL) { /* try to setup multi-timeline backup chain */ - elog(WARNING, "Valid backup on current timeline %u is not found, " + elog(WARNING, "Valid full backup on current timeline %u is not found, " "trying to look up on previous timelines", current.tli); tli_list = get_history_streaming(&instance_config.conn_opt, current.tli, backup_list); if (!tli_list) + { + elog(WARNING, "Failed to obtain current timeline history file via replication protocol"); /* fallback to using archive */ tli_list = catalog_get_timelines(&instance_config); + } if (parray_num(tli_list) == 0) elog(WARNING, "Cannot find valid backup on previous timelines, " From b14dda4d49c9fb67fb56b3fef7a5ef096bd7e236 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 22 Apr 2021 17:36:04 +0300 Subject: [PATCH 113/525] [Issue #369] fix tests --- tests/backup.py | 12 +++++------- tests/false_positive.py | 6 ++---- tests/retention.py | 2 +- 3 files changed, 8 insertions(+), 12 deletions(-) diff --git a/tests/backup.py b/tests/backup.py index ec11bdc85..e3bfc84e4 
100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -149,13 +149,11 @@ def test_incremental_backup_without_full(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertTrue( - "WARNING: Valid backup on current timeline 1 is not found" in e.message and + "WARNING: Valid full backup on current timeline 1 is not found" in e.message and "ERROR: Create new full backup before an incremental one" in e.message, "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - sleep(1) - try: self.backup_node(backup_dir, 'node', node, backup_type="ptrack") # we should die here because exception is what we expect to happen @@ -166,7 +164,7 @@ def test_incremental_backup_without_full(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertTrue( - "WARNING: Valid backup on current timeline 1 is not found" in e.message and + "WARNING: Valid full backup on current timeline 1 is not found" in e.message and "ERROR: Create new full backup before an incremental one" in e.message, "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) @@ -230,7 +228,7 @@ def test_incremental_backup_corrupt_full(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertTrue( - "WARNING: Valid backup on current timeline 1 is not found" in e.message and + "WARNING: Valid full backup on current timeline 1 is not found" in e.message and "ERROR: Create new full backup before an incremental one" in e.message, "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) @@ -2352,7 +2350,7 @@ def test_parent_choosing_2(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertTrue( - 'WARNING: Valid backup on current timeline 1 is not found' in e.message and + 'WARNING: Valid full backup on current timeline 1 is not found' in e.message and 'ERROR: Create new full backup before an incremental one' in e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) @@ -3404,7 +3402,7 @@ def test_missing_replication_permission_1(self): return_id=False) self.assertIn( - 'WARNING: Valid backup on current timeline 2 is not found, trying to look up on previous timelines', + 'WARNING: Valid full backup on current timeline 2 is not found, trying to look up on previous timelines', output) self.assertIn( diff --git a/tests/false_positive.py b/tests/false_positive.py index fc9ee4b62..d4e7ccf0d 100644 --- a/tests/false_positive.py +++ b/tests/false_positive.py @@ -83,12 +83,10 @@ def test_incremental_backup_corrupt_full_1(self): except ProbackupException as e: self.assertEqual( e.message, - 'ERROR: Valid backup on current timeline is not found. ' + 'ERROR: Valid full backup on current timeline is not found. ' 'Create new FULL backup before an incremental one.\n', '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - - sleep(1) self.assertFalse( True, "Expecting Error because page backup should not be " @@ -98,7 +96,7 @@ def test_incremental_backup_corrupt_full_1(self): except ProbackupException as e: self.assertEqual( e.message, - 'ERROR: Valid backup on current timeline is not found. ' + 'ERROR: Valid full backup on current timeline is not found. 
' 'Create new FULL backup before an incremental one.\n', '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) diff --git a/tests/retention.py b/tests/retention.py index 0a439c9e7..75b19c28a 100644 --- a/tests/retention.py +++ b/tests/retention.py @@ -1712,7 +1712,7 @@ def test_wal_purge_victim(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertTrue( - "WARNING: Valid backup on current timeline 1 is not found" in e.message and + "WARNING: Valid full backup on current timeline 1 is not found" in e.message and "ERROR: Create new full backup before an incremental one" in e.message, "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) From bd284a7a41d8f5832b62a2c88ab7eea8db3b7ce5 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 22 Apr 2021 17:36:34 +0300 Subject: [PATCH 114/525] Readme: allow tracing for gdb tests --- tests/Readme.md | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/Readme.md b/tests/Readme.md index adcf5380e..f8dd91db0 100644 --- a/tests/Readme.md +++ b/tests/Readme.md @@ -38,6 +38,7 @@ Run ptrack tests: Usage: + sudo echo 0 > /proc/sys/kernel/yama/ptrace_scope pip install testgres export PG_CONFIG=/path/to/pg_config python -m unittest [-v] tests[.specific_module][.class.test] From ed897d45e38367cdbdda214d1b6d472db1c30c99 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 22 Apr 2021 17:49:06 +0300 Subject: [PATCH 115/525] [Issue #310] fix tests.replica.ReplicaTest.test_parent_choosing --- tests/replica.py | 20 +++----------------- 1 file changed, 3 insertions(+), 17 deletions(-) diff --git a/tests/replica.py b/tests/replica.py index 345f8a7dc..ff9f09fa0 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -1660,23 +1660,9 @@ def test_parent_choosing(self): # failing, because without archving, it is impossible to # take multi-timeline backup. 
- try: - self.backup_node( - backup_dir, 'replica', replica, - backup_type='delta', options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of timeline switch " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'WARNING: Cannot find valid backup on previous timelines, ' - 'WAL archive is not available' in e.message and - 'ERROR: Create new full backup before an incremental one' in e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + self.backup_node( + backup_dir, 'replica', replica, + backup_type='delta', options=['--stream']) # Clean after yourself self.del_test_dir(module_name, fname) From 278d433194553b3113daae1df5bfd6c6ab1aa1d4 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 22 Apr 2021 18:02:46 +0300 Subject: [PATCH 116/525] [Issue #310] fix tests.replica.ReplicaTest.test_replica_promote_3 --- tests/replica.py | 79 ------------------------------------------------ 1 file changed, 79 deletions(-) diff --git a/tests/replica.py b/tests/replica.py index ff9f09fa0..ce90ef96e 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -1278,85 +1278,6 @@ def test_replica_promote_2(self): # Clean after yourself self.del_test_dir(module_name, fname) - # @unittest.skip("skip") - def test_replica_promote_3(self): - """ - """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - - master.slow_start() - - self.backup_node(backup_dir, 'master', master, options=['--stream']) - - # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) - replica.cleanup() - self.restore_node(backup_dir, 'master', replica) - - # Settings for Replica - self.set_replica(master, replica) - self.set_auto_conf(replica, {'port': replica.port}) - - replica.slow_start(replica=True) - - master.safe_psql( - 'postgres', - 'CREATE TABLE t1 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,20) i') - self.wait_until_replica_catch_with_master(master, replica) - - self.add_instance(backup_dir, 'replica', replica) - - full_id = self.backup_node( - backup_dir, 'replica', - replica, options=['--stream']) - - master.safe_psql( - 'postgres', - 'CREATE TABLE t2 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,20) i') - self.wait_until_replica_catch_with_master(master, replica) - - self.backup_node( - backup_dir, 'replica', replica, - backup_type='delta', options=['--stream']) - - replica.promote() - - # failing, because without archving, it is impossible to - # take multi-timeline backup. 
- try: - self.backup_node( - backup_dir, 'replica', replica, - backup_type='delta', options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of timeline switch " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'WARNING: Cannot find valid backup on previous timelines, ' - 'WAL archive is not available' in e.message and - 'ERROR: Create new full backup before an incremental one' in e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_replica_promote_archive_delta(self): """ From 588c4cd04bfdd3dc95e8ac6d3495150b5e05a2bb Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 22 Apr 2021 18:31:34 +0300 Subject: [PATCH 117/525] tests: fix tests.restore.RestoreTest.test_restore_chain --- tests/restore.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/restore.py b/tests/restore.py index 9db885f09..f42d4fdb9 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -1616,7 +1616,7 @@ def test_restore_chain(self): try: self.backup_node( backup_dir, 'node', node, - backup_type='delta', options=['--archive-timeout=0s']) + backup_type='delta', options=['-U', 'wrong_name']) except ProbackupException as e: pass @@ -1624,7 +1624,7 @@ def test_restore_chain(self): try: self.backup_node( backup_dir, 'node', node, - backup_type='delta', options=['--archive-timeout=0s']) + backup_type='delta', options=['-U', 'wrong_name']) except ProbackupException as e: pass @@ -1636,7 +1636,7 @@ def test_restore_chain(self): try: self.backup_node( backup_dir, 'node', node, - backup_type='delta', options=['--archive-timeout=0s']) + backup_type='delta', options=['-U', 'wrong_name']) except ProbackupException as e: pass From 4390ad297b832bf18d796f8b6c61a51116a7231d Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 22 Apr 2021 18:37:29 +0300 Subject: [PATCH 118/525] tests: fix tests.restore.RestoreTest.test_restore_chain_with_corrupted_backup --- tests/restore.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/restore.py b/tests/restore.py index f42d4fdb9..2a11a27a4 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -1704,7 +1704,7 @@ def test_restore_chain_with_corrupted_backup(self): try: self.backup_node( backup_dir, 'node', node, - backup_type='page', options=['--archive-timeout=0s']) + backup_type='page', options=['-U', 'wrong_name']) except ProbackupException as e: pass @@ -1716,7 +1716,7 @@ def test_restore_chain_with_corrupted_backup(self): try: self.backup_node( backup_dir, 'node', node, - backup_type='delta', options=['--archive-timeout=0s']) + backup_type='delta', options=['-U', 'wrong_name']) except ProbackupException as e: pass @@ -1728,7 +1728,7 @@ def test_restore_chain_with_corrupted_backup(self): try: self.backup_node( backup_dir, 'node', node, - backup_type='delta', options=['--archive-timeout=0s']) + backup_type='delta', options=['-U', 'wrong_name']) except ProbackupException as e: pass From 09accf7adcc5dbc27fbe0fe8ddd2609612de1060 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 22 Apr 2021 22:59:49 +0300 Subject: [PATCH 119/525] fix instance validation so the validation carry on after corrupt backup detected --- src/parsexlog.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/parsexlog.c b/src/parsexlog.c 
index 41a410d30..43b26d1e6 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -1290,6 +1290,8 @@ RunXLogThreads(const char *archivedir, time_t target_time, if (thread_args[i].ret == 1) result = false; } + thread_interrupted = false; + interrupted = false; /* Release threads here, use thread_args only below */ pfree(threads); From fd73b9ccb6c5bc38fcc11f9c9d9f6bf3e2df5208 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Fri, 23 Apr 2021 13:19:59 +0300 Subject: [PATCH 120/525] add TODO for WAL parsing --- src/parsexlog.c | 5 ++++- src/utils/thread.c | 4 ++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/parsexlog.c b/src/parsexlog.c index 43b26d1e6..4a0f38642 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -1291,7 +1291,10 @@ RunXLogThreads(const char *archivedir, time_t target_time, result = false; } thread_interrupted = false; - interrupted = false; + +// TODO: we must detect difference between actual error (failed to read WAL) and interrupt signal +// if (interrupted) +// elog(ERROR, "Interrupted during WAL parsing"); /* Release threads here, use thread_args only below */ pfree(threads); diff --git a/src/utils/thread.c b/src/utils/thread.c index 5ceee068d..1c469bd29 100644 --- a/src/utils/thread.c +++ b/src/utils/thread.c @@ -11,6 +11,10 @@ #include "thread.h" +/* + * Global var used to detect error condition (not signal interrupt!) in threads, + * so if one thread errored out, then others may abort + */ bool thread_interrupted = false; #ifdef WIN32 From 82af8f35a6d74c8c312a9d4683dcbde5f1255ae1 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Fri, 23 Apr 2021 14:09:11 +0300 Subject: [PATCH 121/525] [Issue #346] detect failure of streaming thread --- src/backup.c | 2 +- src/data.c | 4 ++-- src/dir.c | 2 ++ src/stream.c | 6 ++++++ src/utils/logger.c | 1 - 5 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/backup.c b/src/backup.c index ce8e51228..3815900b9 100644 --- a/src/backup.c +++ b/src/backup.c @@ -1405,7 +1405,7 @@ wait_wal_lsn(XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli, } sleep(1); - if (interrupted) + if (interrupted || thread_interrupted) elog(ERROR, "Interrupted during waiting for WAL %s", in_stream_dir ? "streaming" : "archiving"); try_count++; diff --git a/src/data.c b/src/data.c index 999de867d..4370bcbbc 100644 --- a/src/data.c +++ b/src/data.c @@ -1851,7 +1851,7 @@ get_checksum_map(const char *fullpath, uint32 checksum_version, if (feof(in)) break; - if (interrupted) + if (interrupted || thread_interrupted) elog(ERROR, "Interrupted during page reading"); } @@ -1914,7 +1914,7 @@ get_lsn_map(const char *fullpath, uint32 checksum_version, if (feof(in)) break; - if (interrupted) + if (interrupted || thread_interrupted) elog(ERROR, "Interrupted during page reading"); } diff --git a/src/dir.c b/src/dir.c index 368d40832..c5c5b3297 100644 --- a/src/dir.c +++ b/src/dir.c @@ -781,6 +781,8 @@ dir_check_file(pgFile *file, bool backup_logs) * List files in parent->path directory. If "exclude" is true do not add into * "files" files from pgdata_exclude_files and directories from * pgdata_exclude_dir. + * + * TODO: should we check for interrupt here ? 
*/ static void dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, diff --git a/src/stream.c b/src/stream.c index 21204ae2c..01161f720 100644 --- a/src/stream.c +++ b/src/stream.c @@ -233,7 +233,10 @@ StreamLog(void *arg) ctl.mark_done = false; if(ReceiveXlogStream(stream_arg->conn, &ctl) == false) + { + interrupted = true; elog(ERROR, "Problem in receivexlog"); + } #if PG_VERSION_NUM >= 100000 if (!ctl.walmethod->finish()) @@ -245,7 +248,10 @@ StreamLog(void *arg) if(ReceiveXlogStream(stream_arg->conn, stream_arg->startpos, stream_arg->starttli, NULL, (char *) stream_arg->basedir, stop_streaming, standby_message_timeout, NULL, false, false) == false) + { + interrupted = true; elog(ERROR, "Problem in receivexlog"); + } #endif /* be paranoid and sort xlog_files_list, diff --git a/src/utils/logger.c b/src/utils/logger.c index 584b937e7..f039d4a5d 100644 --- a/src/utils/logger.c +++ b/src/utils/logger.c @@ -169,7 +169,6 @@ exit_if_necessary(int elevel) { /* Interrupt other possible routines */ thread_interrupted = true; - interrupted = true; #ifdef WIN32 ExitThread(elevel); #else From a4ddadd0c3db31cfe0caee1bc9530b697da9c75f Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Fri, 23 Apr 2021 15:31:39 +0300 Subject: [PATCH 122/525] [Issue #355] documentation update --- doc/pgprobackup.xml | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index ae9e5f8bb..d5cce129a 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -794,7 +794,7 @@ ALTER ROLE backup WITH REPLICATION; parameter, as follows: -archive_command = 'install_dir/pg_probackup archive-push -B backup_dir --instance instance_name --wal-file-name=%f [remote_options]' +archive_command = '"install_dir/pg_probackup" archive-push -B "backup_dir" --instance instance_name --wal-file-name=%f [remote_options]' @@ -1588,7 +1588,7 @@ pg_probackup validate -B backup_dir --instance -pg_probackup validate -B backup_dir --instance instance_name -i PT8XFX --recovery-target-time='2017-05-18 14:18:11+03' +pg_probackup validate -B backup_dir --instance instance_name -i PT8XFX --recovery-target-time="2017-05-18 14:18:11+03" If you specify the backup_id of an incremental backup, @@ -1915,7 +1915,7 @@ pg_probackup restore -B backup_dir --instance -pg_probackup restore -B backup_dir --instance instance_name --recovery-target-time='2017-05-18 14:18:11+03' +pg_probackup restore -B backup_dir --instance instance_name --recovery-target-time="2017-05-18 14:18:11+03" @@ -1942,7 +1942,7 @@ pg_probackup restore -B backup_dir --instance --recovery-target-name option: -pg_probackup restore -B backup_dir --instance instance_name --recovery-target-name='before_app_upgrade' +pg_probackup restore -B backup_dir --instance instance_name --recovery-target-name="before_app_upgrade" @@ -1952,7 +1952,7 @@ pg_probackup restore -B backup_dir --instance latest value: -pg_probackup restore -B backup_dir --instance instance_name --recovery-target='latest' +pg_probackup restore -B backup_dir --instance instance_name --recovery-target="latest" @@ -2079,14 +2079,14 @@ pg_probackup restore -B backup_dir --instance restore_command: -restore_command = 'install_dir/pg_probackup archive-get -B backup_dir --instance instance_name --wal-file-path=%p --wal-file-name=%f --remote-host=192.168.0.3 --remote-port=2303 --remote-user=backup' +restore_command = '"install_dir/pg_probackup" archive-get -B "backup_dir" --instance instance_name --wal-file-path=%p 
--wal-file-name=%f --remote-host=192.168.0.3 --remote-port=2303 --remote-user=backup' Alternatively, you can use the option to provide the entire restore_command: -pg_probackup restore -B backup_dir --instance instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 --restore-command='install_dir/pg_probackup archive-get -B backup_dir --instance instance_name --wal-file-path=%p --wal-file-name=%f --remote-host=192.168.0.3 --remote-port=2303 --remote-user=backup' +pg_probackup restore -B backup_dir --instance instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 --restore-command='"install_dir/pg_probackup" archive-get -B "backup_dir" --instance instance_name --wal-file-path=%p --wal-file-name=%f --remote-host=192.168.0.3 --remote-port=2303 --remote-user=backup' @@ -3134,7 +3134,7 @@ pg_probackup set-backup -B backup_dir --instance --expire-time option. For example: -pg_probackup set-backup -B backup_dir --instance instance_name -i backup_id --expire-time='2020-01-01 00:00:00+03' +pg_probackup set-backup -B backup_dir --instance instance_name -i backup_id --expire-time="2020-01-01 00:00:00+03" Alternatively, you can use the and @@ -3144,7 +3144,7 @@ pg_probackup set-backup -B backup_dir --instance pg_probackup backup -B backup_dir --instance instance_name -b FULL --ttl=30d -pg_probackup backup -B backup_dir --instance instance_name -b FULL --expire-time='2020-01-01 00:00:00+03' +pg_probackup backup -B backup_dir --instance instance_name -b FULL --expire-time="2020-01-01 00:00:00+03" To check if the backup is pinned, @@ -3868,7 +3868,7 @@ pg_probackup restore -B backup_dir --instance -R flag is specified. - Example: --primary-conninfo='host=192.168.1.50 port=5432 user=foo password=foopass' + Example: --primary-conninfo="host=192.168.1.50 port=5432 user=foo password=foopass" @@ -4469,7 +4469,7 @@ pg_probackup archive-get -B backup_dir --instance - Example: --recovery-target-time='2020-01-01 00:00:00+03' + Example: --recovery-target-time="2020-01-01 00:00:00+03" @@ -4659,7 +4659,7 @@ pg_probackup archive-get -B backup_dir --instance - Example: --expire-time='2020-01-01 00:00:00+03' + Example: --expire-time="2020-01-01 00:00:00+03" @@ -5202,7 +5202,7 @@ pg_probackup archive-get -B backup_dir --instance keep-alive for SSH connections opened by pg_probackup: - --ssh-options='-o ServerAliveCountMax=5 -o ServerAliveInterval=60'. + --ssh-options="-o ServerAliveCountMax=5 -o ServerAliveInterval=60". For the full list of possible options, see ssh_config manual page. 
@@ -5555,14 +5555,14 @@ INFO: Backup catalog '/mnt/backups' successfully inited Add instance <literal>pg-11</literal> to the backup catalog: -[backupman@backup_host]$ pg_probackup-11 add-instance -B /mnt/backups --instance 'pg-11' --remote-host=postgres_host --remote-user=postgres -D /var/lib/postgresql/11/main +[backupman@backup_host]$ pg_probackup-11 add-instance -B /mnt/backups --instance pg-11 --remote-host=postgres_host --remote-user=postgres -D /var/lib/postgresql/11/main INFO: Instance 'node' successfully inited Take a FULL backup: -[backupman@backup_host] pg_probackup-11 backup -B /mnt/backups --instance 'pg-11' -b FULL --stream --remote-host=postgres_host --remote-user=postgres -U backup -d backupdb +[backupman@backup_host] pg_probackup-11 backup -B /mnt/backups --instance pg-11 -b FULL --stream --remote-host=postgres_host --remote-user=postgres -U backup -d backupdb INFO: Backup start, pg_probackup version: 2.2.0, instance: node, backup ID: PZ7YK2, backup mode: FULL, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1 INFO: Start transferring data files INFO: Data files are transferred @@ -5577,7 +5577,7 @@ INFO: Backup PZ7YK2 completed Let's take a look at the backup catalog: -[backupman@backup_host] pg_probackup-11 show -B /mnt/backups --instance 'pg-11' +[backupman@backup_host] pg_probackup-11 show -B /mnt/backups --instance pg-11 BACKUP INSTANCE 'pg-11' ================================================================================================================================== @@ -5589,7 +5589,7 @@ BACKUP INSTANCE 'pg-11' Take an incremental backup in the DELTA mode: -[backupman@backup_host] pg_probackup-11 backup -B /mnt/backups --instance 'pg-11' -b delta --stream --remote-host=postgres_host --remote-user=postgres -U backup -d backupdb +[backupman@backup_host] pg_probackup-11 backup -B /mnt/backups --instance pg-11 -b delta --stream --remote-host=postgres_host --remote-user=postgres -U backup -d backupdb INFO: Backup start, pg_probackup version: 2.2.0, instance: node, backup ID: PZ7YMP, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1 INFO: Parent backup: PZ7YK2 INFO: Start transferring data files @@ -5606,14 +5606,14 @@ INFO: Backup PZ7YMP completed Let's add some parameters to <application>pg_probackup</application> configuration file, so that you can omit them from the command line: -[backupman@backup_host] pg_probackup-11 set-config -B /mnt/backups --instance 'pg-11' --remote-host=postgres_host --remote-user=postgres -U backup -d backupdb +[backupman@backup_host] pg_probackup-11 set-config -B /mnt/backups --instance pg-11 --remote-host=postgres_host --remote-user=postgres -U backup -d backupdb Take another incremental backup in the DELTA mode, omitting some of the previous parameters: -[backupman@backup_host] pg_probackup-11 backup -B /mnt/backups --instance 'pg-11' -b delta --stream +[backupman@backup_host] pg_probackup-11 backup -B /mnt/backups --instance pg-11 -b delta --stream INFO: Backup start, pg_probackup version: 2.2.0, instance: node, backup ID: PZ7YR5, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1 INFO: Parent backup: PZ7YMP INFO: Start transferring data files @@ -5629,7 +5629,7 @@ INFO: Backup PZ7YR5 completed Let's take a look at the instance configuration: -[backupman@backup_host] pg_probackup-11 show-config -B /mnt/backups --instance 'pg-11' +[backupman@backup_host] pg_probackup-11 show-config -B /mnt/backups --instance pg-11 # 
Backup instance information pgdata = /var/lib/postgresql/11/main @@ -5668,7 +5668,7 @@ remote-host = postgres_host Let's take a look at the backup catalog: -[backupman@backup_host] pg_probackup-11 show -B /mnt/backups --instance 'pg-11' +[backupman@backup_host] pg_probackup-11 show -B /mnt/backups --instance pg-11 ==================================================================================================================================== Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status From 637f1d1c059b8fd20899571865edee84a326c64b Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 25 Apr 2021 00:26:40 +0300 Subject: [PATCH 123/525] tests: fix archive.ArchiveTest.test_archive_pg_receivexlog_partial_handling --- tests/archive.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/archive.py b/tests/archive.py index 70a86393a..7f5c75879 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -2090,11 +2090,11 @@ def test_archive_pg_receivexlog_partial_handling(self): result = node.safe_psql( "postgres", - "select sum(id) from t_heap") + "select sum(id) from t_heap").decode('utf-8').rstrip() result_new = node_restored.safe_psql( "postgres", - "select sum(id) from t_heap") + "select sum(id) from t_heap").decode('utf-8').rstrip() self.assertEqual(result, result_new) From 898d5f329273d50e91c88bb66ee18b318dac598a Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 25 Apr 2021 01:10:36 +0300 Subject: [PATCH 124/525] sleep in slow_start --- tests/helpers/ptrack_helpers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 5b4adedcc..793a0b147 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -142,6 +142,7 @@ def slow_start(self, replica=False): else: raise e + sleep(1) class ProbackupTest(object): # Class attributes From 4bcdda4346fc84bdfba0d620faf2c2d06d585702 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 25 Apr 2021 02:22:30 +0300 Subject: [PATCH 125/525] tests: fix test_archive_pg_receivexlog_partial_handling --- tests/archive.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/archive.py b/tests/archive.py index 7f5c75879..ac5a6569e 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -2008,8 +2008,7 @@ def test_archive_pg_receivexlog_partial_handling(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'archive_timeout': '10s'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) From 8291582b4a546629df06d343f5aaa99aad8e7ba5 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 25 Apr 2021 02:30:39 +0300 Subject: [PATCH 126/525] tests: another fix for test_archive_pg_receivexlog_partial_handling --- tests/archive.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/archive.py b/tests/archive.py index ac5a6569e..40be72aab 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -2032,8 +2032,10 @@ def test_archive_pg_receivexlog_partial_handling(self): replica.slow_start(replica=True) if self.get_version(replica) < 100000: + app_name = 'pg_receivexlog' pg_receivexlog_path = self.get_bin_path('pg_receivexlog') else: + app_name = 'pg_receivewal' pg_receivexlog_path = self.get_bin_path('pg_receivewal') cmdline = [ @@ -2079,6 +2081,7 @@ def 
test_archive_pg_receivexlog_partial_handling(self): node_restored.data_dir, options=['--recovery-target=latest', '--recovery-target-action=promote']) self.set_auto_conf(node_restored, {'port': node_restored.port}) self.set_auto_conf(node_restored, {'hot_standby': 'off'}) + self.set_auto_conf(node_restored, {'synchronous_standby_names': app_name}) # it will set node_restored as warm standby. # with open(os.path.join(node_restored.data_dir, "standby.signal"), 'w') as f: From b6e948be69f5e91f1eb5fa5c27aa1bf354731b88 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 25 Apr 2021 14:04:37 +0300 Subject: [PATCH 127/525] tests: another fix for test_archive_pg_receivexlog_partial_handling --- tests/archive.py | 40 +++++++++++----------------------------- 1 file changed, 11 insertions(+), 29 deletions(-) diff --git a/tests/archive.py b/tests/archive.py index 40be72aab..cbb5059c8 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -2015,23 +2015,7 @@ def test_archive_pg_receivexlog_partial_handling(self): node.slow_start() - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) - replica.cleanup() - - self.restore_node( - backup_dir, 'node', replica, replica.data_dir, options=['-R']) - self.set_auto_conf(replica, {'port': replica.port}) - self.set_replica(node, replica) - - self.add_instance(backup_dir, 'replica', replica) - # self.set_archiving(backup_dir, 'replica', replica, replica=True) - - replica.slow_start(replica=True) - - if self.get_version(replica) < 100000: + if self.get_version(node) < 100000: app_name = 'pg_receivexlog' pg_receivexlog_path = self.get_bin_path('pg_receivexlog') else: @@ -2039,8 +2023,8 @@ def test_archive_pg_receivexlog_partial_handling(self): pg_receivexlog_path = self.get_bin_path('pg_receivewal') cmdline = [ - pg_receivexlog_path, '-p', str(replica.port), '--synchronous', - '-D', os.path.join(backup_dir, 'wal', 'replica')] + pg_receivexlog_path, '-p', str(node.port), '--synchronous', + '-D', os.path.join(backup_dir, 'wal', 'node')] if self.archive_compress and node.major_version >= 10: cmdline += ['-Z', '1'] @@ -2053,8 +2037,12 @@ def test_archive_pg_receivexlog_partial_handling(self): 'Failed to start pg_receivexlog: {0}'.format( pg_receivexlog.communicate()[1])) + self.set_auto_conf(node, {'synchronous_standby_names': app_name}) + self.set_auto_conf(node, {'synchronous_commit': 'on'}) + node.reload() + # FULL - self.backup_node(backup_dir, 'replica', replica, options=['--stream']) + self.backup_node(backup_dir, 'node', node, options=['--stream']) node.safe_psql( "postgres", @@ -2064,7 +2052,7 @@ def test_archive_pg_receivexlog_partial_handling(self): # PAGE self.backup_node( - backup_dir, 'replica', replica, backup_type='delta', options=['--stream']) + backup_dir, 'node', node, backup_type='page', options=['--stream']) node.safe_psql( "postgres", @@ -2077,16 +2065,10 @@ def test_archive_pg_receivexlog_partial_handling(self): node_restored.cleanup() self.restore_node( - backup_dir, 'replica', node_restored, - node_restored.data_dir, options=['--recovery-target=latest', '--recovery-target-action=promote']) + backup_dir, 'node', node_restored, node_restored.data_dir, + options=['--recovery-target=latest', '--recovery-target-action=promote']) self.set_auto_conf(node_restored, {'port': node_restored.port}) self.set_auto_conf(node_restored, {'hot_standby': 'off'}) - self.set_auto_conf(node_restored, {'synchronous_standby_names': app_name}) - - # it will set 
node_restored as warm standby. -# with open(os.path.join(node_restored.data_dir, "standby.signal"), 'w') as f: -# f.flush() -# f.close() node_restored.slow_start() From 7a3f26fbbad3bbf6b7ac22331ab9689b7d5106fd Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 25 Apr 2021 14:15:48 +0300 Subject: [PATCH 128/525] tests: disable autovacuum in test_archive_pg_receivexlog_partial_handling --- tests/archive.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/archive.py b/tests/archive.py index cbb5059c8..6bb85e558 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -2008,7 +2008,8 @@ def test_archive_pg_receivexlog_partial_handling(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums']) + initdb_params=['--data-checksums'], + pg_options={'autovacuum': 'off'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) From df8aadfe2df44d4530d6dc4549e47996284f3ab6 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 25 Apr 2021 14:22:51 +0300 Subject: [PATCH 129/525] tests: kill pg_receivexlog after test_archive_pg_receivexlog_partial_handling --- tests/archive.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/archive.py b/tests/archive.py index 6bb85e558..7f11b808c 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -2061,6 +2061,8 @@ def test_archive_pg_receivexlog_partial_handling(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(1000000,2000000) i") + pg_receivexlog.kill() + node_restored = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node_restored')) node_restored.cleanup() @@ -2084,7 +2086,6 @@ def test_archive_pg_receivexlog_partial_handling(self): self.assertEqual(result, result_new) # Clean after yourself - pg_receivexlog.kill() self.del_test_dir(module_name, fname) @unittest.skip("skip") From 13492bf8ca135f8770601be7976612fd3c8cc75b Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 25 Apr 2021 15:04:06 +0300 Subject: [PATCH 130/525] tests: disable autovacuum in test_checksum_corruption_detection --- tests/incr_restore.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/incr_restore.py b/tests/incr_restore.py index 885a88c2e..4838fefc9 100644 --- a/tests/incr_restore.py +++ b/tests/incr_restore.py @@ -136,7 +136,8 @@ def test_checksum_corruption_detection(self): fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums']) + initdb_params=['--data-checksums'], + pg_options={'autovacuum': 'off'}) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) From 9f29fb9b59fcb070376b00e559edf4f9f7986d75 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 25 Apr 2021 15:33:49 +0300 Subject: [PATCH 131/525] tests: sleep in slow_start --- tests/helpers/ptrack_helpers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 793a0b147..d06527887 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -138,11 +138,11 @@ def slow_start(self, replica=False): except testgres.QueryException as e: if 'database system is starting up' in e.message: - continue + pass else: raise e - sleep(1) + sleep(0.5) class ProbackupTest(object): # Class attributes From e387e8e0d508fbab620f29bdc84f3a2a0e0d5ab3 Mon Sep 17 
00:00:00 2001 From: Grigory Smolkin Date: Sun, 25 Apr 2021 16:58:32 +0300 Subject: [PATCH 132/525] tests: update expected help --- tests/expected/option_help.out | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index 2170e2773..c2b15e7ac 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -122,11 +122,12 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--wal-depth=wal-depth] [-i backup-id | --delete-expired | --merge-expired | --status=backup_status] [--delete-wal] - [--dry-run] + [--dry-run] [--no-validate] [--no-sync] [--help] pg_probackup merge -B backup-path --instance=instance_name -i backup-id [--progress] [-j num-threads] + [--no-validate] [--no-sync] [--help] pg_probackup add-instance -B backup-path -D pgdata-path From 931e0a451c39aaa45c0d89af832800659d2ebdcd Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 25 Apr 2021 17:00:53 +0300 Subject: [PATCH 133/525] Version 2.4.15 --- src/pg_probackup.h | 2 +- tests/expected/option_version.out | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index bbd516e4d..f5bac64a0 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -308,7 +308,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.4.14" +#define PROGRAM_VERSION "2.4.15" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20409 diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index c2748505e..05a8660ab 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.4.14 \ No newline at end of file +pg_probackup 2.4.15 \ No newline at end of file From 2bbecfd06c43be5dc2721a0adee129a7792994eb Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 25 Apr 2021 19:13:23 +0300 Subject: [PATCH 134/525] tests: added test_validate_instance_with_several_corrupt_backups and test_validate_instance_with_several_corrupt_backups_interrupt --- tests/validate.py | 198 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 198 insertions(+) diff --git a/tests/validate.py b/tests/validate.py index b5e7b685a..eee990ad6 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -982,6 +982,204 @@ def test_validate_specific_target_corrupted_intermediate_backups(self): # Clean after yourself self.del_test_dir(module_name, fname) + # @unittest.skip("skip") + def test_validate_instance_with_several_corrupt_backups(self): + """ + make archive node, take FULL1, PAGE1_1, FULL2, PAGE2_1 backups, FULL3 + corrupt file in FULL and FULL2 and run validate on instance, + expect FULL1 to gain status CORRUPT, PAGE1_1 to gain status ORPHAN + FULL2 to gain status CORRUPT, PAGE2_1 to gain status ORPHAN + """ + fname = self.id().split('.')[3] + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "create table t_heap as select generate_series(0,1) i") + # FULL1 + 
backup_id_1 = self.backup_node( + backup_dir, 'node', node, options=['--no-validate']) + + # FULL2 + backup_id_2 = self.backup_node(backup_dir, 'node', node) + rel_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + + node.safe_psql( + "postgres", + "insert into t_heap values(2)") + + backup_id_3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # FULL3 + backup_id_4 = self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "insert into t_heap values(3)") + + backup_id_5 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # FULL4 + backup_id_6 = self.backup_node( + backup_dir, 'node', node, options=['--no-validate']) + + # Corrupt some files in FULL2 and FULL3 backup + os.remove(os.path.join( + backup_dir, 'backups', 'node', backup_id_2, + 'database', rel_path)) + os.remove(os.path.join( + backup_dir, 'backups', 'node', backup_id_4, + 'database', rel_path)) + + # Validate Instance + try: + self.validate_pb(backup_dir, 'node', options=["-j", "4", "--log-level-file=LOG"]) + self.assertEqual( + 1, 0, + "Expecting Error because of data files corruption.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + "INFO: Validate backups of the instance 'node'" in e.message, + "\n Unexpected Error Message: {0}\n " + "CMD: {1}".format(repr(e.message), self.cmd)) + self.assertTrue( + 'WARNING: Some backups are not valid' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node', backup_id_1)['status'], + 'Backup STATUS should be "OK"') + self.assertEqual( + 'CORRUPT', self.show_pb(backup_dir, 'node', backup_id_2)['status'], + 'Backup STATUS should be "CORRUPT"') + self.assertEqual( + 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_3)['status'], + 'Backup STATUS should be "ORPHAN"') + self.assertEqual( + 'CORRUPT', self.show_pb(backup_dir, 'node', backup_id_4)['status'], + 'Backup STATUS should be "CORRUPT"') + self.assertEqual( + 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_5)['status'], + 'Backup STATUS should be "ORPHAN"') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node', backup_id_6)['status'], + 'Backup STATUS should be "OK"') + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_validate_instance_with_several_corrupt_backups_interrupt(self): + """ + check that interrupt during validation is handled correctly + """ + fname = self.id().split('.')[3]q + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "create table t_heap as select generate_series(0,1) i") + # FULL1 + backup_id_1 = self.backup_node( + backup_dir, 'node', node, options=['--no-validate']) + + # FULL2 + backup_id_2 = self.backup_node(backup_dir, 'node', node) + rel_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + + node.safe_psql( + "postgres", + "insert into t_heap values(2)") + + backup_id_3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # FULL3 + backup_id_4 = 
self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "insert into t_heap values(3)") + + backup_id_5 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # FULL4 + backup_id_6 = self.backup_node( + backup_dir, 'node', node, options=['--no-validate']) + + # Corrupt some files in FULL2 and FULL3 backup + os.remove(os.path.join( + backup_dir, 'backups', 'node', backup_id_1, + 'database', rel_path)) + os.remove(os.path.join( + backup_dir, 'backups', 'node', backup_id_3, + 'database', rel_path)) + + # Validate Instance + gdb = self.validate_pb( + backup_dir, 'node', options=["-j", "4", "--log-level-file=LOG"], gdb=True) + + gdb.set_breakpoint('validate_file_pages') + gdb.run_until_break() + gdb.continue_execution_until_break() + gdb.remove_all_breakpoints() + gdb._execute('signal SIGINT') + gdb.continue_execution_until_error() + + self.assertEqual( + 'DONE', self.show_pb(backup_dir, 'node', backup_id_1)['status'], + 'Backup STATUS should be "OK"') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node', backup_id_2)['status'], + 'Backup STATUS should be "OK"') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node', backup_id_3)['status'], + 'Backup STATUS should be "CORRUPT"') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'], + 'Backup STATUS should be "ORPHAN"') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'], + 'Backup STATUS should be "OK"') + self.assertEqual( + 'DONE', self.show_pb(backup_dir, 'node', backup_id_6)['status'], + 'Backup STATUS should be "OK"') + + log_file = os.path.join(backup_dir, 'log', 'pg_probackup.log') + with open(log_file, 'r') as f: + log_content = f.read() + self.assertNotIn( + 'Interrupted while locking backup', log_content) + + # Clean after yourself + self.del_test_dir(module_name, fname) + # @unittest.skip("skip") def test_validate_instance_with_corrupted_page(self): """ From 1c860ff9bceadc7f2b66925e292fe5a79204b24b Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 25 Apr 2021 21:39:56 +0300 Subject: [PATCH 135/525] tests: fix typo in test_validate_instance_with_several_corrupt_backups_interrupt --- tests/validate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/validate.py b/tests/validate.py index eee990ad6..c5cc80733 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -1088,7 +1088,7 @@ def test_validate_instance_with_several_corrupt_backups_interrupt(self): """ check that interrupt during validation is handled correctly """ - fname = self.id().split('.')[3]q + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums']) From 23b00b1ddfec8a262b200228fc06f8e77bb9e2c8 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 26 Apr 2021 12:52:37 +0300 Subject: [PATCH 136/525] tests: set env for run_binary --- tests/archive.py | 4 +++- tests/helpers/ptrack_helpers.py | 10 +++++++--- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/tests/archive.py b/tests/archive.py index 7f11b808c..c506ccbf5 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -2030,7 +2030,9 @@ def test_archive_pg_receivexlog_partial_handling(self): if self.archive_compress and node.major_version >= 10: cmdline += ['-Z', '1'] - pg_receivexlog = self.run_binary(cmdline, asynchronous=True) + env = self.test_env + env["PGAPPNAME"] = app_name + pg_receivexlog = self.run_binary(cmdline, asynchronous=True, env) if 
pg_receivexlog.returncode: self.assertFalse( diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index d06527887..b0400a72d 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -779,7 +779,11 @@ def run_pb(self, command, asynchronous=False, gdb=False, old_binary=False, retur except subprocess.CalledProcessError as e: raise ProbackupException(e.output.decode('utf-8'), self.cmd) - def run_binary(self, command, asynchronous=False): + def run_binary(self, command, asynchronous=False, env=None): + + if not env: + env = self.test_env + if self.verbose: print([' '.join(map(str, command))]) try: @@ -789,13 +793,13 @@ def run_binary(self, command, asynchronous=False): stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - env=self.test_env + env=env ) else: self.output = subprocess.check_output( command, stderr=subprocess.STDOUT, - env=self.test_env + env=env ).decode('utf-8') return self.output except subprocess.CalledProcessError as e: From 3837a62dcace3736d24225b80e02409d292ed59f Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 26 Apr 2021 12:56:44 +0300 Subject: [PATCH 137/525] tests: fix env in test_archive_pg_receivexlog_partial_handling --- tests/archive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/archive.py b/tests/archive.py index c506ccbf5..2ebe09b39 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -2032,7 +2032,7 @@ def test_archive_pg_receivexlog_partial_handling(self): env = self.test_env env["PGAPPNAME"] = app_name - pg_receivexlog = self.run_binary(cmdline, asynchronous=True, env) + pg_receivexlog = self.run_binary(cmdline, asynchronous=True, env=env) if pg_receivexlog.returncode: self.assertFalse( From b1ee3a9dc36bc07689f5c4c69d92428e1af2ed07 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Tue, 27 Apr 2021 22:19:29 +0300 Subject: [PATCH 138/525] DOC: update --- doc/pgprobackup.xml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index d5cce129a..740517313 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -761,10 +761,9 @@ ALTER ROLE backup WITH REPLICATION; Setting up Continuous WAL Archiving Making backups in PAGE backup mode, performing - PITR, + PITR and making backups with - ARCHIVE WAL delivery mode and - running incremental backup after timeline switch + ARCHIVE WAL delivery mode require continuous WAL archiving to be enabled. 
To set up continuous From 17251d6677baac69117647541e8ee4087dcddad1 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Wed, 28 Apr 2021 11:04:48 +0300 Subject: [PATCH 139/525] [Issue #376] follow symlinks when walking the WAL archive directory --- src/catalog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/catalog.c b/src/catalog.c index 6ee7b93d4..981841747 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1404,7 +1404,7 @@ catalog_get_timelines(InstanceConfig *instance) /* read all xlog files that belong to this archive */ sprintf(arclog_path, "%s/%s/%s", backup_path, "wal", instance->name); - dir_list_file(xlog_files_list, arclog_path, false, false, false, false, true, 0, FIO_BACKUP_HOST); + dir_list_file(xlog_files_list, arclog_path, false, true, false, false, true, 0, FIO_BACKUP_HOST); parray_qsort(xlog_files_list, pgFileCompareName); timelineinfos = parray_new(); From 30f7f78b086846cd823e9a1b9d8a9cff486fc0e6 Mon Sep 17 00:00:00 2001 From: dld-r00f Date: Mon, 3 May 2021 20:07:54 +0300 Subject: [PATCH 140/525] Update README.md Error in ubuntu repo installation command for not super user. Sudo command works only for echo command not for redirection and plain user have no rights to write to /etc/apt/sources.list.d/ folder. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2ecaf9695..d4833c8de 100644 --- a/README.md +++ b/README.md @@ -72,7 +72,7 @@ Installers are available in release **assets**. [Latests](https://github.com/pos #### pg_probackup for vanilla PostgreSQL ```shell #DEB Ubuntu|Debian Packages -sudo echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list +sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update sudo apt-get install pg-probackup-{13,12,11,10,9.6,9.5} sudo apt-get install pg-probackup-{13,12,11,10,9.6,9.5}-dbg From 89188e81cfd28b6f3395258ff1ea0dee6bb2aa6b Mon Sep 17 00:00:00 2001 From: dld-r00f Date: Tue, 4 May 2021 11:14:09 +0300 Subject: [PATCH 141/525] Update README.md Change repo setup for Ubuntu/Deb for PostgresPro section. 
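The pitfall described in the commit messages above can be illustrated with a minimal shell sketch (illustrative only; the list-file path follows the README, and the "deb ..." repository line is abbreviated):

  sudo echo "deb ..." > /etc/apt/sources.list.d/pg_probackup.list
  # fails: the redirection is performed by the invoking, unprivileged shell,
  # so only echo runs as root and the write to /etc/apt/... is denied

  sudo sh -c 'echo "deb ..." > /etc/apt/sources.list.d/pg_probackup.list'
  # works: the whole command, including the redirection, runs in a root shell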
--- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d4833c8de..ab6af6685 100644 --- a/README.md +++ b/README.md @@ -130,7 +130,7 @@ sudo apt-get install pg_probackup-{13,12,11,10,9.6,9.5}-debuginfo #### pg_probackup for PostgresPro Standard and Enterprise ```shell #DEB Ubuntu|Debian Packages -sudo echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup-forks.list +sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup-forks.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update sudo apt-get install pg-probackup-{std,ent}-{12,11,10,9.6} sudo apt-get install pg-probackup-{std,ent}-{12,11,10,9.6}-dbg From 46bf00d60a3f65359d9153be0db7fe6e704ce538 Mon Sep 17 00:00:00 2001 From: dld-r00f Date: Tue, 4 May 2021 11:50:08 +0300 Subject: [PATCH 142/525] Update README.md Change repo setup for all sections where the `sudo echo` commands are located. --- README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index ab6af6685..49b9351df 100644 --- a/README.md +++ b/README.md @@ -78,8 +78,8 @@ sudo apt-get install pg-probackup-{13,12,11,10,9.6,9.5} sudo apt-get install pg-probackup-{13,12,11,10,9.6,9.5}-dbg #DEB-SRC Packages -sudo echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\ - /etc/apt/sources.list.d/pg_probackup.list && sudo apt-get update +sudo sh -c 'echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\ + /etc/apt/sources.list.d/pg_probackup.list' && sudo apt-get update sudo apt-get source pg-probackup-{13,12,11,10,9.6,9.5} #RPM Centos Packages @@ -109,19 +109,19 @@ zypper install pg_probackup-{13,12,11,10,9.6,9.5}-debuginfo zypper si pg_probackup-{13,12,11,10,9.6,9.5} #RPM ALT Linux 7 -sudo echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p7 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list +sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p7 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update sudo apt-get install pg_probackup-{13,12,11,10,9.6,9.5} sudo apt-get install pg_probackup-{13,12,11,10,9.6,9.5}-debuginfo #RPM ALT Linux 8 -sudo echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p8 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list +sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p8 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update sudo apt-get install pg_probackup-{13,12,11,10,9.6,9.5} sudo apt-get install pg_probackup-{13,12,11,10,9.6,9.5}-debuginfo #RPM ALT Linux 9 -sudo echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p9 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list +sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p9 x86_64 vanilla" > 
/etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update sudo apt-get install pg_probackup-{13,12,11,10,9.6,9.5} sudo apt-get install pg_probackup-{13,12,11,10,9.6,9.5}-debuginfo @@ -151,19 +151,19 @@ yum install pg_probackup-{std,ent}-{12,11,10,9.6} yum install pg_probackup-{std,ent}-{12,11,10,9.6}-debuginfo #RPM ALT Linux 7 -sudo echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p7 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list +sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p7 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' sudo apt-get update sudo apt-get install pg_probackup-{std,ent}-{12,11,10,9.6} sudo apt-get install pg_probackup-{std,ent}-{12,11,10,9.6}-debuginfo #RPM ALT Linux 8 -sudo echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p8 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list +sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p8 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' sudo apt-get update sudo apt-get install pg_probackup-{std,ent}-{12,11,10,9.6} sudo apt-get install pg_probackup-{std,ent}-{12,11,10,9.6}-debuginfo #RPM ALT Linux 9 -sudo echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p9 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list && sudo apt-get update +sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p9 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' && sudo apt-get update sudo apt-get install pg_probackup-{std,ent}-{12,11,10,9.6} sudo apt-get install pg_probackup-{std,ent}-{12,11,10,9.6}-debuginfo ``` From 33c28fdd7f5fe7349b7e4cc9ebb8f1e81fa11bb9 Mon Sep 17 00:00:00 2001 From: Alexey Kondratov Date: Wed, 12 May 2021 20:39:05 +0300 Subject: [PATCH 143/525] Accept ptrack v2.2 as well --- src/ptrack.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/ptrack.c b/src/ptrack.c index 06ba44eeb..dc1a2c74c 100644 --- a/src/ptrack.c +++ b/src/ptrack.c @@ -201,6 +201,8 @@ get_ptrack_version(PGconn *backup_conn, PGNodeInfo *nodeInfo) nodeInfo->ptrack_version_num = 20; else if (strcmp(ptrack_version_str, "2.1") == 0) nodeInfo->ptrack_version_num = 21; + else if (strcmp(ptrack_version_str, "2.2") == 0) + nodeInfo->ptrack_version_num = 22; else elog(WARNING, "Update your ptrack to the version 2.1 or upper. 
Current version is %s", ptrack_version_str); From 0e4b3a970a811fc02bd9830317d2a0d9776607e6 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Tue, 18 May 2021 12:12:42 +0300 Subject: [PATCH 144/525] [Issue #385] pg_stop_backup refactoring, part 1 --- src/backup.c | 899 ++++++++++++++++++++++++--------------------- src/pg_probackup.h | 5 + src/utils/pgut.c | 16 +- src/utils/pgut.h | 3 +- tests/archive.py | 11 +- tests/replica.py | 6 +- 6 files changed, 517 insertions(+), 423 deletions(-) diff --git a/src/backup.c b/src/backup.c index a4b88e02f..46e3ba482 100644 --- a/src/backup.c +++ b/src/backup.c @@ -32,13 +32,25 @@ static parray *backup_files_list = NULL; /* We need critical section for datapagemap_add() in case of using threads */ static pthread_mutex_t backup_pagemap_mutex = PTHREAD_MUTEX_INITIALIZER; - +// TODO: move to PGnodeInfo bool exclusive_backup = false; /* Is pg_start_backup() was executed */ static bool backup_in_progress = false; -/* Is pg_stop_backup() was sent */ -static bool pg_stop_backup_is_sent = false; + +struct pg_stop_backup_result { + /* + * We will use values of snapshot_xid and invocation_time if there are + * no transactions between start_lsn and stop_lsn. + */ + TransactionId snapshot_xid; + time_t invocation_time; + XLogRecPtr lsn; + size_t backup_label_content_len; + char *backup_label_content; + size_t tablespace_map_content_len; + char *tablespace_map_content; +}; /* * Backup routines @@ -53,7 +65,11 @@ static void do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, static void pg_start_backup(InstanceState *instanceState, const char *label, bool smooth, pgBackup *backup, PGNodeInfo *nodeInfo, PGconn *conn); static void pg_switch_wal(PGconn *conn); +static void pg_silent_client_messages(PGconn *conn); +static void pg_create_restore_point(PGconn *conn, time_t backup_start_time); + static void pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startbackup_conn, PGNodeInfo *nodeInfo); +static void pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, bool is_exclusive, char **query_text); static XLogRecPtr wait_wal_lsn(InstanceState *instanceState, XLogRecPtr lsn, bool is_start_lsn, TimeLineID tli, bool in_prev_segment, bool segment_only, @@ -74,18 +90,20 @@ static void check_server_version(PGconn *conn, PGNodeInfo *nodeInfo); static void confirm_block_size(PGconn *conn, const char *name, int blcksz); static void set_cfs_datafiles(parray *files, const char *root, char *relative, size_t i); +static StopBackupCallbackState stop_callback_state; + static void backup_stopbackup_callback(bool fatal, void *userdata) { - InstanceState *instanceState = (InstanceState *) userdata; - PGconn *pg_startbackup_conn = instanceState->conn; + StopBackupCallbackState *st = (StopBackupCallbackState *) userdata; /* * If backup is in progress, notify stop of backup to PostgreSQL */ if (backup_in_progress) { elog(WARNING, "backup in progress, stop backup"); - pg_stop_backup(instanceState, NULL, pg_startbackup_conn, NULL); /* don't care about stop_lsn in case of error */ + /* don't care about stop_lsn in case of error */ + pg_stop_backup_send(st->conn, st->server_version, current.from_replica, exclusive_backup, NULL); } } @@ -1048,7 +1066,6 @@ pg_start_backup(InstanceState *instanceState, const char *label, bool smooth, pg const char *params[2]; uint32 lsn_hi; uint32 lsn_lo; - params[0] = label; elog(INFO, "wait for pg_start_backup()"); @@ -1071,8 +1088,9 @@ pg_start_backup(InstanceState *instanceState, const char 
*label, bool smooth, pg * is necessary to call pg_stop_backup() in backup_cleanup(). */ backup_in_progress = true; - instanceState->conn = conn; - pgut_atexit_push(backup_stopbackup_callback, instanceState); + stop_callback_state.conn = conn; + stop_callback_state.server_version = nodeInfo->server_version; + pgut_atexit_push(backup_stopbackup_callback, &stop_callback_state); /* Extract timeline and LSN from results of pg_start_backup() */ XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo); @@ -1103,9 +1121,7 @@ pg_switch_wal(PGconn *conn) { PGresult *res; - /* Remove annoying NOTICE messages generated by backend */ - res = pgut_execute(conn, "SET client_min_messages = warning;", 0, NULL); - PQclear(res); + pg_silent_client_messages(conn); #if PG_VERSION_NUM >= 100000 res = pgut_execute(conn, "SELECT pg_catalog.pg_switch_wal()", 0, NULL); @@ -1450,70 +1466,101 @@ wait_wal_lsn(InstanceState *instanceState, XLogRecPtr target_lsn, bool is_start_ } } -/* - * Notify end of backup to PostgreSQL server. - */ +/* Remove annoying NOTICE messages generated by backend */ static void -pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startbackup_conn, - PGNodeInfo *nodeInfo) +pg_silent_client_messages(PGconn *conn) { - PGconn *conn; - PGresult *res; - PGresult *tablespace_map_content = NULL; - uint32 lsn_hi; - uint32 lsn_lo; - //XLogRecPtr restore_lsn = InvalidXLogRecPtr; - int pg_stop_backup_timeout = 0; - char path[MAXPGPATH]; - char backup_label[MAXPGPATH]; - FILE *fp; - pgFile *file; - size_t len; - char *val = NULL; - char *stop_backup_query = NULL; - bool stop_lsn_exists = false; - XLogRecPtr stop_backup_lsn_tmp = InvalidXLogRecPtr; - - /* - * We will use this values if there are no transactions between start_lsn - * and stop_lsn. - */ - time_t recovery_time; - TransactionId recovery_xid; - - if (!backup_in_progress) - elog(ERROR, "backup is not in progress"); - - conn = pg_startbackup_conn; - - /* Remove annoying NOTICE messages generated by backend */ + PGresult *res; res = pgut_execute(conn, "SET client_min_messages = warning;", 0, NULL); PQclear(res); +} - /* Make proper timestamp format for parse_time() */ - res = pgut_execute(conn, "SET datestyle = 'ISO, DMY';", 0, NULL); - PQclear(res); +static void +pg_create_restore_point(PGconn *conn, time_t backup_start_time) +{ + PGresult *res; + const char *params[1]; + char name[1024]; - /* Create restore point - * Only if backup is from master. - * For PG 9.5 create restore point only if pguser is superuser. - */ - if (backup != NULL && !backup->from_replica && - !(nodeInfo->server_version < 90600 && - !nodeInfo->is_superuser)) - { - const char *params[1]; - char name[1024]; + snprintf(name, lengthof(name), "pg_probackup, backup_id %s", + base36enc(backup_start_time)); + params[0] = name; - snprintf(name, lengthof(name), "pg_probackup, backup_id %s", - base36enc(backup->start_time)); - params[0] = name; + res = pgut_execute(conn, "SELECT pg_catalog.pg_create_restore_point($1)", + 1, params); + PQclear(res); +} - res = pgut_execute(conn, "SELECT pg_catalog.pg_create_restore_point($1)", - 1, params); - PQclear(res); - } +void +pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, bool is_exclusive, char **query_text) +{ + static const char + stop_exlusive_backup_query[] = + /* + * Stop the non-exclusive backup. Besides stop_lsn it returns from + * pg_stop_backup(false) copy of the backup label and tablespace map + * so they can be written to disk by the caller. 
+ * TODO, question: add NULLs as backup_label and tablespace_map? + */ + "SELECT" + " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," + " current_timestamp(0)::timestamptz," + " pg_catalog.pg_stop_backup() as lsn", + stop_backup_on_master_query[] = + "SELECT" + " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," + " current_timestamp(0)::timestamptz," + " lsn," + " labelfile," + " spcmapfile" + " FROM pg_catalog.pg_stop_backup(false, false)", + stop_backup_on_master_before10_query[] = + "SELECT" + " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," + " current_timestamp(0)::timestamptz," + " lsn," + " labelfile," + " spcmapfile" + " FROM pg_catalog.pg_stop_backup(false)", + /* + * In case of backup from replica >= 9.6 we do not trust minRecPoint + * and stop_backup LSN, so we use latest replayed LSN as STOP LSN. + */ + stop_backup_on_replica_query[] = + "SELECT" + " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," + " current_timestamp(0)::timestamptz," + " pg_catalog.pg_last_wal_replay_lsn()," + " labelfile," + " spcmapfile" + " FROM pg_catalog.pg_stop_backup(false, false)", + stop_backup_on_replica_before10_query[] = + "SELECT" + " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," + " current_timestamp(0)::timestamptz," + " pg_catalog.pg_last_xlog_replay_location()," + " labelfile," + " spcmapfile" + " FROM pg_catalog.pg_stop_backup(false)"; + + const char * const stop_backup_query = + is_exclusive ? + stop_exlusive_backup_query : + server_version >= 100000 ? + (is_started_on_replica ? + stop_backup_on_replica_query : + stop_backup_on_master_query + ) : + (is_started_on_replica ? + stop_backup_on_replica_before10_query : + stop_backup_on_master_before10_query + ); + bool sent = false; + + /* Make proper timestamp format for parse_time(recovery_time) */ + pgut_execute(conn, "SET datestyle = 'ISO, DMY';", 0, NULL); + // TODO: check result /* * send pg_stop_backup asynchronously because we could came @@ -1521,415 +1568,437 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb * postgres archive_command problem and in this case we will * wait for pg_stop_backup() forever. */ - - if (!pg_stop_backup_is_sent) - { - bool sent = false; - - if (!exclusive_backup) - { - /* - * Stop the non-exclusive backup. Besides stop_lsn it returns from - * pg_stop_backup(false) copy of the backup label and tablespace map - * so they can be written to disk by the caller. - * In case of backup from replica >= 9.6 we do not trust minRecPoint - * and stop_backup LSN, so we use latest replayed LSN as STOP LSN. 
- */ - - /* current is used here because of cleanup */ - if (current.from_replica) - stop_backup_query = "SELECT" - " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," - " current_timestamp(0)::timestamptz," -#if PG_VERSION_NUM >= 100000 - " pg_catalog.pg_last_wal_replay_lsn()," -#else - " pg_catalog.pg_last_xlog_replay_location()," -#endif - " labelfile," - " spcmapfile" -#if PG_VERSION_NUM >= 100000 - " FROM pg_catalog.pg_stop_backup(false, false)"; -#else - " FROM pg_catalog.pg_stop_backup(false)"; -#endif - else - stop_backup_query = "SELECT" - " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," - " current_timestamp(0)::timestamptz," - " lsn," - " labelfile," - " spcmapfile" -#if PG_VERSION_NUM >= 100000 - " FROM pg_catalog.pg_stop_backup(false, false)"; -#else - " FROM pg_catalog.pg_stop_backup(false)"; -#endif - - } - else - { - stop_backup_query = "SELECT" - " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," - " current_timestamp(0)::timestamptz," - " pg_catalog.pg_stop_backup() as lsn"; - } - - sent = pgut_send(conn, stop_backup_query, 0, NULL, WARNING); - pg_stop_backup_is_sent = true; - if (!sent) - elog(ERROR, "Failed to send pg_stop_backup query"); - } + sent = pgut_send(conn, stop_backup_query, 0, NULL, WARNING); + if (!sent) + elog(ERROR, "Failed to send pg_stop_backup query"); /* After we have sent pg_stop_backup, we don't need this callback anymore */ - instanceState->conn = pg_startbackup_conn; - pgut_atexit_pop(backup_stopbackup_callback, instanceState); + pgut_atexit_pop(backup_stopbackup_callback, &stop_callback_state); - /* - * Wait for the result of pg_stop_backup(), but no longer than - * archive_timeout seconds - */ - if (pg_stop_backup_is_sent && !in_cleanup) - { - int timeout = ARCHIVE_TIMEOUT_DEFAULT; - res = NULL; + if (query_text) + *query_text = pgut_strdup(stop_backup_query); +} - /* kludge against some old bug in archive_timeout. TODO: remove in 3.0.0 */ - if (instance_config.archive_timeout > 0) - timeout = instance_config.archive_timeout; +/* + * pg_stop_backup_consume -- get 'pg_stop_backup' query results + * side effects: + * - allocates memory for tablespace_map and backup_label contents, so it must freed by caller (if its not null) + * parameters: + * - + */ +static void +pg_stop_backup_consume(PGconn *conn, int server_version, + bool is_exclusive, uint32 timeout, const char *query_text, + struct pg_stop_backup_result *result) +{ + PGresult *query_result; + uint32 pg_stop_backup_timeout = 0; + enum stop_backup_query_result_column_numbers { + recovery_xid_colno = 0, + recovery_time_colno, + lsn_colno, + backup_label_colno, + tablespace_map_colno + }; + + /* and now wait */ + while (1) + { + if (!PQconsumeInput(conn)) + elog(ERROR, "pg_stop backup() failed: %s", + PQerrorMessage(conn)); - while (1) + if (PQisBusy(conn)) { - if (!PQconsumeInput(conn)) - elog(ERROR, "pg_stop backup() failed: %s", - PQerrorMessage(conn)); + pg_stop_backup_timeout++; + sleep(1); - if (PQisBusy(conn)) + if (interrupted) { - pg_stop_backup_timeout++; - sleep(1); - - if (interrupted) - { - pgut_cancel(conn); - elog(ERROR, "interrupted during waiting for pg_stop_backup"); - } + pgut_cancel(conn); + elog(ERROR, "interrupted during waiting for pg_stop_backup"); + } - if (pg_stop_backup_timeout == 1) - elog(INFO, "wait for pg_stop_backup()"); + if (pg_stop_backup_timeout == 1) + elog(INFO, "wait for pg_stop_backup()"); - /* - * If postgres haven't answered in archive_timeout seconds, - * send an interrupt. 
- */ - if (pg_stop_backup_timeout > timeout) - { - pgut_cancel(conn); - elog(ERROR, "pg_stop_backup doesn't answer in %d seconds, cancel it", timeout); - } - } - else + /* + * If postgres haven't answered in archive_timeout seconds, + * send an interrupt. + */ + if (pg_stop_backup_timeout > timeout) { - res = PQgetResult(conn); - break; + pgut_cancel(conn); + elog(ERROR, "pg_stop_backup doesn't answer in %d seconds, cancel it", timeout); } } - - /* Check successfull execution of pg_stop_backup() */ - if (!res) - elog(ERROR, "pg_stop backup() failed"); else { - switch (PQresultStatus(res)) - { - /* - * We should expect only PGRES_TUPLES_OK since pg_stop_backup - * returns tuples. - */ - case PGRES_TUPLES_OK: - break; - default: - elog(ERROR, "query failed: %s query was: %s", - PQerrorMessage(conn), stop_backup_query); - } - elog(INFO, "pg_stop backup() successfully executed"); + query_result = PQgetResult(conn); + break; } + } + /* Check successfull execution of pg_stop_backup() */ + if (!query_result) + elog(ERROR, "pg_stop_backup() failed"); + else + { + switch (PQresultStatus(query_result)) + { + /* + * We should expect only PGRES_TUPLES_OK since pg_stop_backup + * returns tuples. + */ + case PGRES_TUPLES_OK: + break; + default: + elog(ERROR, "query failed: %s query was: %s", + PQerrorMessage(conn), query_text); + } backup_in_progress = false; + elog(INFO, "pg_stop backup() successfully executed"); + } + + /* get results and fill result structure */ + /* get&check recovery_xid */ + if (sscanf(PQgetvalue(query_result, 0, recovery_xid_colno), XID_FMT, &result->snapshot_xid) != 1) + elog(ERROR, + "result of txid_snapshot_xmax() is invalid: %s", + PQgetvalue(query_result, 0, recovery_xid_colno)); + + /* get&check recovery_time */ + if (!parse_time(PQgetvalue(query_result, 0, recovery_time_colno), &result->invocation_time, true)) + elog(ERROR, + "result of current_timestamp is invalid: %s", + PQgetvalue(query_result, 0, recovery_time_colno)); + + /* get stop_backup_lsn */ + { + uint32 lsn_hi; + uint32 lsn_lo; // char *target_lsn = "2/F578A000"; // XLogDataFromLSN(target_lsn, &lsn_hi, &lsn_lo); /* Extract timeline and LSN from results of pg_stop_backup() */ - XLogDataFromLSN(PQgetvalue(res, 0, 2), &lsn_hi, &lsn_lo); + XLogDataFromLSN(PQgetvalue(query_result, 0, lsn_colno), &lsn_hi, &lsn_lo); /* Calculate LSN */ - stop_backup_lsn_tmp = ((uint64) lsn_hi) << 32 | lsn_lo; + result->lsn = ((uint64) lsn_hi) << 32 | lsn_lo; + } - /* It is ok for replica to return invalid STOP LSN - * UPD: Apparently it is ok even for a master. - */ - if (!XRecOffIsValid(stop_backup_lsn_tmp)) - { - char *xlog_path, - stream_xlog_path[MAXPGPATH]; - XLogSegNo segno = 0; - XLogRecPtr lsn_tmp = InvalidXLogRecPtr; + /* get backup_label_content */ + result->backup_label_content = NULL; + // if (!PQgetisnull(query_result, 0, backup_label_colno)) + if (!is_exclusive) + { + result->backup_label_content_len = PQgetlength(query_result, 0, backup_label_colno); + if (result->backup_label_content_len > 0) + result->backup_label_content = pgut_strndup(PQgetvalue(query_result, 0, backup_label_colno), + result->backup_label_content_len); + } else { + result->backup_label_content_len = 0; + } - /* - * Even though the value is invalid, it's expected postgres behaviour - * and we're trying to fix it below. 
- */ - elog(LOG, "Invalid offset in stop_lsn value %X/%X, trying to fix", - (uint32) (stop_backup_lsn_tmp >> 32), (uint32) (stop_backup_lsn_tmp)); + /* get tablespace_map_content */ + result->tablespace_map_content = NULL; + // if (!PQgetisnull(query_result, 0, tablespace_map_colno)) + if (!is_exclusive) + { + result->tablespace_map_content_len = PQgetlength(query_result, 0, tablespace_map_colno); + if (result->tablespace_map_content_len > 0) + result->tablespace_map_content = pgut_strndup(PQgetvalue(query_result, 0, tablespace_map_colno), + result->tablespace_map_content_len); + } else { + result->tablespace_map_content_len = 0; + } +} - /* - * Note: even with gdb it is very hard to produce automated tests for - * contrecord + invalid LSN, so emulate it for manual testing. - */ - //stop_backup_lsn_tmp = stop_backup_lsn_tmp - XLOG_SEG_SIZE; - //elog(WARNING, "New Invalid stop_backup_lsn value %X/%X", - // (uint32) (stop_backup_lsn_tmp >> 32), (uint32) (stop_backup_lsn_tmp)); +/* + * helper routine used to write backup_label and tablespace_map in pg_stop_backup() + */ +static void +pg_stop_backup_write_file_helper(const char *path, const char *filename, const char *error_msg_filename, + const void *data, size_t len, parray *file_list) +{ + FILE *fp; + pgFile *file; + char full_filename[MAXPGPATH]; + + join_path_components(full_filename, path, filename); + fp = fio_fopen(full_filename, PG_BINARY_W, FIO_BACKUP_HOST); + if (fp == NULL) + elog(ERROR, "can't open %s file \"%s\": %s", + error_msg_filename, full_filename, strerror(errno)); + + if (fio_fwrite(fp, data, len) != len || + fio_fflush(fp) != 0 || + fio_fclose(fp)) + elog(ERROR, "can't write %s file \"%s\": %s", + error_msg_filename, full_filename, strerror(errno)); - if (stream_wal) - { - snprintf(stream_xlog_path, lengthof(stream_xlog_path), - "%s/%s/%s/%s", instanceState->instance_backup_subdir_path, - base36enc(backup->start_time), - DATABASE_DIR, PG_XLOG_DIR); - xlog_path = stream_xlog_path; - } - else - xlog_path = instanceState->instance_wal_subdir_path; + /* + * It's vital to check if backup_files_list is initialized, + * because we could get here because the backup was interrupted + */ + if (file_list) + { + file = pgFileNew(full_filename, filename, true, 0, + FIO_BACKUP_HOST); - GetXLogSegNo(stop_backup_lsn_tmp, segno, instance_config.xlog_seg_size); + if (S_ISREG(file->mode)) + { + file->crc = pgFileGetCRC(full_filename, true, false); - /* - * Note, that there is no guarantee that corresponding WAL file even exists. - * Replica may return LSN from future and keep staying in present. - * Or it can return invalid LSN. - * - * That's bad, since we want to get real LSN to save it in backup label file - * and to use it in WAL validation. - * - * So we try to do the following: - * 1. Wait 'archive_timeout' seconds for segment containing stop_lsn and - * look for the first valid record in it. - * It solves the problem of occasional invalid LSN on write-busy system. - * 2. Failing that, look for record in previous segment with endpoint - * equal or greater than stop_lsn. It may(!) solve the problem of invalid LSN - * on write-idle system. If that fails too, error out. 
- */ + file->write_size = file->size; + file->uncompressed_size = file->size; + } + parray_append(file_list, file); + } +} - /* stop_lsn is pointing to a 0 byte of xlog segment */ - if (stop_backup_lsn_tmp % instance_config.xlog_seg_size == 0) - { - /* Wait for segment with current stop_lsn, it is ok for it to never arrive */ - wait_wal_lsn(instanceState, stop_backup_lsn_tmp, false, backup->tli, - false, true, WARNING, stream_wal, backup); - - /* Get the first record in segment with current stop_lsn */ - lsn_tmp = get_first_record_lsn(xlog_path, segno, backup->tli, - instance_config.xlog_seg_size, - instance_config.archive_timeout); - - /* Check that returned LSN is valid and greater than stop_lsn */ - if (XLogRecPtrIsInvalid(lsn_tmp) || - !XRecOffIsValid(lsn_tmp) || - lsn_tmp < stop_backup_lsn_tmp) - { - /* Backup from master should error out here */ - if (!backup->from_replica) - elog(ERROR, "Failed to get next WAL record after %X/%X", - (uint32) (stop_backup_lsn_tmp >> 32), - (uint32) (stop_backup_lsn_tmp)); - - /* No luck, falling back to looking up for previous record */ - elog(WARNING, "Failed to get next WAL record after %X/%X, " - "looking for previous WAL record", - (uint32) (stop_backup_lsn_tmp >> 32), - (uint32) (stop_backup_lsn_tmp)); - - /* Despite looking for previous record there is not guarantee of success - * because previous record can be the contrecord. - */ - lsn_tmp = wait_wal_lsn(instanceState, stop_backup_lsn_tmp, false, backup->tli, - true, false, ERROR, stream_wal, backup); - - /* sanity */ - if (!XRecOffIsValid(lsn_tmp) || XLogRecPtrIsInvalid(lsn_tmp)) - elog(ERROR, "Failed to get WAL record prior to %X/%X", - (uint32) (stop_backup_lsn_tmp >> 32), - (uint32) (stop_backup_lsn_tmp)); - } - } - /* stop lsn is aligned to xlog block size, just find next lsn */ - else if (stop_backup_lsn_tmp % XLOG_BLCKSZ == 0) - { - /* Wait for segment with current stop_lsn */ - wait_wal_lsn(instanceState, stop_backup_lsn_tmp, false, backup->tli, - false, true, ERROR, stream_wal, backup); +/* + * Notify end of backup to PostgreSQL server. + */ +static void +pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startbackup_conn, + PGNodeInfo *nodeInfo) +{ + PGconn *conn; + bool stop_lsn_exists = false; + struct pg_stop_backup_result stop_backup_result; + char *xlog_path,stream_xlog_path[MAXPGPATH]; + /* kludge against some old bug in archive_timeout. TODO: remove in 3.0.0 */ + int timeout = (instance_config.archive_timeout > 0) ? + instance_config.archive_timeout : ARCHIVE_TIMEOUT_DEFAULT; + char *query_text = NULL; + + /* Remove it ? 
*/ + if (!backup_in_progress) + elog(ERROR, "backup is not in progress"); - /* Get the next closest record in segment with current stop_lsn */ - lsn_tmp = get_next_record_lsn(xlog_path, segno, backup->tli, - instance_config.xlog_seg_size, - instance_config.archive_timeout, - stop_backup_lsn_tmp); + conn = pg_startbackup_conn; - /* sanity */ - if (!XRecOffIsValid(lsn_tmp) || XLogRecPtrIsInvalid(lsn_tmp)) - elog(ERROR, "Failed to get WAL record next to %X/%X", - (uint32) (stop_backup_lsn_tmp >> 32), - (uint32) (stop_backup_lsn_tmp)); - } - /* PostgreSQL returned something very illegal as STOP_LSN, error out */ - else - elog(ERROR, "Invalid stop_backup_lsn value %X/%X", - (uint32) (stop_backup_lsn_tmp >> 32), (uint32) (stop_backup_lsn_tmp)); + pg_silent_client_messages(conn); - /* Setting stop_backup_lsn will set stop point for streaming */ - stop_backup_lsn = lsn_tmp; - stop_lsn_exists = true; - } + /* Create restore point + * Only if backup is from master. + * For PG 9.5 create restore point only if pguser is superuser. + */ + if (!backup->from_replica && + !(nodeInfo->server_version < 90600 && + !nodeInfo->is_superuser)) //TODO: check correctness + pg_create_restore_point(conn, backup->start_time); - elog(LOG, "stop_lsn: %X/%X", - (uint32) (stop_backup_lsn_tmp >> 32), (uint32) (stop_backup_lsn_tmp)); + /* Execute pg_stop_backup using PostgreSQL connection */ + pg_stop_backup_send(conn, nodeInfo->server_version, current.from_replica, exclusive_backup, &query_text); - /* Write backup_label and tablespace_map */ - if (!exclusive_backup) - { - Assert(PQnfields(res) >= 4); - snprintf(path, lengthof(path), "%s/%s/%s", instanceState->instance_backup_subdir_path, - base36enc(backup->start_time), DATABASE_DIR); - - /* Write backup_label */ - join_path_components(backup_label, path, PG_BACKUP_LABEL_FILE); - fp = fio_fopen(backup_label, PG_BINARY_W, FIO_BACKUP_HOST); - if (fp == NULL) - elog(ERROR, "can't open backup label file \"%s\": %s", - backup_label, strerror(errno)); - - len = strlen(PQgetvalue(res, 0, 3)); - if (fio_fwrite(fp, PQgetvalue(res, 0, 3), len) != len || - fio_fflush(fp) != 0 || - fio_fclose(fp)) - elog(ERROR, "can't write backup label file \"%s\": %s", - backup_label, strerror(errno)); + /* + * Wait for the result of pg_stop_backup(), but no longer than + * archive_timeout seconds + */ + pg_stop_backup_consume(conn, nodeInfo->server_version, exclusive_backup, timeout, query_text, &stop_backup_result); - /* - * It's vital to check if backup_files_list is initialized, - * because we could get here because the backup was interrupted - */ - if (backup_files_list) - { - file = pgFileNew(backup_label, PG_BACKUP_LABEL_FILE, true, 0, - FIO_BACKUP_HOST); + /* It is ok for replica to return invalid STOP LSN + * UPD: Apparently it is ok even for a master. + */ + if (!XRecOffIsValid(stop_backup_result.lsn)) + { + char *xlog_path, + stream_xlog_path[MAXPGPATH]; + XLogSegNo segno = 0; + XLogRecPtr lsn_tmp = InvalidXLogRecPtr; - file->crc = pgFileGetCRC(backup_label, true, false); + /* + * Even though the value is invalid, it's expected postgres behaviour + * and we're trying to fix it below. + */ + elog(LOG, "Invalid offset in stop_lsn value %X/%X, trying to fix", + (uint32) (stop_backup_result.lsn >> 32), (uint32) (stop_backup_result.lsn)); - file->write_size = file->size; - file->uncompressed_size = file->size; - parray_append(backup_files_list, file); - } + /* + * Note: even with gdb it is very hard to produce automated tests for + * contrecord + invalid LSN, so emulate it for manual testing. 
+ */ + //stop_backup_result.lsn = stop_backup_result.lsn - XLOG_SEG_SIZE; + //elog(WARNING, "New Invalid stop_backup_lsn value %X/%X", + // (uint32) (stop_backup_result.lsn >> 32), (uint32) (stop_backup_result.lsn)); + + if (stream_wal) + { + snprintf(stream_xlog_path, lengthof(stream_xlog_path), + "%s/%s/%s/%s", instanceState->instance_backup_subdir_path, + base36enc(backup->start_time), + DATABASE_DIR, PG_XLOG_DIR); + xlog_path = stream_xlog_path; } + else + xlog_path = instanceState->instance_wal_subdir_path; + + GetXLogSegNo(stop_backup_result.lsn, segno, instance_config.xlog_seg_size); - if (sscanf(PQgetvalue(res, 0, 0), XID_FMT, &recovery_xid) != 1) - elog(ERROR, - "result of txid_snapshot_xmax() is invalid: %s", - PQgetvalue(res, 0, 0)); - if (!parse_time(PQgetvalue(res, 0, 1), &recovery_time, true)) - elog(ERROR, - "result of current_timestamp is invalid: %s", - PQgetvalue(res, 0, 1)); - - /* Get content for tablespace_map from stop_backup results - * in case of non-exclusive backup + /* + * Note, that there is no guarantee that corresponding WAL file even exists. + * Replica may return LSN from future and keep staying in present. + * Or it can return invalid LSN. + * + * That's bad, since we want to get real LSN to save it in backup label file + * and to use it in WAL validation. + * + * So we try to do the following: + * 1. Wait 'archive_timeout' seconds for segment containing stop_lsn and + * look for the first valid record in it. + * It solves the problem of occasional invalid LSN on write-busy system. + * 2. Failing that, look for record in previous segment with endpoint + * equal or greater than stop_lsn. It may(!) solve the problem of invalid LSN + * on write-idle system. If that fails too, error out. */ - if (!exclusive_backup) - val = PQgetvalue(res, 0, 4); - /* Write tablespace_map */ - if (!exclusive_backup && val && strlen(val) > 0) + /* stop_lsn is pointing to a 0 byte of xlog segment */ + if (stop_backup_result.lsn % instance_config.xlog_seg_size == 0) { - char tablespace_map[MAXPGPATH]; - - join_path_components(tablespace_map, path, PG_TABLESPACE_MAP_FILE); - fp = fio_fopen(tablespace_map, PG_BINARY_W, FIO_BACKUP_HOST); - if (fp == NULL) - elog(ERROR, "can't open tablespace map file \"%s\": %s", - tablespace_map, strerror(errno)); - - len = strlen(val); - if (fio_fwrite(fp, val, len) != len || - fio_fflush(fp) != 0 || - fio_fclose(fp)) - elog(ERROR, "can't write tablespace map file \"%s\": %s", - tablespace_map, strerror(errno)); - - if (backup_files_list) + /* Wait for segment with current stop_lsn, it is ok for it to never arrive */ + wait_wal_lsn(instanceState, stop_backup_result.lsn, false, backup->tli, + false, true, WARNING, stream_wal, backup); + + /* Get the first record in segment with current stop_lsn */ + lsn_tmp = get_first_record_lsn(xlog_path, segno, backup->tli, + instance_config.xlog_seg_size, + instance_config.archive_timeout); + + /* Check that returned LSN is valid and greater than stop_lsn */ + if (XLogRecPtrIsInvalid(lsn_tmp) || + !XRecOffIsValid(lsn_tmp) || + lsn_tmp < stop_backup_result.lsn) { - file = pgFileNew(tablespace_map, PG_TABLESPACE_MAP_FILE, true, 0, - FIO_BACKUP_HOST); - if (S_ISREG(file->mode)) - { - file->crc = pgFileGetCRC(tablespace_map, true, false); - file->write_size = file->size; - } + /* Backup from master should error out here */ + if (!backup->from_replica) + elog(ERROR, "Failed to get next WAL record after %X/%X", + (uint32) (stop_backup_result.lsn >> 32), + (uint32) (stop_backup_result.lsn)); + + /* No luck, falling back 
to looking up for previous record */ + elog(WARNING, "Failed to get next WAL record after %X/%X, " + "looking for previous WAL record", + (uint32) (stop_backup_result.lsn >> 32), + (uint32) (stop_backup_result.lsn)); + + /* Despite looking for previous record there is not guarantee of success + * because previous record can be the contrecord. + */ + lsn_tmp = wait_wal_lsn(instanceState, stop_backup_result.lsn, false, backup->tli, + true, false, ERROR, stream_wal, backup); - parray_append(backup_files_list, file); + /* sanity */ + if (!XRecOffIsValid(lsn_tmp) || XLogRecPtrIsInvalid(lsn_tmp)) + elog(ERROR, "Failed to get WAL record prior to %X/%X", + (uint32) (stop_backup_result.lsn >> 32), + (uint32) (stop_backup_result.lsn)); } } + /* stop lsn is aligned to xlog block size, just find next lsn */ + else if (stop_backup_result.lsn % XLOG_BLCKSZ == 0) + { + /* Wait for segment with current stop_lsn */ + wait_wal_lsn(instanceState, stop_backup_result.lsn, false, backup->tli, + false, true, ERROR, stream_wal, backup); + + /* Get the next closest record in segment with current stop_lsn */ + lsn_tmp = get_next_record_lsn(xlog_path, segno, backup->tli, + instance_config.xlog_seg_size, + instance_config.archive_timeout, + stop_backup_result.lsn); + + /* sanity */ + if (!XRecOffIsValid(lsn_tmp) || XLogRecPtrIsInvalid(lsn_tmp)) + elog(ERROR, "Failed to get WAL record next to %X/%X", + (uint32) (stop_backup_result.lsn >> 32), + (uint32) (stop_backup_result.lsn)); + } + /* PostgreSQL returned something very illegal as STOP_LSN, error out */ + else + elog(ERROR, "Invalid stop_backup_lsn value %X/%X", + (uint32) (stop_backup_result.lsn >> 32), (uint32) (stop_backup_result.lsn)); - if (tablespace_map_content) - PQclear(tablespace_map_content); - PQclear(res); + /* Setting stop_backup_lsn will set stop point for streaming */ + stop_backup_lsn = lsn_tmp; + stop_lsn_exists = true; } - /* Fill in fields if that is the correct end of backup. */ - if (backup != NULL) + elog(LOG, "stop_lsn: %X/%X", + (uint32) (stop_backup_result.lsn >> 32), (uint32) (stop_backup_result.lsn)); + + /* Write backup_label and tablespace_map */ + if (!exclusive_backup) { - char *xlog_path, - stream_xlog_path[MAXPGPATH]; + char path[MAXPGPATH]; - /* - * Wait for stop_lsn to be archived or streamed. - * If replica returned valid STOP_LSN of not actually existing record, - * look for previous record with endpoint >= STOP_LSN. 
- */ - if (!stop_lsn_exists) - stop_backup_lsn = wait_wal_lsn(instanceState, stop_backup_lsn_tmp, false, backup->tli, - false, false, ERROR, stream_wal, backup); + Assert(stop_backup_result.backup_label_content != NULL); + snprintf(path, lengthof(path), "%s/%s/%s", instanceState->instance_backup_subdir_path, + base36enc(backup->start_time), DATABASE_DIR); - if (stream_wal) + /* Write backup_label */ + pg_stop_backup_write_file_helper(path, PG_BACKUP_LABEL_FILE, "backup label", + stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, + backup_files_list); + free(stop_backup_result.backup_label_content); + stop_backup_result.backup_label_content = NULL; + stop_backup_result.backup_label_content_len = 0; + + /* Write tablespace_map */ + if (stop_backup_result.tablespace_map_content != NULL) { - /* This function will also add list of xlog files - * to the passed filelist */ - if(wait_WAL_streaming_end(backup_files_list)) - elog(ERROR, "WAL streaming failed"); + pg_stop_backup_write_file_helper(path, PG_TABLESPACE_MAP_FILE, "tablespace map", + stop_backup_result.tablespace_map_content, stop_backup_result.tablespace_map_content_len, + backup_files_list); + free(stop_backup_result.tablespace_map_content); + stop_backup_result.tablespace_map_content = NULL; + stop_backup_result.tablespace_map_content_len = 0; + } + } + + /* + * Wait for stop_lsn to be archived or streamed. + * If replica returned valid STOP_LSN of not actually existing record, + * look for previous record with endpoint >= STOP_LSN. + */ + if (!stop_lsn_exists) + stop_backup_lsn = wait_wal_lsn(instanceState, stop_backup_result.lsn, false, backup->tli, + false, false, ERROR, stream_wal, backup); - snprintf(stream_xlog_path, lengthof(stream_xlog_path), "%s/%s/%s/%s", - instanceState->instance_backup_subdir_path, base36enc(backup->start_time), - DATABASE_DIR, PG_XLOG_DIR); + if (stream_wal) + { + /* This function will also add list of xlog files + * to the passed filelist */ + if(wait_WAL_streaming_end(backup_files_list)) + elog(ERROR, "WAL streaming failed"); - xlog_path = stream_xlog_path; - } - else - xlog_path = instanceState->instance_wal_subdir_path; + snprintf(stream_xlog_path, lengthof(stream_xlog_path), "%s/%s/%s/%s", + instanceState->instance_backup_subdir_path, base36enc(backup->start_time), + DATABASE_DIR, PG_XLOG_DIR); - backup->stop_lsn = stop_backup_lsn; - backup->recovery_xid = recovery_xid; + xlog_path = stream_xlog_path; + } + else + xlog_path = instanceState->instance_wal_subdir_path; - elog(LOG, "Getting the Recovery Time from WAL"); + backup->stop_lsn = stop_backup_lsn; + backup->recovery_xid = stop_backup_result.snapshot_xid; - /* iterate over WAL from stop_backup lsn to start_backup lsn */ - if (!read_recovery_info(xlog_path, backup->tli, - instance_config.xlog_seg_size, - backup->start_lsn, backup->stop_lsn, - &backup->recovery_time)) - { - elog(LOG, "Failed to find Recovery Time in WAL, forced to trust current_timestamp"); - backup->recovery_time = recovery_time; - } + elog(LOG, "Getting the Recovery Time from WAL"); + + /* iterate over WAL from stop_backup lsn to start_backup lsn */ + if (!read_recovery_info(xlog_path, backup->tli, + instance_config.xlog_seg_size, + backup->start_lsn, backup->stop_lsn, + &backup->recovery_time)) + { + elog(LOG, "Failed to find Recovery Time in WAL, forced to trust current_timestamp"); + backup->recovery_time = stop_backup_result.invocation_time; } + + /* Cleanup */ + pg_free(query_text); } /* diff --git a/src/pg_probackup.h b/src/pg_probackup.h 
index 4a97cfd3e..d02bbb033 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -679,6 +679,11 @@ typedef struct BackupPageHeader2 uint16 checksum; } BackupPageHeader2; +typedef struct StopBackupCallbackState { + PGconn *conn; + int server_version; +} StopBackupCallbackState; + /* Special value for compressed_size field */ #define PageIsOk 0 #define SkipCurrentPage -1 diff --git a/src/utils/pgut.c b/src/utils/pgut.c index a1631b106..72f8a2705 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -3,7 +3,7 @@ * pgut.c * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2017-2019, Postgres Professional + * Portions Copyright (c) 2017-2021, Postgres Professional * *------------------------------------------------------------------------- */ @@ -902,6 +902,20 @@ pgut_strdup(const char *str) return ret; } +char * +pgut_strndup(const char *str, size_t n) +{ + char *ret; + + if (str == NULL) + return NULL; + + if ((ret = strndup(str, n)) == NULL) + elog(ERROR, "could not duplicate string \"%s\": %s", + str, strerror(errno)); + return ret; +} + FILE * pgut_fopen(const char *path, const char *mode, bool missing_ok) { diff --git a/src/utils/pgut.h b/src/utils/pgut.h index e6ccbf211..6b9e7d740 100644 --- a/src/utils/pgut.h +++ b/src/utils/pgut.h @@ -3,7 +3,7 @@ * pgut.h * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2017-2019, Postgres Professional + * Portions Copyright (c) 2017-2021, Postgres Professional * *------------------------------------------------------------------------- */ @@ -61,6 +61,7 @@ extern int pgut_wait(int num, PGconn *connections[], struct timeval *timeout); extern void *pgut_malloc(size_t size); extern void *pgut_realloc(void *p, size_t size); extern char *pgut_strdup(const char *str); +extern char *pgut_strndup(const char *str, size_t n); #define pgut_new(type) ((type *) pgut_malloc(sizeof(type))) #define pgut_newarray(type, n) ((type *) pgut_malloc(sizeof(type) * (n))) diff --git a/tests/archive.py b/tests/archive.py index 329c5d676..0eabe5b0c 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -733,7 +733,7 @@ def test_replica_archive(self): # to original data master.psql( "postgres", - "insert into t_heap as select i as id, md5(i::text) as text, " + "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(256,512) i") before = master.safe_psql("postgres", "SELECT * FROM t_heap") @@ -768,7 +768,7 @@ def test_replica_archive(self): # to original data master.psql( "postgres", - "insert into t_heap as select i as id, md5(i::text) as text, " + "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(512,80680) i") @@ -911,6 +911,11 @@ def test_basic_master_and_replica_concurrent_archiving(self): 'autovacuum': 'off', 'archive_timeout': '10s'}) + if self.get_version(master) < self.version_to_num('9.6.0'): + self.del_test_dir(module_name, fname) + return unittest.skip( + 'Skipped because backup from replica is not supported in PG 9.5') + replica = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'replica')) replica.cleanup() @@ -956,7 +961,7 @@ def test_basic_master_and_replica_concurrent_archiving(self): master.psql( "postgres", - "insert into t_heap as select i as id, md5(i::text) as text, " + "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as 
tsvector " "from generate_series(0,10000) i") diff --git a/tests/replica.py b/tests/replica.py index ce90ef96e..bab5b563b 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -149,7 +149,7 @@ def test_replica_stream_ptrack_backup(self): # to original data master.psql( "postgres", - "insert into t_heap as select i as id, md5(i::text) as text, " + "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(256,512) i") before = master.safe_psql("postgres", "SELECT * FROM t_heap") @@ -185,7 +185,7 @@ def test_replica_stream_ptrack_backup(self): # to original data master.psql( "postgres", - "insert into t_heap as select i as id, md5(i::text) as text, " + "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(512,768) i") @@ -279,7 +279,7 @@ def test_replica_archive_page_backup(self): # equal to original data master.psql( "postgres", - "insert into t_heap as select i as id, md5(i::text) as text, " + "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(256,25120) i") From d8050e5ce4e90fbf648877ce0f567479dcb22f5e Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Tue, 18 May 2021 14:59:00 +0300 Subject: [PATCH 145/525] [Issue #385] improve test coverage --- tests/backup.py | 112 ++++++++++++++++++++++++++++++++ tests/helpers/ptrack_helpers.py | 53 +++++++++++++++ 2 files changed, 165 insertions(+) diff --git a/tests/backup.py b/tests/backup.py index e3bfc84e4..72daaa544 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -3459,3 +3459,115 @@ def test_basic_backup_default_transaction_read_only(self): # Clean after yourself self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_backup_atexit(self): + """""" + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=5) + + # Full backup in streaming mode + gdb = self.backup_node( + backup_dir, 'node', node, + options=['--stream', '--log-level-file=VERBOSE'], gdb=True) + + # break at streaming start + gdb.set_breakpoint('backup_data_file') + gdb.run_until_break() + + gdb.remove_all_breakpoints() + gdb._execute('signal SIGINT') + sleep(1) + + self.assertEqual( + self.show_pb( + backup_dir, 'node')[0]['status'], 'ERROR') + + with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: + log_content = f.read() + #print(log_content) + self.assertIn( + 'WARNING: backup in progress, stop backup', + log_content) + + self.assertIn( + 'FROM pg_catalog.pg_stop_backup', + log_content) + + self.assertIn( + 'setting its status to ERROR', + log_content) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_pg_stop_backup_missing_permissions(self): + """""" + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + 
self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=5) + + self.simple_bootstrap(node, 'backup') + + if self.get_version(node) < 90600: + node.safe_psql( + 'postgres', + 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() FROM backup') + elif self.get_version(node) > 90600 and self.get_version(node) < 100000: + node.safe_psql( + 'postgres', + 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) FROM backup') + else: + node.safe_psql( + 'postgres', + 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) FROM backup') + + # Full backup in streaming mode + try: + self.backup_node( + backup_dir, 'node', node, + options=['--stream', '-U', 'backup']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of missing permissions on pg_stop_backup " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: permission denied for function pg_stop_backup", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + self.assertIn( + "query was: SELECT pg_catalog.txid_snapshot_xmax", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + # Clean after yourself + self.del_test_dir(module_name, fname) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index b0400a72d..8204ca3d1 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -405,6 +405,59 @@ def make_simple_node( self.set_auto_conf( node, {}, 'postgresql.conf', ['wal_keep_segments']) return node + + def simple_bootstrap(self, node, role) -> None: + + node.safe_psql( + 'postgres', + 'CREATE ROLE {0} WITH LOGIN REPLICATION'.format(role)) + + # PG 9.5 + if self.get_version(node) < 90600: + node.safe_psql( + 'postgres', + 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0};'.format(role)) + # PG 9.6 + elif self.get_version(node) > 90600 and self.get_version(node) < 100000: + node.safe_psql( + 'postgres', + 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' + 
'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) + # >= 10 + else: + node.safe_psql( + 'postgres', + 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False): res = node.execute( From ac2e7ccf1c5f72fbe452bb6110c98cf03077d575 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Mon, 24 May 2021 06:22:11 +0300 Subject: [PATCH 146/525] PGPRO-5018: Passing prev_backup_start_lsn (also known as horizonLsn) into fio_send_pages() and send_pages() in case of ptrack backup for additional verification of block changes on the server side (without test yet). --- src/data.c | 6 +++--- src/utils/file.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/data.c b/src/data.c index 4370bcbbc..0b02ff15b 100644 --- a/src/data.c +++ b/src/data.c @@ -459,7 +459,7 @@ prepare_page(ConnectionArgs *conn_arg, * Skip page if page lsn is less than START_LSN of parent backup. * Nullified pages must be copied by DELTA backup, just to be safe. */ - if (backup_mode == BACKUP_MODE_DIFF_DELTA && + if ((backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && file->exists_in_prev && page_st->lsn > 0 && page_st->lsn < prev_backup_start_lsn) @@ -603,7 +603,7 @@ backup_data_file(ConnectionArgs* conn_arg, pgFile *file, rc = fio_send_pages(to_fullpath, from_fullpath, file, /* send prev backup START_LSN */ - backup_mode == BACKUP_MODE_DIFF_DELTA && + (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr, calg, clevel, checksum_version, /* send pagemap if any */ @@ -616,7 +616,7 @@ backup_data_file(ConnectionArgs* conn_arg, pgFile *file, /* TODO: stop handling errors internally */ rc = send_pages(conn_arg, to_fullpath, from_fullpath, file, /* send prev backup START_LSN */ - backup_mode == BACKUP_MODE_DIFF_DELTA && + (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr, calg, clevel, checksum_version, use_pagemap, &headers, backup_mode, ptrack_version_num, ptrack_schema); diff --git a/src/utils/file.c b/src/utils/file.c index 634ddfba0..0bc4622c2 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1992,13 +1992,13 @@ static void fio_send_pages_impl(int out, char* buf) n_blocks_read++; /* - * horizonLsn is not 0 only in case of delta backup. + * horizonLsn is not 0 only in case of delta and ptrack backup. 
* As far as unsigned number are always greater or equal than zero, * there is no sense to add more checks. */ - if ((req->horizonLsn == InvalidXLogRecPtr) || /* full, page, ptrack */ + if ((req->horizonLsn == InvalidXLogRecPtr) || /* full, page */ (page_st.lsn == InvalidXLogRecPtr) || /* zeroed page */ - (req->horizonLsn > 0 && page_st.lsn > req->horizonLsn)) /* delta */ + (req->horizonLsn > 0 && page_st.lsn > req->horizonLsn)) /* delta, ptrack */ { int compressed_size = 0; char write_buffer[BLCKSZ*2]; From 012719d28633a53a45a1c41cde6d226354bed037 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 24 May 2021 02:16:31 +0300 Subject: [PATCH 147/525] fix valgrind alerts valgrind detected some uninitialized memory usage. Looks like shift_lsn one is a real bug. --- src/data.c | 41 ++++++++++++++++++++++++++--------------- src/restore.c | 2 +- src/util.c | 2 +- src/utils/pgut.c | 11 +++++++++++ src/utils/pgut.h | 2 ++ 5 files changed, 41 insertions(+), 17 deletions(-) diff --git a/src/data.c b/src/data.c index 4370bcbbc..d70aae8fd 100644 --- a/src/data.c +++ b/src/data.c @@ -2001,13 +2001,14 @@ send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const char *from_f { FILE *in = NULL; FILE *out = NULL; - int hdr_num = -1; off_t cur_pos_out = 0; char curr_page[BLCKSZ]; int n_blocks_read = 0; BlockNumber blknum = 0; datapagemap_iterator_t *iter = NULL; int compressed_size = 0; + BackupPageHeader2 *header = NULL; + parray *harray = NULL; /* stdio buffers */ char *in_buf = NULL; @@ -2046,6 +2047,8 @@ send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const char *from_f setvbuf(in, in_buf, _IOFBF, STDIO_BUFSIZE); } + harray = parray_new(); + while (blknum < file->n_blocks) { PageState page_st; @@ -2063,17 +2066,15 @@ send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const char *from_f if (!out) out = open_local_file_rw(to_fullpath, &out_buf, STDIO_BUFSIZE); - hdr_num++; - - if (!*headers) - *headers = (BackupPageHeader2 *) pgut_malloc(sizeof(BackupPageHeader2)); - else - *headers = (BackupPageHeader2 *) pgut_realloc(*headers, (hdr_num+1) * sizeof(BackupPageHeader2)); + header = pgut_new0(BackupPageHeader2); + *header = (BackupPageHeader2){ + .block = blknum, + .pos = cur_pos_out, + .lsn = page_st.lsn, + .checksum = page_st.checksum, + }; - (*headers)[hdr_num].block = blknum; - (*headers)[hdr_num].pos = cur_pos_out; - (*headers)[hdr_num].lsn = page_st.lsn; - (*headers)[hdr_num].checksum = page_st.checksum; + parray_append(harray, header); compressed_size = compress_and_backup_page(file, blknum, in, out, &(file->crc), rc, curr_page, calg, clevel, @@ -2098,12 +2099,22 @@ send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const char *from_f * Add dummy header, so we can later extract the length of last header * as difference between their offsets. */ - if (*headers) + if (parray_num(harray) > 0) { - file->n_headers = hdr_num +1; - *headers = (BackupPageHeader2 *) pgut_realloc(*headers, (hdr_num+2) * sizeof(BackupPageHeader2)); - (*headers)[hdr_num+1].pos = cur_pos_out; + size_t hdr_num = parray_num(harray); + size_t i; + + file->n_headers = (int) hdr_num; /* is it valid? 
*/ + *headers = (BackupPageHeader2 *) pgut_malloc0((hdr_num + 1) * sizeof(BackupPageHeader2)); + for (i = 0; i < hdr_num; i++) + { + header = (BackupPageHeader2 *)parray_get(harray, i); + (*headers)[i] = *header; + pg_free(header); + } + (*headers)[hdr_num] = (BackupPageHeader2){.pos=cur_pos_out}; } + parray_free(harray); /* cleanup */ if (in && fclose(in)) diff --git a/src/restore.c b/src/restore.c index 9594ef0b0..86317596e 100644 --- a/src/restore.c +++ b/src/restore.c @@ -557,8 +557,8 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt, elog(INFO, "shift LSN: %X/%X", (uint32) (shift_lsn >> 32), (uint32) shift_lsn); - params->shift_lsn = shift_lsn; } + params->shift_lsn = shift_lsn; /* for validation or restore with enabled validation */ if (!params->is_restore || !params->no_validate) diff --git a/src/util.c b/src/util.c index 946957819..87ec36713 100644 --- a/src/util.c +++ b/src/util.c @@ -136,7 +136,7 @@ writeControlFile(ControlFileData *ControlFile, const char *path, fio_location lo #endif /* copy controlFileSize */ - buffer = pg_malloc(ControlFileSize); + buffer = pg_malloc0(ControlFileSize); memcpy(buffer, ControlFile, sizeof(ControlFileData)); /* Write pg_control */ diff --git a/src/utils/pgut.c b/src/utils/pgut.c index a1631b106..eba31faa6 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -877,6 +877,17 @@ pgut_malloc(size_t size) return ret; } +void * +pgut_malloc0(size_t size) +{ + char *ret; + + ret = pgut_malloc(size); + memset(ret, 0, size); + + return ret; +} + void * pgut_realloc(void *p, size_t size) { diff --git a/src/utils/pgut.h b/src/utils/pgut.h index e6ccbf211..77337a945 100644 --- a/src/utils/pgut.h +++ b/src/utils/pgut.h @@ -59,10 +59,12 @@ extern int pgut_wait(int num, PGconn *connections[], struct timeval *timeout); * memory allocators */ extern void *pgut_malloc(size_t size); +extern void *pgut_malloc0(size_t size); extern void *pgut_realloc(void *p, size_t size); extern char *pgut_strdup(const char *str); #define pgut_new(type) ((type *) pgut_malloc(sizeof(type))) +#define pgut_new0(type) ((type *) pgut_malloc0(sizeof(type))) #define pgut_newarray(type, n) ((type *) pgut_malloc(sizeof(type) * (n))) /* From d3bbb74d01b2a78f6db79845b667c43bf53cdaab Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 25 May 2021 17:09:42 +0300 Subject: [PATCH 148/525] tests: disable autovacuum by default With autovacuum enabled, tests are unstable. They are especially unstable when PostgreSQL is running under valgrind and is therefore severely slowed down (so vacuum has time to be triggered). 
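A minimal, hypothetical sketch of the pattern applied here (not part of the patch itself): the shared test helper now carries the autovacuum default once, so individual tests can drop their per-node pg_options={'autovacuum': 'off'} override. The function name below is a stand-in; the real helper is make_simple_node() in tests/helpers/ptrack_helpers.py.

    # Hypothetical, self-contained stand-in for the helper's option handling.
    def make_node_options(pg_options=None):
        options = {
            'log_connections': 'on',
            'restart_after_crash': 'off',
            'autovacuum': 'off',        # the new default introduced by this patch
        }
        if pg_options:
            options.update(pg_options)  # a test may still set it back explicitly
        return options

    print(make_node_options({'wal_log_hints': 'on'}))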
--- tests/archive.py | 29 ++++--------- tests/backup.py | 20 +++------ tests/compatibility.py | 46 ++++++-------------- tests/delta.py | 19 +-------- tests/exclude.py | 1 - tests/external.py | 73 +++++++++---------------------- tests/helpers/ptrack_helpers.py | 1 + tests/incr_restore.py | 57 +++++++++---------------- tests/merge.py | 76 +++++++++------------------------ tests/page.py | 16 ++----- tests/ptrack.py | 68 +++++++++-------------------- tests/replica.py | 10 ++--- tests/restore.py | 23 +++------- tests/retention.py | 15 +++---- tests/show.py | 9 ++-- tests/time_stamp.py | 6 +-- 16 files changed, 134 insertions(+), 335 deletions(-) diff --git a/tests/archive.py b/tests/archive.py index 2ebe09b39..a7bc04e13 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -903,7 +903,6 @@ def test_basic_master_and_replica_concurrent_archiving(self): initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', - 'autovacuum': 'off', 'archive_timeout': '10s'}) replica = self.make_simple_node( @@ -1002,8 +1001,7 @@ def test_concurrent_archiving(self): master = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'master'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) @@ -1235,8 +1233,7 @@ def test_archive_catalog(self): initdb_params=['--data-checksums'], pg_options={ 'archive_timeout': '30s', - 'checkpoint_timeout': '30s', - 'autovacuum': 'off'}) + 'checkpoint_timeout': '30s'}) if self.get_version(master) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) @@ -1558,8 +1555,7 @@ def test_archive_catalog_1(self): initdb_params=['--data-checksums'], pg_options={ 'archive_timeout': '30s', - 'checkpoint_timeout': '30s', - 'autovacuum': 'off'}) + 'checkpoint_timeout': '30s'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1614,8 +1610,7 @@ def test_archive_catalog_2(self): initdb_params=['--data-checksums'], pg_options={ 'archive_timeout': '30s', - 'checkpoint_timeout': '30s', - 'autovacuum': 'off'}) + 'checkpoint_timeout': '30s'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1811,8 +1806,7 @@ def test_hexadecimal_timeline(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1875,7 +1869,6 @@ def test_archiving_and_slots(self): set_replication=True, initdb_params=['--data-checksums'], pg_options={ - 'autovacuum': 'off', 'checkpoint_timeout': '30s', 'max_wal_size': '64MB'}) @@ -2008,8 +2001,7 @@ def test_archive_pg_receivexlog_partial_handling(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2098,8 +2090,7 @@ def test_multi_timeline_recovery_prefetching(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2214,8 +2205,7 @@ def 
test_archive_get_batching_sanity(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) if self.get_version(node) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) @@ -2287,8 +2277,7 @@ def test_archive_get_prefetch_corruption(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) diff --git a/tests/backup.py b/tests/backup.py index e3bfc84e4..d713263c3 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -1401,8 +1401,7 @@ def test_drop_rel_during_backup_page(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1660,8 +1659,7 @@ def test_pg_11_adjusted_wal_segment_size(self): '--data-checksums', '--wal-segsize=64'], pg_options={ - 'min_wal_size': '128MB', - 'autovacuum': 'off'}) + 'min_wal_size': '128MB'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2576,9 +2574,7 @@ def test_issue_132(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2616,9 +2612,7 @@ def test_issue_132_1(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) # TODO: check version of old binary, it should be 2.1.4, 2.1.5 or 2.2.1 @@ -2963,8 +2957,7 @@ def test_issue_203(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -3004,8 +2997,7 @@ def test_issue_231(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) diff --git a/tests/compatibility.py b/tests/compatibility.py index da9d72f83..d0fae2528 100644 --- a/tests/compatibility.py +++ b/tests/compatibility.py @@ -19,9 +19,7 @@ def test_backward_compatibility_page(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir, old_binary=True) self.show_pb(backup_dir) @@ -156,8 +154,7 @@ def test_backward_compatibility_delta(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + 
initdb_params=['--data-checksums']) self.init_pb(backup_dir, old_binary=True) self.show_pb(backup_dir) @@ -296,9 +293,7 @@ def test_backward_compatibility_ptrack(self): base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir, old_binary=True) self.show_pb(backup_dir) @@ -408,9 +403,7 @@ def test_backward_compatibility_compression(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir, old_binary=True) self.add_instance(backup_dir, 'node', node, old_binary=True) @@ -572,9 +565,7 @@ def test_backward_compatibility_merge(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir, old_binary=True) self.add_instance(backup_dir, 'node', node, old_binary=True) @@ -630,8 +621,7 @@ def test_backward_compatibility_merge_1(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir, old_binary=True) self.add_instance(backup_dir, 'node', node, old_binary=True) @@ -703,8 +693,7 @@ def test_backward_compatibility_merge_2(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir, old_binary=True) self.add_instance(backup_dir, 'node', node, old_binary=True) @@ -826,8 +815,7 @@ def test_backward_compatibility_merge_3(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir, old_binary=True) self.add_instance(backup_dir, 'node', node, old_binary=True) @@ -953,8 +941,7 @@ def test_backward_compatibility_merge_4(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir, old_binary=True) self.add_instance(backup_dir, 'node', node, old_binary=True) @@ -1036,8 +1023,7 @@ def test_backward_compatibility_merge_5(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir, old_binary=True) self.add_instance(backup_dir, 'node', node, old_binary=True) @@ -1112,8 +1098,7 @@ def test_page_vacuum_truncate(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir, old_binary=True) self.add_instance(backup_dir, 'node', node, old_binary=True) @@ -1217,8 +1202,7 @@ def 
test_page_vacuum_truncate_compression(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir, old_binary=True) self.add_instance(backup_dir, 'node', node, old_binary=True) @@ -1297,8 +1281,7 @@ def test_page_vacuum_truncate_compressed_1(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir, old_binary=True) self.add_instance(backup_dir, 'node', node, old_binary=True) @@ -1403,8 +1386,7 @@ def test_hidden_files(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir, old_binary=True) self.add_instance(backup_dir, 'node', node, old_binary=True) diff --git a/tests/delta.py b/tests/delta.py index e18b8fb63..c2f58d10f 100644 --- a/tests/delta.py +++ b/tests/delta.py @@ -26,9 +26,7 @@ def test_basic_delta_vacuum_truncate(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) node_restored = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node_restored')) @@ -96,9 +94,6 @@ def test_delta_vacuum_truncate_1(self): base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off' - } ) node_restored = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node_restored'), @@ -183,9 +178,6 @@ def test_delta_vacuum_truncate_2(self): base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off' - } ) node_restored = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node_restored'), @@ -408,7 +400,6 @@ def test_delta_multiple_segments(self): 'fsync': 'off', 'shared_buffers': '1GB', 'maintenance_work_mem': '1GB', - 'autovacuum': 'off', 'full_page_writes': 'off' } ) @@ -566,7 +557,6 @@ def test_create_db(self): initdb_params=['--data-checksums'], pg_options={ 'max_wal_size': '10GB', - 'autovacuum': 'off' } ) @@ -693,7 +683,6 @@ def test_exists_in_previous_backup(self): pg_options={ 'max_wal_size': '10GB', 'checkpoint_timeout': '5min', - 'autovacuum': 'off' } ) @@ -798,7 +787,6 @@ def test_alter_table_set_tablespace_delta(self): set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', - 'autovacuum': 'off' } ) @@ -884,9 +872,6 @@ def test_alter_database_set_tablespace_delta(self): base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off' - } ) self.init_pb(backup_dir) @@ -976,7 +961,6 @@ def test_delta_delete(self): set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', - 'autovacuum': 'off' } ) @@ -1184,7 +1168,6 @@ def test_delta_pg_resetxlog(self): set_replication=True, initdb_params=['--data-checksums'], pg_options={ - 'autovacuum': 'off', 'shared_buffers': '512MB', 'max_wal_size': '3GB'}) diff 
--git a/tests/exclude.py b/tests/exclude.py index 7ee315fa5..83743bf0b 100644 --- a/tests/exclude.py +++ b/tests/exclude.py @@ -156,7 +156,6 @@ def test_exclude_unlogged_tables_1(self): set_replication=True, initdb_params=['--data-checksums'], pg_options={ - 'autovacuum': 'off', "shared_buffers": "10MB"}) self.init_pb(backup_dir) diff --git a/tests/external.py b/tests/external.py index 5658de2bf..5c970f57b 100644 --- a/tests/external.py +++ b/tests/external.py @@ -378,8 +378,7 @@ def test_external_backward_compatibility(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir, old_binary=True) self.show_pb(backup_dir) @@ -480,8 +479,7 @@ def test_external_backward_compatibility_merge_1(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir, old_binary=True) self.show_pb(backup_dir) @@ -573,8 +571,7 @@ def test_external_backward_compatibility_merge_2(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir, old_binary=True) self.show_pb(backup_dir) @@ -695,8 +692,7 @@ def test_external_merge(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node, old_binary=True) @@ -783,9 +779,7 @@ def test_external_merge_skip_external_dirs(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -883,9 +877,7 @@ def test_external_merge_1(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -965,8 +957,7 @@ def test_external_merge_3(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1059,9 +1050,7 @@ def test_external_merge_2(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1155,9 +1144,7 @@ def test_restore_external_changed_data(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) 
self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1257,7 +1244,6 @@ def test_restore_external_changed_data_1(self): set_replication=True, initdb_params=['--data-checksums'], pg_options={ - 'autovacuum': 'off', 'max_wal_size': '32MB'}) self.init_pb(backup_dir) @@ -1365,7 +1351,6 @@ def test_merge_external_changed_data(self): set_replication=True, initdb_params=['--data-checksums'], pg_options={ - 'autovacuum': 'off', 'max_wal_size': '32MB'}) self.init_pb(backup_dir) @@ -1469,9 +1454,7 @@ def test_restore_skip_external(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1549,9 +1532,7 @@ def test_external_dir_is_symlink(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1802,9 +1783,7 @@ def test_external_dir_is_tablespace(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1853,9 +1832,7 @@ def test_restore_external_dir_not_empty(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1936,9 +1913,7 @@ def test_restore_external_dir_is_missing(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2023,9 +1998,7 @@ def test_merge_external_dir_is_missing(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2111,9 +2084,7 @@ def test_restore_external_dir_is_empty(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2177,9 +2148,7 @@ def test_merge_external_dir_is_empty(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2246,9 +2215,7 @@ def test_restore_external_dir_string_order(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 
'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2328,9 +2295,7 @@ def test_merge_external_dir_string_order(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index b0400a72d..3caba25df 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -375,6 +375,7 @@ def make_simple_node( options['log_connections'] = 'on' options['log_disconnections'] = 'on' options['restart_after_crash'] = 'off' + options['autovacuum'] = 'off' # Allow replication in pg_hba.conf if set_replication: diff --git a/tests/incr_restore.py b/tests/incr_restore.py index 4838fefc9..cb684a23a 100644 --- a/tests/incr_restore.py +++ b/tests/incr_restore.py @@ -23,8 +23,7 @@ def test_basic_incr_restore(self): fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -86,8 +85,7 @@ def test_basic_incr_restore_into_missing_directory(self): fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -136,8 +134,7 @@ def test_checksum_corruption_detection(self): fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -237,8 +234,7 @@ def test_incr_restore_with_tablespace_1(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums'], - set_replication=True, - pg_options={'autovacuum': 'off'}) + set_replication=True) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -299,8 +295,7 @@ def test_incr_restore_with_tablespace_2(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums'], - set_replication=True, - pg_options={'autovacuum': 'off'}) + set_replication=True) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -662,8 +657,7 @@ def test_basic_incr_restore_sanity(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums'], - set_replication=True, - pg_options={'autovacuum': 'off'}) + set_replication=True) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -735,7 +729,7 @@ def test_incr_checksum_restore(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off', 'wal_log_hints': 'on'}) + 
pg_options={'wal_log_hints': 'on'}) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -825,7 +819,7 @@ def test_incr_lsn_restore(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off', 'wal_log_hints': 'on'}) + pg_options={'wal_log_hints': 'on'}) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -914,7 +908,7 @@ def test_incr_lsn_sanity(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off', 'wal_log_hints': 'on'}) + pg_options={'wal_log_hints': 'on'}) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -981,8 +975,7 @@ def test_incr_checksum_sanity(self): fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -1039,7 +1032,7 @@ def test_incr_checksum_corruption_detection(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), # initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off', 'wal_log_hints': 'on'}) + pg_options={'wal_log_hints': 'on'}) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -1097,7 +1090,7 @@ def test_incr_lsn_corruption_detection(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off', 'wal_log_hints': 'on'}) + pg_options={'wal_log_hints': 'on'}) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -1154,8 +1147,7 @@ def test_incr_restore_multiple_external(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -1226,8 +1218,7 @@ def test_incr_lsn_restore_multiple_external(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -1300,7 +1291,7 @@ def test_incr_lsn_restore_backward(self): base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off', 'wal_log_hints': 'on', 'hot_standby': 'on'}) + pg_options={'wal_log_hints': 'on', 'hot_standby': 'on'}) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -1409,7 +1400,6 @@ def test_incr_checksum_restore_backward(self): set_replication=True, initdb_params=['--data-checksums'], pg_options={ - 'autovacuum': 'off', 'hot_standby': 'on'}) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -1645,10 +1635,7 @@ def test_incr_checksum_long_xact(self): fname = self.id().split('.')[3] node = self.make_simple_node( 
base_dir=os.path.join(module_name, fname, 'node'), - set_replication=True, -# initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + set_replication=True) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -1721,10 +1708,7 @@ def test_incr_lsn_long_xact_1(self): fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - set_replication=True, -# initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + set_replication=True) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -1804,7 +1788,6 @@ def test_incr_lsn_long_xact_2(self): set_replication=True, initdb_params=['--data-checksums'], pg_options={ - 'autovacuum': 'off', 'full_page_writes': 'off', 'wal_log_hints': 'off'}) @@ -1890,8 +1873,7 @@ def test_incr_restore_zero_size_file_checksum(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -1964,8 +1946,7 @@ def test_incr_restore_zero_size_file_lsn(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) diff --git a/tests/merge.py b/tests/merge.py index 186b2f203..668691fc8 100644 --- a/tests/merge.py +++ b/tests/merge.py @@ -175,8 +175,7 @@ def test_merge_compressed_backups_1(self): # Initialize instance and backup directory node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - set_replication=True, initdb_params=["--data-checksums"], - pg_options={'autovacuum': 'off'}) + set_replication=True, initdb_params=["--data-checksums"]) self.init_pb(backup_dir) self.add_instance(backup_dir, "node", node) @@ -248,9 +247,6 @@ def test_merge_compressed_and_uncompressed_backups(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, initdb_params=["--data-checksums"], - pg_options={ - 'autovacuum': 'off' - } ) self.init_pb(backup_dir) @@ -323,9 +319,6 @@ def test_merge_compressed_and_uncompressed_backups_1(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, initdb_params=["--data-checksums"], - pg_options={ - 'autovacuum': 'off' - } ) self.init_pb(backup_dir) @@ -400,9 +393,6 @@ def test_merge_compressed_and_uncompressed_backups_2(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, initdb_params=["--data-checksums"], - pg_options={ - 'autovacuum': 'off' - } ) self.init_pb(backup_dir) @@ -478,9 +468,6 @@ def test_merge_tablespaces(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off' - } ) self.init_pb(backup_dir) @@ -556,9 +543,6 @@ def test_merge_tablespaces_1(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off' - } ) 
self.init_pb(backup_dir) @@ -640,8 +624,7 @@ def test_merge_page_truncate(self): set_replication=True, initdb_params=['--data-checksums'], pg_options={ - 'checkpoint_timeout': '300s', - 'autovacuum': 'off'}) + 'checkpoint_timeout': '300s'}) node_restored = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node_restored')) @@ -730,8 +713,7 @@ def test_merge_delta_truncate(self): set_replication=True, initdb_params=['--data-checksums'], pg_options={ - 'checkpoint_timeout': '300s', - 'autovacuum': 'off'}) + 'checkpoint_timeout': '300s'}) node_restored = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node_restored')) @@ -822,8 +804,7 @@ def test_merge_ptrack_truncate(self): base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], - ptrack_enable=True, - pg_options={'autovacuum': 'off'}) + ptrack_enable=True) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -918,7 +899,6 @@ def test_merge_delta_delete(self): set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', - 'autovacuum': 'off' } ) @@ -1435,8 +1415,7 @@ def test_crash_after_opening_backup_control_1(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1488,8 +1467,7 @@ def test_crash_after_opening_backup_control_2(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1580,8 +1558,7 @@ def test_losing_file_after_failed_merge(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1668,8 +1645,7 @@ def test_failed_merge_after_delete(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1750,8 +1726,7 @@ def test_failed_merge_after_delete_1(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1827,8 +1802,7 @@ def test_failed_merge_after_delete_2(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1890,8 +1864,7 @@ def test_failed_merge_after_delete_3(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) 
self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1981,8 +1954,7 @@ def test_merge_backup_from_future(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -2315,8 +2287,7 @@ def test_idempotent_merge(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2393,8 +2364,7 @@ def test_merge_correct_inheritance(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2448,8 +2418,7 @@ def test_merge_correct_inheritance_1(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2506,8 +2475,7 @@ def test_multi_timeline_merge(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2618,8 +2586,7 @@ def test_merge_page_header_map_retry(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2665,8 +2632,7 @@ def test_missing_data_file(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2724,8 +2690,7 @@ def test_missing_non_data_file(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2782,8 +2747,7 @@ def test_merge_remote_mode(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) diff --git a/tests/page.py b/tests/page.py index 8208e8319..c1cba6b40 100644 --- a/tests/page.py +++ b/tests/page.py @@ -27,8 +27,7 @@ def test_basic_page_vacuum_truncate(self): set_replication=True, initdb_params=['--data-checksums'], pg_options={ - 'checkpoint_timeout': '300s', - 'autovacuum': 'off'}) + 'checkpoint_timeout': '300s'}) node_restored = 
self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node_restored')) @@ -115,8 +114,7 @@ def test_page_vacuum_truncate_1(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -373,7 +371,6 @@ def test_page_multiple_segments(self): 'fsync': 'off', 'shared_buffers': '1GB', 'maintenance_work_mem': '1GB', - 'autovacuum': 'off', 'full_page_writes': 'off'}) self.init_pb(backup_dir) @@ -447,7 +444,6 @@ def test_page_delete(self): set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', - 'autovacuum': 'off' } ) @@ -521,7 +517,6 @@ def test_page_delete_1(self): initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', - 'autovacuum': 'off' } ) @@ -1074,7 +1069,6 @@ def test_page_create_db(self): pg_options={ 'max_wal_size': '10GB', 'checkpoint_timeout': '5min', - 'autovacuum': 'off' } ) @@ -1190,8 +1184,7 @@ def test_multi_timeline_page(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1330,7 +1323,7 @@ def test_multitimeline_page_1(self): base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off', 'wal_log_hints': 'on'}) + pg_options={'wal_log_hints': 'on'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1403,7 +1396,6 @@ def test_page_pg_resetxlog(self): set_replication=True, initdb_params=['--data-checksums'], pg_options={ - 'autovacuum': 'off', 'shared_buffers': '512MB', 'max_wal_size': '3GB'}) diff --git a/tests/ptrack.py b/tests/ptrack.py index de76d1d36..aa0bbadc1 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -66,8 +66,7 @@ def test_ptrack_multi_timeline_backup(self): base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -149,8 +148,7 @@ def test_ptrack_multi_timeline_backup_1(self): base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -301,8 +299,7 @@ def test_ptrack_simple(self): base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -666,8 +663,7 @@ def test_ptrack_uncommitted_xact(self): ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ - 'wal_level': 'replica', - 'autovacuum': 'off'}) + 'wal_level': 'replica'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -815,8 +811,7 @@ def test_ptrack_vacuum_truncate(self): base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, ptrack_enable=True, - 
initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -974,8 +969,7 @@ def test_ptrack_stream(self): ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ - 'checkpoint_timeout': '30s', - 'autovacuum': 'off'}) + 'checkpoint_timeout': '30s'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1066,8 +1060,7 @@ def test_ptrack_archive(self): ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ - 'checkpoint_timeout': '30s', - 'autovacuum': 'off'}) + 'checkpoint_timeout': '30s'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1183,8 +1176,7 @@ def test_ptrack_pgpro417(self): ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ - 'checkpoint_timeout': '30s', - 'autovacuum': 'off'}) + 'checkpoint_timeout': '30s'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1266,8 +1258,7 @@ def test_page_pgpro417(self): ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ - 'checkpoint_timeout': '30s', - 'autovacuum': 'off'}) + 'checkpoint_timeout': '30s'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1337,8 +1328,7 @@ def test_full_pgpro417(self): ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ - 'checkpoint_timeout': '30s', - 'autovacuum': 'off'}) + 'checkpoint_timeout': '30s'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1410,8 +1400,7 @@ def test_create_db(self): ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ - 'max_wal_size': '10GB', - 'autovacuum': 'off'}) + 'max_wal_size': '10GB'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1530,8 +1519,7 @@ def test_create_db_on_replica(self): ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ - 'checkpoint_timeout': '30s', - 'autovacuum': 'off'}) + 'checkpoint_timeout': '30s'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1630,8 +1618,7 @@ def test_alter_table_set_tablespace_ptrack(self): ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ - 'checkpoint_timeout': '30s', - 'autovacuum': 'off'}) + 'checkpoint_timeout': '30s'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1724,8 +1711,7 @@ def test_alter_database_set_tablespace_ptrack(self): ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ - 'checkpoint_timeout': '30s', - 'autovacuum': 'off'}) + 'checkpoint_timeout': '30s'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1796,8 +1782,7 @@ def test_drop_tablespace(self): ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ - 'checkpoint_timeout': '30s', - 'autovacuum': 'off'}) + 'checkpoint_timeout': '30s'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1893,8 +1878,7 @@ def test_ptrack_alter_tablespace(self): ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ - 'checkpoint_timeout': '30s', - 'autovacuum': 'off'}) + 'checkpoint_timeout': '30s'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2011,7 +1995,6 @@ def test_ptrack_multiple_segments(self): ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ - 'autovacuum': 'off', 'full_page_writes': 'off'}) self.init_pb(backup_dir) @@ -2768,9 +2751,7 @@ def test_ptrack_empty(self): 
base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -2851,8 +2832,7 @@ def test_ptrack_empty_replica(self): base_dir=os.path.join(module_name, fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], - ptrack_enable=True, - pg_options={'autovacuum': 'off'}) + ptrack_enable=True) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -3031,8 +3011,7 @@ def test_basic_ptrack_truncate_replica(self): pg_options={ 'max_wal_size': '32MB', 'archive_timeout': '10s', - 'checkpoint_timeout': '5min', - 'autovacuum': 'off'}) + 'checkpoint_timeout': '5min'}) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -3692,8 +3671,7 @@ def test_ptrack_vacuum_full_replica(self): base_dir=os.path.join(module_name, fname, 'master'), set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -3982,9 +3960,7 @@ def test_ptrack_recovery(self): base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -4048,7 +4024,6 @@ def test_ptrack_recovery_1(self): ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ - 'autovacuum': 'off', 'shared_buffers': '512MB', 'max_wal_size': '3GB'}) @@ -4181,7 +4156,6 @@ def test_ptrack_pg_resetxlog(self): ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ - 'autovacuum': 'off', 'shared_buffers': '512MB', 'max_wal_size': '3GB'}) diff --git a/tests/replica.py b/tests/replica.py index ce90ef96e..d59b11dbf 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -983,7 +983,6 @@ def test_replica_toast(self): set_replication=True, initdb_params=['--data-checksums'], pg_options={ - 'autovacuum': 'off', 'checkpoint_timeout': '1h', 'wal_level': 'replica', 'shared_buffers': '128MB'}) @@ -1084,7 +1083,6 @@ def test_start_stop_lsn_in_the_same_segno(self): set_replication=True, initdb_params=['--data-checksums'], pg_options={ - 'autovacuum': 'off', 'checkpoint_timeout': '1h', 'wal_level': 'replica', 'shared_buffers': '128MB'}) @@ -1293,8 +1291,7 @@ def test_replica_promote_archive_delta(self): initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', - 'archive_timeout': '30s', - 'autovacuum': 'off'}) + 'archive_timeout': '30s'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node1) @@ -1414,8 +1411,7 @@ def test_replica_promote_archive_page(self): initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', - 'archive_timeout': '30s', - 'autovacuum': 'off'}) + 'archive_timeout': '30s'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node1) @@ -1645,7 +1641,7 @@ def test_replica_via_basebackup(self): base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off', 'hot_standby': 'on'}) + pg_options={'hot_standby': 'on'}) 
self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) diff --git a/tests/restore.py b/tests/restore.py index 2a11a27a4..61aae9285 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -1028,8 +1028,7 @@ def test_restore_with_missing_or_corrupted_tablespace_map(self): fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -1488,7 +1487,6 @@ def test_zags_block_corrupt_1(self): base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums'], pg_options={ - 'autovacuum': 'off', 'full_page_writes': 'on'} ) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -1864,9 +1862,7 @@ def test_restore_backup_from_future(self): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'autovacuum': 'off'}) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -3177,8 +3173,7 @@ def test_missing_database_map(self): base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -3580,8 +3575,7 @@ def test_issue_249(self): backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -3659,8 +3653,7 @@ def test_pg_12_probackup_recovery_conf_compatibility(self): backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -3729,8 +3722,7 @@ def test_drop_postgresql_auto_conf(self): backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -3775,8 +3767,7 @@ def test_truncate_postgresql_auto_conf(self): backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) diff --git a/tests/retention.py b/tests/retention.py index 75b19c28a..19204807b 100644 --- a/tests/retention.py +++ b/tests/retention.py @@ -619,8 +619,7 @@ def test_window_merge_interleaved_incremental_chains_1(self): fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - 
initdb_params=['--data-checksums'], - pg_options={'autovacuum':'off'}) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -765,8 +764,7 @@ def test_basic_window_merge_multiple_descendants(self): fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -1026,8 +1024,7 @@ def test_basic_window_merge_multiple_descendants_1(self): fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -1292,8 +1289,7 @@ def test_window_chains(self): fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -1855,8 +1851,7 @@ def test_wal_depth_1(self): initdb_params=['--data-checksums'], pg_options={ 'archive_timeout': '30s', - 'checkpoint_timeout': '30s', - 'autovacuum': 'off'}) + 'checkpoint_timeout': '30s'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) diff --git a/tests/show.py b/tests/show.py index 3fdd85d04..2a13a768b 100644 --- a/tests/show.py +++ b/tests/show.py @@ -216,8 +216,7 @@ def test_corrupt_correctness(self): backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -323,8 +322,7 @@ def test_corrupt_correctness_1(self): backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -434,8 +432,7 @@ def test_corrupt_correctness_2(self): backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) diff --git a/tests/time_stamp.py b/tests/time_stamp.py index 4a4198c27..c49d183da 100644 --- a/tests/time_stamp.py +++ b/tests/time_stamp.py @@ -115,8 +115,7 @@ def test_dst_timezone_handling(self): backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -191,8 +190,7 @@ def 
test_dst_timezone_handling_backward_compatibilty(self): backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) + initdb_params=['--data-checksums']) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) From 4188a699f220b78e76d5398c4a4f62044d7f37dc Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Wed, 26 May 2021 12:57:10 +0300 Subject: [PATCH 149/525] Version 2.4.16 --- src/pg_probackup.h | 2 +- tests/expected/option_version.out | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index f5bac64a0..746b0f5a5 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -308,7 +308,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.4.15" +#define PROGRAM_VERSION "2.4.16" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20409 diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index 05a8660ab..1330acb5a 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.4.15 \ No newline at end of file +pg_probackup 2.4.16 \ No newline at end of file From 3c66873f57ec89a78cde126df9f61560b0936a0e Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 27 May 2021 02:25:56 +0300 Subject: [PATCH 150/525] [PR #386] fix --- src/data.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/data.c b/src/data.c index 601e4f674..2a8a42764 100644 --- a/src/data.c +++ b/src/data.c @@ -335,8 +335,8 @@ prepare_page(ConnectionArgs *conn_arg, return PageIsOk; case PAGE_IS_VALID: - /* in DELTA mode we must compare lsn */ - if (backup_mode == BACKUP_MODE_DIFF_DELTA) + /* in DELTA or PTRACK modes we must compare lsn */ + if (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) page_is_valid = true; else return PageIsOk; From 151d4999b0f78994dbfbb8f040aa86426d9fad98 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 27 May 2021 02:27:19 +0300 Subject: [PATCH 151/525] [PR #386] test coverage --- tests/ptrack.py | 75 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/tests/ptrack.py b/tests/ptrack.py index aa0bbadc1..011f8754a 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -4436,3 +4436,78 @@ def test_corrupt_ptrack_map(self): # Clean after yourself self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_horizon_lsn_ptrack(self): + """ + https://github.com/postgrespro/pg_probackup/pull/386 + """ + + if self.pg_config_version < self.version_to_num('11.0'): + return unittest.skip("You need PostgreSQL >= 11 for this test") + + self.assertLessEqual( + self.version_to_num(self.old_probackup_version), + self.version_to_num('2.4.15'), + 'You need pg_probackup old_binary =< 2.4.15 for this test') + + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + 
self.add_instance(backup_dir, 'node', node) + node.slow_start() + + if node.major_version >= 11: + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + # TODO: ptrack version must be 2.1 + ptrack_version = node.safe_psql( + "postgres", + "SELECT extversion " + "FROM pg_catalog.pg_extension WHERE extname = 'ptrack'").decode('utf-8').rstrip() + + self.assertEqual( + ptrack_version, + "2.1", + "You need ptrack 2.1 for this test") + + # set map_size to a minimal value + self.set_auto_conf(node, {'ptrack.map_size': '1'}) + node.restart() + + node.pgbench_init(scale=100) + + # FULL backup + full_id = self.backup_node(backup_dir, 'node', node, options=['--stream'], old_binary=True) + + # enable archiving so the WAL size does not interfere with data bytes comparison later + self.set_archiving(backup_dir, 'node', node) + node.restart() + + # change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # DELTA backup is used as a reference + delta_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta') + delta_bytes = self.show_pb(backup_dir, 'node', backup_id=delta_id)["data-bytes"] + self.delete_pb(backup_dir, 'node', backup_id=delta_id) + + # PTRACK with current binary + ptrack_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack') + ptrack_bytes = self.show_pb(backup_dir, 'node', backup_id=ptrack_id)["data-bytes"] + + # make sure that backup size is exactly the same + self.assertEqual(delta_bytes, ptrack_bytes) + + # Clean after yourself + self.del_test_dir(module_name, fname) From 3013a783f33a10d13911d2a9ae79cff433384adf Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Thu, 27 May 2021 03:14:15 +0300 Subject: [PATCH 152/525] Release 2 5 pg stop backup decomposition2 (#387) * Rename pg_checksum_enable() to pg_is_checksum_enabled * Remove unused instanceState from pg_start_backup() * Refactor wait_wal_lsn(): remove unused pgBackup * parameter and replace InstanceState * with simple directory string * Refactor pg_stop_backup(): remove useless conn variable * Make some functions and variables (from backup.c) accessible from other compilation units * Remove some references to global stream_wal variable * Remove unused variable externaldir * Yet another split of pg_stop_backup(): separate verification of stop_lsn into wait_wal_and_calculate_stop_lsn() * Create pfilearray_clear_locks() helper function --- src/backup.c | 425 ++++++++++++++++++++------------------- src/checkdb.c | 3 +- src/dir.c | 16 ++ src/pg_probackup.c | 2 - src/pg_probackup.h | 45 ++++- src/restore.c | 8 +- src/utils/parray.c | 2 +- src/validate.c | 6 +- 8 files changed, 257 insertions(+), 250 deletions(-) diff --git a/src/backup.c b/src/backup.c index 46e3ba482..21df1d95e 100644 --- a/src/backup.c +++ b/src/backup.c @@ -27,7 +27,7 @@ //const char *progname = "pg_probackup"; /* list of files contained in backup */ -static parray *backup_files_list = NULL; +parray *backup_files_list = NULL; /* We need critical section for datapagemap_add() in case of using threads */ static pthread_mutex_t backup_pagemap_mutex = PTHREAD_MUTEX_INITIALIZER; @@ -36,21 +36,7 @@ static pthread_mutex_t backup_pagemap_mutex = PTHREAD_MUTEX_INITIALIZER; bool exclusive_backup = false; /* Is pg_start_backup() was executed */ -static bool backup_in_progress = false; - -struct pg_stop_backup_result { - /* - * We will use values of snapshot_xid and invocation_time if there are - * no transactions between start_lsn and stop_lsn.
- */ - TransactionId snapshot_xid; - time_t invocation_time; - XLogRecPtr lsn; - size_t backup_label_content_len; - char *backup_label_content; - size_t tablespace_map_content_len; - char *tablespace_map_content; -}; +bool backup_in_progress = false; /* * Backup routines @@ -62,18 +48,9 @@ static void *backup_files(void *arg); static void do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool backup_logs); -static void pg_start_backup(InstanceState *instanceState, const char *label, bool smooth, pgBackup *backup, - PGNodeInfo *nodeInfo, PGconn *conn); static void pg_switch_wal(PGconn *conn); -static void pg_silent_client_messages(PGconn *conn); -static void pg_create_restore_point(PGconn *conn, time_t backup_start_time); static void pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startbackup_conn, PGNodeInfo *nodeInfo); -static void pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, bool is_exclusive, char **query_text); - -static XLogRecPtr wait_wal_lsn(InstanceState *instanceState, XLogRecPtr lsn, bool is_start_lsn, TimeLineID tli, - bool in_prev_segment, bool segment_only, - int timeout_elevel, bool in_stream_dir, pgBackup *backup); static void check_external_for_tablespaces(parray *external_list, PGconn *backup_conn); @@ -83,19 +60,19 @@ static parray *get_database_map(PGconn *pg_startbackup_conn); static bool pgpro_support(PGconn *conn); /* Check functions */ -static bool pg_checksum_enable(PGconn *conn); +static bool pg_is_checksum_enabled(PGconn *conn); static bool pg_is_in_recovery(PGconn *conn); static bool pg_is_superuser(PGconn *conn); static void check_server_version(PGconn *conn, PGNodeInfo *nodeInfo); static void confirm_block_size(PGconn *conn, const char *name, int blcksz); static void set_cfs_datafiles(parray *files, const char *root, char *relative, size_t i); -static StopBackupCallbackState stop_callback_state; +static StopBackupCallbackParams stop_callback_params; static void backup_stopbackup_callback(bool fatal, void *userdata) { - StopBackupCallbackState *st = (StopBackupCallbackState *) userdata; + StopBackupCallbackParams *st = (StopBackupCallbackParams *) userdata; /* * If backup is in progress, notify stop of backup to PostgreSQL */ @@ -158,7 +135,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, strlen(" with pg_probackup")); /* Call pg_start_backup function in PostgreSQL connect */ - pg_start_backup(instanceState, label, smooth_checkpoint, ¤t, nodeInfo, backup_conn); + pg_start_backup(label, smooth_checkpoint, ¤t, nodeInfo, backup_conn); /* Obtain current timeline */ #if PG_VERSION_NUM >= 90600 @@ -214,11 +191,11 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, if (prev_backup) { - if (parse_program_version(prev_backup->program_version) > parse_program_version(PROGRAM_VERSION)) - elog(ERROR, "pg_probackup binary version is %s, but backup %s version is %s. " - "pg_probackup do not guarantee to be forward compatible. " - "Please upgrade pg_probackup binary.", - PROGRAM_VERSION, base36enc(prev_backup->start_time), prev_backup->program_version); + if (parse_program_version(prev_backup->program_version) > parse_program_version(PROGRAM_VERSION)) + elog(ERROR, "pg_probackup binary version is %s, but backup %s version is %s. " + "pg_probackup do not guarantee to be forward compatible. 
" + "Please upgrade pg_probackup binary.", + PROGRAM_VERSION, base36enc(prev_backup->start_time), prev_backup->program_version); elog(INFO, "Parent backup: %s", base36enc(prev_backup->start_time)); @@ -282,7 +259,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, write_backup(¤t, true); /* In PAGE mode or in ARCHIVE wal-mode wait for current segment */ - if (current.backup_mode == BACKUP_MODE_DIFF_PAGE || !stream_wal) + if (current.backup_mode == BACKUP_MODE_DIFF_PAGE || !current.stream) { /* Check that archive_dir can be reached */ if (fio_access(instanceState->instance_wal_subdir_path, F_OK, FIO_BACKUP_HOST) != 0) @@ -294,11 +271,11 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, * Because WAL streaming will start after pg_start_backup() in stream * mode. */ - wait_wal_lsn(instanceState, current.start_lsn, true, current.tli, false, true, ERROR, false, ¤t); + wait_wal_lsn(instanceState->instance_wal_subdir_path, current.start_lsn, true, current.tli, false, true, ERROR, false); } /* start stream replication */ - if (stream_wal) + if (current.stream) { join_path_components(dst_backup_path, current.database_dir, PG_XLOG_DIR); fio_mkdir(dst_backup_path, DIR_PERMISSION, FIO_BACKUP_HOST); @@ -310,7 +287,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, * PAGE backup in stream mode is waited twice, first for * segment in WAL archive and then for streamed segment */ - wait_wal_lsn(instanceState, current.start_lsn, true, current.tli, false, true, ERROR, true, ¤t); + wait_wal_lsn(dst_backup_path, current.start_lsn, true, current.tli, false, true, ERROR, true); } /* initialize backup's file list */ @@ -453,7 +430,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, } /* - * Make directories before backup and setup threads at the same time + * Make directories before backup */ for (i = 0; i < parray_num(backup_files_list); i++) { @@ -478,10 +455,11 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, fio_mkdir(dirpath, DIR_PERMISSION, FIO_BACKUP_HOST); } - /* setup threads */ - pg_atomic_clear_flag(&file->lock); } + /* setup thread locks */ + pfilearray_clear_locks(backup_files_list); + /* Sort by size for load balancing */ parray_qsort(backup_files_list, pgFileCompareSize); /* Sort the array for binary search */ @@ -728,7 +706,7 @@ pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo) /* Confirm that this server version is supported */ check_server_version(cur_conn, nodeInfo); - if (pg_checksum_enable(cur_conn)) + if (pg_is_checksum_enabled(cur_conn)) current.checksum_version = 1; else current.checksum_version = 0; @@ -1058,8 +1036,8 @@ confirm_block_size(PGconn *conn, const char *name, int blcksz) /* * Notify start of backup to PostgreSQL server. */ -static void -pg_start_backup(InstanceState *instanceState, const char *label, bool smooth, pgBackup *backup, +void +pg_start_backup(const char *label, bool smooth, pgBackup *backup, PGNodeInfo *nodeInfo, PGconn *conn) { PGresult *res; @@ -1088,9 +1066,9 @@ pg_start_backup(InstanceState *instanceState, const char *label, bool smooth, pg * is necessary to call pg_stop_backup() in backup_cleanup(). 
*/ backup_in_progress = true; - stop_callback_state.conn = conn; - stop_callback_state.server_version = nodeInfo->server_version; - pgut_atexit_push(backup_stopbackup_callback, &stop_callback_state); + stop_callback_params.conn = conn; + stop_callback_params.server_version = nodeInfo->server_version; + pgut_atexit_push(backup_stopbackup_callback, &stop_callback_params); /* Extract timeline and LSN from results of pg_start_backup() */ XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo); @@ -1099,7 +1077,7 @@ pg_start_backup(InstanceState *instanceState, const char *label, bool smooth, pg PQclear(res); - if ((!stream_wal || current.backup_mode == BACKUP_MODE_DIFF_PAGE) && + if ((!backup->stream || backup->backup_mode == BACKUP_MODE_DIFF_PAGE) && !backup->from_replica && !(nodeInfo->server_version < 90600 && !nodeInfo->is_superuser)) @@ -1218,7 +1196,7 @@ get_database_map(PGconn *conn) /* Check if ptrack is enabled in target instance */ static bool -pg_checksum_enable(PGconn *conn) +pg_is_checksum_enabled(PGconn *conn) { PGresult *res_db; @@ -1284,7 +1262,7 @@ pg_is_superuser(PGconn *conn) * previous segment. * * Flag 'in_stream_dir' determine whether we looking for WAL in 'pg_wal' directory or - * in archive. Do note, that we cannot rely sorely on global variable 'stream_wal' because, + * in archive. Do note, that we cannot rely sorely on global variable 'stream_wal' (current.stream) because, * for example, PAGE backup must(!) look for start_lsn in archive regardless of wal_mode. * * 'timeout_elevel' determine the elevel for timeout elog message. If elevel lighter than @@ -1293,15 +1271,13 @@ pg_is_superuser(PGconn *conn) * Returns target LSN if such is found, failing that returns LSN of record prior to target LSN. * Returns InvalidXLogRecPtr if 'segment_only' flag is used. */ -static XLogRecPtr -wait_wal_lsn(InstanceState *instanceState, XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli, +XLogRecPtr +wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli, bool in_prev_segment, bool segment_only, - int timeout_elevel, bool in_stream_dir, pgBackup *backup) + int timeout_elevel, bool in_stream_dir) { XLogSegNo targetSegNo; - char pg_wal_dir[MAXPGPATH]; char wal_segment_path[MAXPGPATH], - *wal_segment_dir, wal_segment[MAXFNAMELEN]; bool file_exists = false; uint32 try_count = 0, @@ -1319,6 +1295,7 @@ wait_wal_lsn(InstanceState *instanceState, XLogRecPtr target_lsn, bool is_start_ GetXLogFileName(wal_segment, tli, targetSegNo, instance_config.xlog_seg_size); + join_path_components(wal_segment_path, wal_segment_dir, wal_segment); /* * In pg_start_backup we wait for 'target_lsn' in 'pg_wal' directory if it is * stream and non-page backup. Page backup needs archived WAL files, so we @@ -1326,17 +1303,6 @@ wait_wal_lsn(InstanceState *instanceState, XLogRecPtr target_lsn, bool is_start_ * * In pg_stop_backup it depends only on stream_wal. 
*/ - if (in_stream_dir) - { - join_path_components(pg_wal_dir, backup->database_dir, PG_XLOG_DIR); - join_path_components(wal_segment_path, pg_wal_dir, wal_segment); - wal_segment_dir = pg_wal_dir; - } - else - { - join_path_components(wal_segment_path, instanceState->instance_wal_subdir_path, wal_segment); - wal_segment_dir = instanceState->instance_wal_subdir_path; - } /* TODO: remove this in 3.0 (it is a cludge against some old bug with archive_timeout) */ if (instance_config.archive_timeout > 0) @@ -1442,7 +1408,7 @@ wait_wal_lsn(InstanceState *instanceState, XLogRecPtr target_lsn, bool is_start_ wal_delivery_str, wal_segment_path); } - if (!stream_wal && is_start_lsn && try_count == 30) + if (!current.stream && is_start_lsn && try_count == 30) elog(WARNING, "By default pg_probackup assume WAL delivery method to be ARCHIVE. " "If continuous archiving is not set up, use '--stream' option to make autonomous backup. " "Otherwise check that continuous archiving works correctly."); @@ -1466,8 +1432,144 @@ wait_wal_lsn(InstanceState *instanceState, XLogRecPtr target_lsn, bool is_start_ } } +/* + * Check stop_lsn (returned from pg_stop_backup()) and update backup->stop_lsn + */ +void +wait_wal_and_calculate_stop_lsn(const char *xlog_path, XLogRecPtr stop_lsn, pgBackup *backup) +{ + bool stop_lsn_exists = false; + + /* It is ok for replica to return invalid STOP LSN + * UPD: Apparently it is ok even for a master. + */ + if (!XRecOffIsValid(stop_lsn)) + { + XLogSegNo segno = 0; + XLogRecPtr lsn_tmp = InvalidXLogRecPtr; + + /* + * Even though the value is invalid, it's expected postgres behaviour + * and we're trying to fix it below. + */ + elog(LOG, "Invalid offset in stop_lsn value %X/%X, trying to fix", + (uint32) (stop_lsn >> 32), (uint32) (stop_lsn)); + + /* + * Note: even with gdb it is very hard to produce automated tests for + * contrecord + invalid LSN, so emulate it for manual testing. + */ + //lsn = lsn - XLOG_SEG_SIZE; + //elog(WARNING, "New Invalid stop_backup_lsn value %X/%X", + // (uint32) (stop_lsn >> 32), (uint32) (stop_lsn)); + + GetXLogSegNo(stop_lsn, segno, instance_config.xlog_seg_size); + + /* + * Note, that there is no guarantee that corresponding WAL file even exists. + * Replica may return LSN from future and keep staying in present. + * Or it can return invalid LSN. + * + * That's bad, since we want to get real LSN to save it in backup label file + * and to use it in WAL validation. + * + * So we try to do the following: + * 1. Wait 'archive_timeout' seconds for segment containing stop_lsn and + * look for the first valid record in it. + * It solves the problem of occasional invalid LSN on write-busy system. + * 2. Failing that, look for record in previous segment with endpoint + * equal or greater than stop_lsn. It may(!) solve the problem of invalid LSN + * on write-idle system. If that fails too, error out. 
+ */ + + /* stop_lsn is pointing to a 0 byte of xlog segment */ + if (stop_lsn % instance_config.xlog_seg_size == 0) + { + /* Wait for segment with current stop_lsn, it is ok for it to never arrive */ + wait_wal_lsn(xlog_path, stop_lsn, false, backup->tli, + false, true, WARNING, backup->stream); + + /* Get the first record in segment with current stop_lsn */ + lsn_tmp = get_first_record_lsn(xlog_path, segno, backup->tli, + instance_config.xlog_seg_size, + instance_config.archive_timeout); + + /* Check that returned LSN is valid and greater than stop_lsn */ + if (XLogRecPtrIsInvalid(lsn_tmp) || + !XRecOffIsValid(lsn_tmp) || + lsn_tmp < stop_lsn) + { + /* Backup from master should error out here */ + if (!backup->from_replica) + elog(ERROR, "Failed to get next WAL record after %X/%X", + (uint32) (stop_lsn >> 32), + (uint32) (stop_lsn)); + + /* No luck, falling back to looking up for previous record */ + elog(WARNING, "Failed to get next WAL record after %X/%X, " + "looking for previous WAL record", + (uint32) (stop_lsn >> 32), + (uint32) (stop_lsn)); + + /* Despite looking for previous record there is not guarantee of success + * because previous record can be the contrecord. + */ + lsn_tmp = wait_wal_lsn(xlog_path, stop_lsn, false, backup->tli, + true, false, ERROR, backup->stream); + + /* sanity */ + if (!XRecOffIsValid(lsn_tmp) || XLogRecPtrIsInvalid(lsn_tmp)) + elog(ERROR, "Failed to get WAL record prior to %X/%X", + (uint32) (stop_lsn >> 32), + (uint32) (stop_lsn)); + } + } + /* stop lsn is aligned to xlog block size, just find next lsn */ + else if (stop_lsn % XLOG_BLCKSZ == 0) + { + /* Wait for segment with current stop_lsn */ + wait_wal_lsn(xlog_path, stop_lsn, false, backup->tli, + false, true, ERROR, backup->stream); + + /* Get the next closest record in segment with current stop_lsn */ + lsn_tmp = get_next_record_lsn(xlog_path, segno, backup->tli, + instance_config.xlog_seg_size, + instance_config.archive_timeout, + stop_lsn); + + /* sanity */ + if (!XRecOffIsValid(lsn_tmp) || XLogRecPtrIsInvalid(lsn_tmp)) + elog(ERROR, "Failed to get WAL record next to %X/%X", + (uint32) (stop_lsn >> 32), + (uint32) (stop_lsn)); + } + /* PostgreSQL returned something very illegal as STOP_LSN, error out */ + else + elog(ERROR, "Invalid stop_backup_lsn value %X/%X", + (uint32) (stop_lsn >> 32), (uint32) (stop_lsn)); + + /* Setting stop_backup_lsn will set stop point for streaming */ + stop_backup_lsn = lsn_tmp; + stop_lsn_exists = true; + } + + elog(LOG, "stop_lsn: %X/%X", + (uint32) (stop_lsn >> 32), (uint32) (stop_lsn)); + + /* + * Wait for stop_lsn to be archived or streamed. + * If replica returned valid STOP_LSN of not actually existing record, + * look for previous record with endpoint >= STOP_LSN. 
+ */ + if (!stop_lsn_exists) + stop_backup_lsn = wait_wal_lsn(xlog_path, stop_lsn, false, backup->tli, + false, false, ERROR, backup->stream); + + backup->stop_lsn = stop_backup_lsn; +} + /* Remove annoying NOTICE messages generated by backend */ -static void +void pg_silent_client_messages(PGconn *conn) { PGresult *res; @@ -1476,7 +1578,7 @@ pg_silent_client_messages(PGconn *conn) PQclear(res); } -static void +void pg_create_restore_point(PGconn *conn, time_t backup_start_time) { PGresult *res; @@ -1573,7 +1675,7 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica elog(ERROR, "Failed to send pg_stop_backup query"); /* After we have sent pg_stop_backup, we don't need this callback anymore */ - pgut_atexit_pop(backup_stopbackup_callback, &stop_callback_state); + pgut_atexit_pop(backup_stopbackup_callback, &stop_callback_params); if (query_text) *query_text = pgut_strdup(stop_backup_query); @@ -1586,10 +1688,10 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica * parameters: * - */ -static void +void pg_stop_backup_consume(PGconn *conn, int server_version, bool is_exclusive, uint32 timeout, const char *query_text, - struct pg_stop_backup_result *result) + PGStopBackupResult *result) { PGresult *query_result; uint32 pg_stop_backup_timeout = 0; @@ -1717,7 +1819,7 @@ pg_stop_backup_consume(PGconn *conn, int server_version, /* * helper routine used to write backup_label and tablespace_map in pg_stop_backup() */ -static void +void pg_stop_backup_write_file_helper(const char *path, const char *filename, const char *error_msg_filename, const void *data, size_t len, parray *file_list) { @@ -1738,7 +1840,7 @@ pg_stop_backup_write_file_helper(const char *path, const char *filename, const c error_msg_filename, full_filename, strerror(errno)); /* - * It's vital to check if backup_files_list is initialized, + * It's vital to check if files_list is initialized, * because we could get here because the backup was interrupted */ if (file_list) @@ -1764,10 +1866,8 @@ static void pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startbackup_conn, PGNodeInfo *nodeInfo) { - PGconn *conn; - bool stop_lsn_exists = false; - struct pg_stop_backup_result stop_backup_result; - char *xlog_path,stream_xlog_path[MAXPGPATH]; + PGStopBackupResult stop_backup_result; + char *xlog_path, stream_xlog_path[MAXPGPATH]; /* kludge against some old bug in archive_timeout. TODO: remove in 3.0.0 */ int timeout = (instance_config.archive_timeout > 0) ? instance_config.archive_timeout : ARCHIVE_TIMEOUT_DEFAULT; @@ -1777,9 +1877,7 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb if (!backup_in_progress) elog(ERROR, "backup is not in progress"); - conn = pg_startbackup_conn; - - pg_silent_client_messages(conn); + pg_silent_client_messages(pg_startbackup_conn); /* Create restore point * Only if backup is from master. 
@@ -1788,157 +1886,34 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb if (!backup->from_replica && !(nodeInfo->server_version < 90600 && !nodeInfo->is_superuser)) //TODO: check correctness - pg_create_restore_point(conn, backup->start_time); + pg_create_restore_point(pg_startbackup_conn, backup->start_time); /* Execute pg_stop_backup using PostgreSQL connection */ - pg_stop_backup_send(conn, nodeInfo->server_version, current.from_replica, exclusive_backup, &query_text); + pg_stop_backup_send(pg_startbackup_conn, nodeInfo->server_version, backup->from_replica, exclusive_backup, &query_text); /* * Wait for the result of pg_stop_backup(), but no longer than * archive_timeout seconds */ - pg_stop_backup_consume(conn, nodeInfo->server_version, exclusive_backup, timeout, query_text, &stop_backup_result); + pg_stop_backup_consume(pg_startbackup_conn, nodeInfo->server_version, exclusive_backup, timeout, query_text, &stop_backup_result); - /* It is ok for replica to return invalid STOP LSN - * UPD: Apparently it is ok even for a master. - */ - if (!XRecOffIsValid(stop_backup_result.lsn)) + if (backup->stream) { - char *xlog_path, - stream_xlog_path[MAXPGPATH]; - XLogSegNo segno = 0; - XLogRecPtr lsn_tmp = InvalidXLogRecPtr; - - /* - * Even though the value is invalid, it's expected postgres behaviour - * and we're trying to fix it below. - */ - elog(LOG, "Invalid offset in stop_lsn value %X/%X, trying to fix", - (uint32) (stop_backup_result.lsn >> 32), (uint32) (stop_backup_result.lsn)); - - /* - * Note: even with gdb it is very hard to produce automated tests for - * contrecord + invalid LSN, so emulate it for manual testing. - */ - //stop_backup_result.lsn = stop_backup_result.lsn - XLOG_SEG_SIZE; - //elog(WARNING, "New Invalid stop_backup_lsn value %X/%X", - // (uint32) (stop_backup_result.lsn >> 32), (uint32) (stop_backup_result.lsn)); - - if (stream_wal) - { - snprintf(stream_xlog_path, lengthof(stream_xlog_path), - "%s/%s/%s/%s", instanceState->instance_backup_subdir_path, - base36enc(backup->start_time), - DATABASE_DIR, PG_XLOG_DIR); - xlog_path = stream_xlog_path; - } - else - xlog_path = instanceState->instance_wal_subdir_path; - - GetXLogSegNo(stop_backup_result.lsn, segno, instance_config.xlog_seg_size); - - /* - * Note, that there is no guarantee that corresponding WAL file even exists. - * Replica may return LSN from future and keep staying in present. - * Or it can return invalid LSN. - * - * That's bad, since we want to get real LSN to save it in backup label file - * and to use it in WAL validation. - * - * So we try to do the following: - * 1. Wait 'archive_timeout' seconds for segment containing stop_lsn and - * look for the first valid record in it. - * It solves the problem of occasional invalid LSN on write-busy system. - * 2. Failing that, look for record in previous segment with endpoint - * equal or greater than stop_lsn. It may(!) solve the problem of invalid LSN - * on write-idle system. If that fails too, error out. 
- */ - - /* stop_lsn is pointing to a 0 byte of xlog segment */ - if (stop_backup_result.lsn % instance_config.xlog_seg_size == 0) - { - /* Wait for segment with current stop_lsn, it is ok for it to never arrive */ - wait_wal_lsn(instanceState, stop_backup_result.lsn, false, backup->tli, - false, true, WARNING, stream_wal, backup); - - /* Get the first record in segment with current stop_lsn */ - lsn_tmp = get_first_record_lsn(xlog_path, segno, backup->tli, - instance_config.xlog_seg_size, - instance_config.archive_timeout); - - /* Check that returned LSN is valid and greater than stop_lsn */ - if (XLogRecPtrIsInvalid(lsn_tmp) || - !XRecOffIsValid(lsn_tmp) || - lsn_tmp < stop_backup_result.lsn) - { - /* Backup from master should error out here */ - if (!backup->from_replica) - elog(ERROR, "Failed to get next WAL record after %X/%X", - (uint32) (stop_backup_result.lsn >> 32), - (uint32) (stop_backup_result.lsn)); - - /* No luck, falling back to looking up for previous record */ - elog(WARNING, "Failed to get next WAL record after %X/%X, " - "looking for previous WAL record", - (uint32) (stop_backup_result.lsn >> 32), - (uint32) (stop_backup_result.lsn)); - - /* Despite looking for previous record there is not guarantee of success - * because previous record can be the contrecord. - */ - lsn_tmp = wait_wal_lsn(instanceState, stop_backup_result.lsn, false, backup->tli, - true, false, ERROR, stream_wal, backup); - - /* sanity */ - if (!XRecOffIsValid(lsn_tmp) || XLogRecPtrIsInvalid(lsn_tmp)) - elog(ERROR, "Failed to get WAL record prior to %X/%X", - (uint32) (stop_backup_result.lsn >> 32), - (uint32) (stop_backup_result.lsn)); - } - } - /* stop lsn is aligned to xlog block size, just find next lsn */ - else if (stop_backup_result.lsn % XLOG_BLCKSZ == 0) - { - /* Wait for segment with current stop_lsn */ - wait_wal_lsn(instanceState, stop_backup_result.lsn, false, backup->tli, - false, true, ERROR, stream_wal, backup); - - /* Get the next closest record in segment with current stop_lsn */ - lsn_tmp = get_next_record_lsn(xlog_path, segno, backup->tli, - instance_config.xlog_seg_size, - instance_config.archive_timeout, - stop_backup_result.lsn); - - /* sanity */ - if (!XRecOffIsValid(lsn_tmp) || XLogRecPtrIsInvalid(lsn_tmp)) - elog(ERROR, "Failed to get WAL record next to %X/%X", - (uint32) (stop_backup_result.lsn >> 32), - (uint32) (stop_backup_result.lsn)); - } - /* PostgreSQL returned something very illegal as STOP_LSN, error out */ - else - elog(ERROR, "Invalid stop_backup_lsn value %X/%X", - (uint32) (stop_backup_result.lsn >> 32), (uint32) (stop_backup_result.lsn)); - - /* Setting stop_backup_lsn will set stop point for streaming */ - stop_backup_lsn = lsn_tmp; - stop_lsn_exists = true; + join_path_components(stream_xlog_path, backup->database_dir, PG_XLOG_DIR); + xlog_path = stream_xlog_path; } + else + xlog_path = instanceState->instance_wal_subdir_path; - elog(LOG, "stop_lsn: %X/%X", - (uint32) (stop_backup_result.lsn >> 32), (uint32) (stop_backup_result.lsn)); + wait_wal_and_calculate_stop_lsn(xlog_path, stop_backup_result.lsn, backup); /* Write backup_label and tablespace_map */ if (!exclusive_backup) { - char path[MAXPGPATH]; - Assert(stop_backup_result.backup_label_content != NULL); - snprintf(path, lengthof(path), "%s/%s/%s", instanceState->instance_backup_subdir_path, - base36enc(backup->start_time), DATABASE_DIR); /* Write backup_label */ - pg_stop_backup_write_file_helper(path, PG_BACKUP_LABEL_FILE, "backup label", + pg_stop_backup_write_file_helper(backup->database_dir, 
PG_BACKUP_LABEL_FILE, "backup label", stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, backup_files_list); free(stop_backup_result.backup_label_content); @@ -1948,7 +1923,7 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb /* Write tablespace_map */ if (stop_backup_result.tablespace_map_content != NULL) { - pg_stop_backup_write_file_helper(path, PG_TABLESPACE_MAP_FILE, "tablespace map", + pg_stop_backup_write_file_helper(backup->database_dir, PG_TABLESPACE_MAP_FILE, "tablespace map", stop_backup_result.tablespace_map_content, stop_backup_result.tablespace_map_content_len, backup_files_list); free(stop_backup_result.tablespace_map_content); @@ -1957,32 +1932,14 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb } } - /* - * Wait for stop_lsn to be archived or streamed. - * If replica returned valid STOP_LSN of not actually existing record, - * look for previous record with endpoint >= STOP_LSN. - */ - if (!stop_lsn_exists) - stop_backup_lsn = wait_wal_lsn(instanceState, stop_backup_result.lsn, false, backup->tli, - false, false, ERROR, stream_wal, backup); - - if (stream_wal) + if (backup->stream) { /* This function will also add list of xlog files * to the passed filelist */ if(wait_WAL_streaming_end(backup_files_list)) elog(ERROR, "WAL streaming failed"); - - snprintf(stream_xlog_path, lengthof(stream_xlog_path), "%s/%s/%s/%s", - instanceState->instance_backup_subdir_path, base36enc(backup->start_time), - DATABASE_DIR, PG_XLOG_DIR); - - xlog_path = stream_xlog_path; } - else - xlog_path = instanceState->instance_wal_subdir_path; - backup->stop_lsn = stop_backup_lsn; backup->recovery_xid = stop_backup_result.snapshot_xid; elog(LOG, "Getting the Recovery Time from WAL"); diff --git a/src/checkdb.c b/src/checkdb.c index 4ea1d0800..5d7d6652b 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -455,7 +455,6 @@ get_index_list(const char *dbname, bool first_db_with_amcheck, ind->heapallindexed_is_supported = heapallindexed_is_supported; ind->amcheck_nspname = pgut_malloc(strlen(amcheck_nspname) + 1); strcpy(ind->amcheck_nspname, amcheck_nspname); - pg_atomic_clear_flag(&ind->lock); if (index_list == NULL) index_list = parray_new(); @@ -463,6 +462,8 @@ get_index_list(const char *dbname, bool first_db_with_amcheck, parray_append(index_list, ind); } + pfilearray_clear_locks(index_list); + PQclear(res); return index_list; diff --git a/src/dir.c b/src/dir.c index 86848d8d5..dfcddd7d0 100644 --- a/src/dir.c +++ b/src/dir.c @@ -222,6 +222,8 @@ pgFileInit(const char *rel_path) /* Number of blocks backed up during backup */ file->n_headers = 0; + // May be add? 
+ // pg_atomic_clear_flag(file->lock); return file; } @@ -1859,3 +1861,17 @@ cleanup_tablespace(const char *path) parray_walk(files, pgFileFree); parray_free(files); } + +/* + * Clear the synchronisation locks in a parray of (pgFile *)'s + */ +void +pfilearray_clear_locks(parray *file_list) +{ + int i; + for (i = 0; i < parray_num(file_list); i++) + { + pgFile *file = (pgFile *) parray_get(file_list, i); + pg_atomic_clear_flag(&file->lock); + } +} diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 1b2e7f751..3150900b6 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -68,8 +68,6 @@ static char *backup_path = NULL; static CatalogState *catalogState = NULL; /* ================ catalogState (END) =========== */ -/* colon separated external directories list ("/path1:/path2") */ -char *externaldir = NULL; /* common options */ int num_threads = 1; bool stream_wal = false; diff --git a/src/pg_probackup.h b/src/pg_probackup.h index d02bbb033..a7979ed27 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -600,7 +600,6 @@ typedef struct int ret; } backup_files_arg; - typedef struct timelineInfo timelineInfo; /* struct to collect info about timelines in WAL archive */ @@ -679,10 +678,11 @@ typedef struct BackupPageHeader2 uint16 checksum; } BackupPageHeader2; -typedef struct StopBackupCallbackState { +typedef struct StopBackupCallbackParams +{ PGconn *conn; int server_version; -} StopBackupCallbackState; +} StopBackupCallbackParams; /* Special value for compressed_size field */ #define PageIsOk 0 @@ -1061,6 +1061,7 @@ extern int pgFileCompareRelPathWithExternalDesc(const void *f1, const void *f2); extern int pgFileCompareLinked(const void *f1, const void *f2); extern int pgFileCompareSize(const void *f1, const void *f2); extern int pgCompareOid(const void *f1, const void *f2); +extern void pfilearray_clear_locks(parray *file_list); /* in data.c */ extern bool check_data_file(ConnectionArgs *arguments, pgFile *file, @@ -1259,4 +1260,42 @@ extern void start_WAL_streaming(PGconn *backup_conn, char *stream_dst_path, ConnectionOptions *conn_opt, XLogRecPtr startpos, TimeLineID starttli); extern int wait_WAL_streaming_end(parray *backup_files_list); + +/* external variables and functions, implemented in backup.c */ +typedef struct PGStopBackupResult +{ + /* + * We will use values of snapshot_xid and invocation_time if there are + * no transactions between start_lsn and stop_lsn. 
+ */ + TransactionId snapshot_xid; + time_t invocation_time; + /* + * Fields that store pg_catalog.pg_stop_backup() result + */ + XLogRecPtr lsn; + size_t backup_label_content_len; + char *backup_label_content; + size_t tablespace_map_content_len; + char *tablespace_map_content; +} PGStopBackupResult; + +extern bool backup_in_progress; +extern parray *backup_files_list; + +extern void pg_start_backup(const char *label, bool smooth, pgBackup *backup, + PGNodeInfo *nodeInfo, PGconn *conn); +extern void pg_silent_client_messages(PGconn *conn); +extern void pg_create_restore_point(PGconn *conn, time_t backup_start_time); +extern void pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, bool is_exclusive, char **query_text); +extern void pg_stop_backup_consume(PGconn *conn, int server_version, + bool is_exclusive, uint32 timeout, const char *query_text, + PGStopBackupResult *result); +extern void pg_stop_backup_write_file_helper(const char *path, const char *filename, const char *error_msg_filename, + const void *data, size_t len, parray *file_list); +extern XLogRecPtr wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr lsn, bool is_start_lsn, TimeLineID tli, + bool in_prev_segment, bool segment_only, + int timeout_elevel, bool in_stream_dir); +extern void wait_wal_and_calculate_stop_lsn(const char *xlog_path, XLogRecPtr stop_lsn, pgBackup *backup); + #endif /* PG_PROBACKUP_H */ diff --git a/src/restore.c b/src/restore.c index 6aa4c5345..81ee9a6f7 100644 --- a/src/restore.c +++ b/src/restore.c @@ -824,7 +824,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, } /* - * Setup directory structure for external directories and file locks + * Setup directory structure for external directories */ for (i = 0; i < parray_num(dest_files); i++) { @@ -848,11 +848,11 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, elog(VERBOSE, "Create external directory \"%s\"", dirpath); fio_mkdir(dirpath, file->mode, FIO_DB_HOST); } - - /* setup threads */ - pg_atomic_clear_flag(&file->lock); } + /* setup threads */ + pfilearray_clear_locks(dest_files); + /* Get list of files in destination directory and remove redundant files */ if (params->incremental_mode != INCR_NONE || cleanup_pgdata) { diff --git a/src/utils/parray.c b/src/utils/parray.c index 31148ee9a..95b83365d 100644 --- a/src/utils/parray.c +++ b/src/utils/parray.c @@ -175,7 +175,7 @@ parray_rm(parray *array, const void *key, int(*compare)(const void *, const void size_t parray_num(const parray *array) { - return array->used; + return array!= NULL ? array->used : (size_t) 0; } void diff --git a/src/validate.c b/src/validate.c index f000698d0..4044ac158 100644 --- a/src/validate.c +++ b/src/validate.c @@ -130,11 +130,7 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) // params->partial_restore_type); /* setup threads */ - for (i = 0; i < parray_num(files); i++) - { - pgFile *file = (pgFile *) parray_get(files, i); - pg_atomic_clear_flag(&file->lock); - } + pfilearray_clear_locks(files); /* init thread args with own file lists */ threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads); From 908a5ad65a61ec874b9a425d81308123b497db1f Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Thu, 3 Jun 2021 18:28:22 +0300 Subject: [PATCH 153/525] introduce ptrack_parse_version_string() and new ptrack version numbering schema --- src/backup.c | 10 +++++----- src/data.c | 4 ++-- src/ptrack.c | 56 +++++++++++++++++++++++++++++++--------------------- 3 files changed, 40 insertions(+), 30 deletions(-) diff --git a/src/backup.c b/src/backup.c index 3815900b9..6f1aa867a 100644 --- a/src/backup.c +++ b/src/backup.c @@ -219,7 +219,7 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool { XLogRecPtr ptrack_lsn = get_last_ptrack_lsn(backup_conn, nodeInfo); - if (nodeInfo->ptrack_version_num < 20) + if (nodeInfo->ptrack_version_num < 200) { // backward compatibility kludge: use Stop LSN for ptrack 1.x, if (ptrack_lsn > prev_backup->stop_lsn || ptrack_lsn == InvalidXLogRecPtr) @@ -408,14 +408,14 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool /* * Build the page map from ptrack information. */ - if (nodeInfo->ptrack_version_num >= 20) + if (nodeInfo->ptrack_version_num >= 200) make_pagemap_from_ptrack_2(backup_files_list, backup_conn, nodeInfo->ptrack_schema, nodeInfo->ptrack_version_num, prev_backup_start_lsn); - else if (nodeInfo->ptrack_version_num == 15 || - nodeInfo->ptrack_version_num == 16 || - nodeInfo->ptrack_version_num == 17) + else if (nodeInfo->ptrack_version_num == 105 || + nodeInfo->ptrack_version_num == 106 || + nodeInfo->ptrack_version_num == 107) make_pagemap_from_ptrack_1(backup_files_list, backup_conn); } diff --git a/src/data.c b/src/data.c index d70aae8fd..280ede5c8 100644 --- a/src/data.c +++ b/src/data.c @@ -300,7 +300,7 @@ prepare_page(ConnectionArgs *conn_arg, * Under high write load it's possible that we've read partly * flushed page, so try several times before throwing an error. */ - if (backup_mode != BACKUP_MODE_DIFF_PTRACK || ptrack_version_num >= 20) + if (backup_mode != BACKUP_MODE_DIFF_PTRACK || ptrack_version_num >= 200) { int rc = 0; while (!page_is_valid && try_again--) @@ -400,7 +400,7 @@ prepare_page(ConnectionArgs *conn_arg, * We do this only in the cases of PTRACK 1.x versions backup */ if (backup_mode == BACKUP_MODE_DIFF_PTRACK - && (ptrack_version_num >= 15 && ptrack_version_num < 20)) + && (ptrack_version_num >= 105 && ptrack_version_num < 200)) { int rc = 0; size_t page_size = 0; diff --git a/src/ptrack.c b/src/ptrack.c index dc1a2c74c..5a2b9f046 100644 --- a/src/ptrack.c +++ b/src/ptrack.c @@ -139,6 +139,23 @@ make_pagemap_from_ptrack_1(parray *files, PGconn *backup_conn) } } +/* + * Parse a string like "2.1" into int + * result: int by formula major_number * 100 + minor_number + * or -1 if string cannot be parsed + */ +static int +ptrack_parse_version_string(const char *version_str) +{ + int ma, mi; + int sscanf_readed_count; + if (sscanf(version_str, "%u.%2u%n", &ma, &mi, &sscanf_readed_count) != 2) + return -1; + if (sscanf_readed_count != strlen(version_str)) + return -1; + return ma * 100 + mi; +} + /* Check if the instance supports compatible version of ptrack, * fill-in version number if it does. * Also for ptrack 2.x save schema namespace. 
@@ -148,6 +165,7 @@ get_ptrack_version(PGconn *backup_conn, PGNodeInfo *nodeInfo) { PGresult *res_db; char *ptrack_version_str; + int ptrack_version_num; res_db = pgut_execute(backup_conn, "SELECT extnamespace::regnamespace, extversion " @@ -191,24 +209,16 @@ get_ptrack_version(PGconn *backup_conn, PGNodeInfo *nodeInfo) ptrack_version_str = PQgetvalue(res_db, 0, 0); } - if (strcmp(ptrack_version_str, "1.5") == 0) - nodeInfo->ptrack_version_num = 15; - else if (strcmp(ptrack_version_str, "1.6") == 0) - nodeInfo->ptrack_version_num = 16; - else if (strcmp(ptrack_version_str, "1.7") == 0) - nodeInfo->ptrack_version_num = 17; - else if (strcmp(ptrack_version_str, "2.0") == 0) - nodeInfo->ptrack_version_num = 20; - else if (strcmp(ptrack_version_str, "2.1") == 0) - nodeInfo->ptrack_version_num = 21; - else if (strcmp(ptrack_version_str, "2.2") == 0) - nodeInfo->ptrack_version_num = 22; - else - elog(WARNING, "Update your ptrack to the version 2.1 or upper. Current version is %s", + ptrack_version_num = ptrack_parse_version_string(ptrack_version_str); + if (ptrack_version_num == -1) + /* leave default nodeInfo->ptrack_version_num = 0 from pgNodeInit() */ + elog(WARNING, "Cannot parse ptrack version string \"%s\"", ptrack_version_str); + else + nodeInfo->ptrack_version_num = ptrack_version_num; /* ptrack 1.X is buggy, so fall back to DELTA backup strategy for safety */ - if (nodeInfo->ptrack_version_num >= 15 && nodeInfo->ptrack_version_num < 20) + if (nodeInfo->ptrack_version_num >= 105 && nodeInfo->ptrack_version_num < 200) { if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK) { @@ -231,12 +241,12 @@ pg_ptrack_enable(PGconn *backup_conn, int ptrack_version_num) PGresult *res_db; bool result = false; - if (ptrack_version_num < 20) + if (ptrack_version_num < 200) { res_db = pgut_execute(backup_conn, "SHOW ptrack_enable", 0, NULL); result = strcmp(PQgetvalue(res_db, 0, 0), "on") == 0; } - else if (ptrack_version_num == 20) + else if (ptrack_version_num == 200) { res_db = pgut_execute(backup_conn, "SHOW ptrack_map_size", 0, NULL); result = strcmp(PQgetvalue(res_db, 0, 0), "0") != 0; @@ -270,7 +280,7 @@ pg_ptrack_clear(PGconn *backup_conn, int ptrack_version_num) char *params[2]; // FIXME Perform this check on caller's side - if (ptrack_version_num >= 20) + if (ptrack_version_num >= 200) return; params[0] = palloc(64); @@ -472,14 +482,14 @@ get_last_ptrack_lsn(PGconn *backup_conn, PGNodeInfo *nodeInfo) uint32 lsn_lo; XLogRecPtr lsn; - if (nodeInfo->ptrack_version_num < 20) + if (nodeInfo->ptrack_version_num < 200) res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_ptrack_control_lsn()", 0, NULL); else { char query[128]; - if (nodeInfo->ptrack_version_num == 20) + if (nodeInfo->ptrack_version_num == 200) sprintf(query, "SELECT %s.pg_ptrack_control_lsn()", nodeInfo->ptrack_schema); else sprintf(query, "SELECT %s.ptrack_init_lsn()", nodeInfo->ptrack_schema); @@ -537,7 +547,7 @@ pg_ptrack_get_block(ConnectionArgs *arguments, // elog(LOG, "db %i pg_ptrack_get_block(%i, %i, %u)",dbOid, tblsOid, relOid, blknum); - if (ptrack_version_num < 20) + if (ptrack_version_num < 200) res = pgut_execute_parallel(arguments->conn, arguments->cancel_conn, "SELECT pg_catalog.pg_ptrack_get_block_2($1, $2, $3, $4)", @@ -550,7 +560,7 @@ pg_ptrack_get_block(ConnectionArgs *arguments, if (!ptrack_schema) elog(ERROR, "Schema name of ptrack extension is missing"); - if (ptrack_version_num == 20) + if (ptrack_version_num == 200) sprintf(query, "SELECT %s.pg_ptrack_get_block($1, $2, $3, $4)", ptrack_schema); else elog(ERROR, 
"ptrack >= 2.1.0 does not support pg_ptrack_get_block()"); @@ -614,7 +624,7 @@ pg_ptrack_get_pagemapset(PGconn *backup_conn, const char *ptrack_schema, if (!ptrack_schema) elog(ERROR, "Schema name of ptrack extension is missing"); - if (ptrack_version_num == 20) + if (ptrack_version_num == 200) sprintf(query, "SELECT path, pagemap FROM %s.pg_ptrack_get_pagemapset($1) ORDER BY 1", ptrack_schema); else From 0dcfb06ec7e8f397b32f65de8562c82aec458f60 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Mon, 7 Jun 2021 15:48:53 +0300 Subject: [PATCH 154/525] s:snprintf(..., MAXPGPATH, "%s/%s", ...):join_path_components(...):g --- src/catalog.c | 4 ++-- src/parsexlog.c | 2 +- src/restore.c | 13 ++++++------- src/util.c | 4 ++-- src/utils/pgut.c | 2 +- 5 files changed, 12 insertions(+), 13 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 981841747..3ea4d9bca 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -829,7 +829,7 @@ IsDir(const char *dirpath, const char *entry, fio_location location) char path[MAXPGPATH]; struct stat st; - snprintf(path, MAXPGPATH, "%s/%s", dirpath, entry); + join_path_components(path, dirpath, entry); return fio_stat(path, &st, false, location) == 0 && S_ISDIR(st.st_mode); } @@ -941,7 +941,7 @@ catalog_get_backup_list(const char *instance_name, time_t requested_backup_id) join_path_components(data_path, backup_instance_path, data_ent->d_name); /* read backup information from BACKUP_CONTROL_FILE */ - snprintf(backup_conf_path, MAXPGPATH, "%s/%s", data_path, BACKUP_CONTROL_FILE); + join_path_components(backup_conf_path, data_path, BACKUP_CONTROL_FILE); backup = readBackupControlFile(backup_conf_path); if (!backup) diff --git a/src/parsexlog.c b/src/parsexlog.c index 4a0f38642..8dfb2c78c 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -1017,7 +1017,7 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, GetXLogFileName(xlogfname, reader_data->tli, reader_data->xlogsegno, wal_seg_size); - snprintf(reader_data->xlogpath, MAXPGPATH, "%s/%s", wal_archivedir, xlogfname); + join_path_components(reader_data->xlogpath, wal_archivedir, xlogfname); snprintf(reader_data->gz_xlogpath, MAXPGPATH, "%s.gz", reader_data->xlogpath); /* We fall back to using .partial segment in case if we are running diff --git a/src/restore.c b/src/restore.c index 86317596e..9c0b059e9 100644 --- a/src/restore.c +++ b/src/restore.c @@ -1480,7 +1480,7 @@ update_recovery_options_before_v12(pgBackup *backup, } elog(LOG, "update recovery settings in recovery.conf"); - snprintf(path, lengthof(path), "%s/recovery.conf", instance_config.pgdata); + join_path_components(path, instance_config.pgdata, "recovery.conf"); fp = fio_fopen(path, "w", FIO_DB_HOST); if (fp == NULL) @@ -1537,8 +1537,7 @@ update_recovery_options(pgBackup *backup, time2iso(current_time_str, lengthof(current_time_str), current_time, false); - snprintf(postgres_auto_path, lengthof(postgres_auto_path), - "%s/postgresql.auto.conf", instance_config.pgdata); + join_path_components(postgres_auto_path, instance_config.pgdata, "postgresql.auto.conf"); if (fio_stat(postgres_auto_path, &st, false, FIO_DB_HOST) < 0) { @@ -1648,7 +1647,7 @@ update_recovery_options(pgBackup *backup, if (params->recovery_settings_mode == PITR_REQUESTED) { elog(LOG, "creating recovery.signal file"); - snprintf(path, lengthof(path), "%s/recovery.signal", instance_config.pgdata); + join_path_components(path, instance_config.pgdata, "recovery.signal"); fp = fio_fopen(path, PG_BINARY_W, FIO_DB_HOST); if (fp == NULL) @@ -1664,7 +1663,7 @@ 
update_recovery_options(pgBackup *backup, if (params->restore_as_replica) { elog(LOG, "creating standby.signal file"); - snprintf(path, lengthof(path), "%s/standby.signal", instance_config.pgdata); + join_path_components(path, instance_config.pgdata, "standby.signal"); fp = fio_fopen(path, PG_BINARY_W, FIO_DB_HOST); if (fp == NULL) @@ -2160,7 +2159,7 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, { char pid_file[MAXPGPATH]; - snprintf(pid_file, MAXPGPATH, "%s/postmaster.pid", pgdata); + join_path_components(pid_file, pgdata, "postmaster.pid"); elog(WARNING, "Pid file \"%s\" is mangled, cannot determine whether postmaster is running or not", pid_file); success = false; @@ -2201,7 +2200,7 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, */ if (incremental_mode == INCR_LSN) { - snprintf(backup_label, MAXPGPATH, "%s/backup_label", pgdata); + join_path_components(backup_label, pgdata, "backup_label"); if (fio_access(backup_label, F_OK, FIO_DB_HOST) == 0) { elog(WARNING, "Destination directory contains \"backup_control\" file. " diff --git a/src/util.c b/src/util.c index 87ec36713..8fcec6189 100644 --- a/src/util.c +++ b/src/util.c @@ -418,7 +418,7 @@ set_min_recovery_point(pgFile *file, const char *backup_path, FIN_CRC32C(ControlFile.crc); /* overwrite pg_control */ - snprintf(fullpath, sizeof(fullpath), "%s/%s", backup_path, XLOG_CONTROL_FILE); + join_path_components(fullpath, backup_path, XLOG_CONTROL_FILE); writeControlFile(&ControlFile, fullpath, FIO_LOCAL_HOST); /* Update pg_control checksum in backup_list */ @@ -569,7 +569,7 @@ check_postmaster(const char *pgdata) pid_t pid; char pid_file[MAXPGPATH]; - snprintf(pid_file, MAXPGPATH, "%s/postmaster.pid", pgdata); + join_path_components(pid_file, pgdata, "postmaster.pid"); fp = fopen(pid_file, "r"); if (fp == NULL) diff --git a/src/utils/pgut.c b/src/utils/pgut.c index eba31faa6..e1e52b24b 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -1184,7 +1184,7 @@ pgut_rmtree(const char *path, bool rmtopdir, bool strict) /* now we have the names we can start removing things */ for (filename = filenames; *filename; filename++) { - snprintf(pathbuf, MAXPGPATH, "%s/%s", path, *filename); + join_path_components(pathbuf, path, *filename); if (lstat(pathbuf, &statbuf) != 0) { From 477e5bcb4fcb0ea74cfb49d865e21e4c7bff5f0a Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Sat, 12 Jun 2021 20:21:14 +0300 Subject: [PATCH 155/525] Some minor fixes (#397) * Reformat fio_*() definitions for easier grep'ping * typo * move check_postmaster() into src/utils/file.c (from src/util.c), rename it to local_check_postmaster() and make it static/private to src/utils/file.c --- src/data.c | 2 +- src/pg_probackup.h | 2 - src/util.c | 48 --------- src/utils/file.c | 238 ++++++++++++++++++++++++++++++++------------- src/utils/file.h | 1 - 5 files changed, 174 insertions(+), 117 deletions(-) diff --git a/src/data.c b/src/data.c index 280ede5c8..60986fd5c 100644 --- a/src/data.c +++ b/src/data.c @@ -1516,7 +1516,7 @@ validate_one_page(Page page, BlockNumber absolute_blkno, } /* - * Valiate pages of datafile in PGDATA one by one. + * Validate pages of datafile in PGDATA one by one. 
* * returns true if the file is valid * also returns true if the file was not found diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 746b0f5a5..fca08bdac 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1071,8 +1071,6 @@ extern PageState *get_checksum_map(const char *fullpath, uint32 checksum_version int n_blocks, XLogRecPtr dest_stop_lsn, BlockNumber segmentno); extern datapagemap_t *get_lsn_map(const char *fullpath, uint32 checksum_version, int n_blocks, XLogRecPtr shift_lsn, BlockNumber segmentno); -extern pid_t check_postmaster(const char *pgdata); - extern bool validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, uint32 checksum_version, uint32 backup_version, HeaderMap *hdr_map); diff --git a/src/util.c b/src/util.c index 8fcec6189..9fd0114bb 100644 --- a/src/util.c +++ b/src/util.c @@ -556,51 +556,3 @@ datapagemap_print_debug(datapagemap_t *map) pg_free(iter); } - -/* - * Return pid of postmaster process running in given pgdata. - * Return 0 if there is none. - * Return 1 if postmaster.pid is mangled. - */ -pid_t -check_postmaster(const char *pgdata) -{ - FILE *fp; - pid_t pid; - char pid_file[MAXPGPATH]; - - join_path_components(pid_file, pgdata, "postmaster.pid"); - - fp = fopen(pid_file, "r"); - if (fp == NULL) - { - /* No pid file, acceptable*/ - if (errno == ENOENT) - return 0; - else - elog(ERROR, "Cannot open file \"%s\": %s", - pid_file, strerror(errno)); - } - - if (fscanf(fp, "%i", &pid) != 1) - { - /* something is wrong with the file content */ - pid = 1; - } - - if (pid > 1) - { - if (kill(pid, 0) != 0) - { - /* process no longer exists */ - if (errno == ESRCH) - pid = 0; - else - elog(ERROR, "Failed to send signal 0 to a process %d: %s", - pid, strerror(errno)); - } - } - - fclose(fp); - return pid; -} diff --git a/src/utils/file.c b/src/utils/file.c index 15a7085ec..6dcf2288e 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -30,7 +30,6 @@ typedef struct int path_len; } fio_send_request; - typedef struct { char path[MAXPGPATH]; @@ -85,14 +84,16 @@ typedef struct #endif /* Use specified file descriptors as stdin/stdout for FIO functions */ -void fio_redirect(int in, int out, int err) +void +fio_redirect(int in, int out, int err) { fio_stdin = in; fio_stdout = out; fio_stderr = err; } -void fio_error(int rc, int size, char const* file, int line) +void +fio_error(int rc, int size, char const* file, int line) { if (remote_agent) { @@ -115,7 +116,8 @@ void fio_error(int rc, int size, char const* file, int line) } /* Check if file descriptor is local or remote (created by FIO) */ -static bool fio_is_remote_fd(int fd) +static bool +fio_is_remote_fd(int fd) { return (fd & FIO_PIPE_MARKER) != 0; } @@ -157,14 +159,17 @@ fio_safestat(const char *path, struct stat *buf) #define stat(x, y) fio_safestat(x, y) /* TODO: use real pread on Linux */ -static ssize_t pread(int fd, void* buf, size_t size, off_t off) +static ssize_t +pread(int fd, void* buf, size_t size, off_t off) { off_t rc = lseek(fd, off, SEEK_SET); if (rc != off) return -1; return read(fd, buf, size); } -static int remove_file_or_dir(char const* path) + +static int +remove_file_or_dir(char const* path) { int rc = remove(path); #ifdef WIN32 @@ -178,7 +183,8 @@ static int remove_file_or_dir(char const* path) #endif /* Check if specified location is local for current node */ -bool fio_is_remote(fio_location location) +bool +fio_is_remote(fio_location location) { bool is_remote = MyLocation != FIO_LOCAL_HOST && location != FIO_LOCAL_HOST @@ -189,7 +195,8 @@ bool 
fio_is_remote(fio_location location) } /* Check if specified location is local for current node */ -bool fio_is_remote_simple(fio_location location) +bool +fio_is_remote_simple(fio_location location) { bool is_remote = MyLocation != FIO_LOCAL_HOST && location != FIO_LOCAL_HOST @@ -198,7 +205,8 @@ bool fio_is_remote_simple(fio_location location) } /* Try to read specified amount of bytes unless error or EOF are encountered */ -static ssize_t fio_read_all(int fd, void* buf, size_t size) +static ssize_t +fio_read_all(int fd, void* buf, size_t size) { size_t offs = 0; while (offs < size) @@ -220,7 +228,8 @@ static ssize_t fio_read_all(int fd, void* buf, size_t size) } /* Try to write specified amount of bytes unless error is encountered */ -static ssize_t fio_write_all(int fd, void const* buf, size_t size) +static ssize_t +fio_write_all(int fd, void const* buf, size_t size) { size_t offs = 0; while (offs < size) @@ -241,7 +250,8 @@ static ssize_t fio_write_all(int fd, void const* buf, size_t size) } /* Get version of remote agent */ -int fio_get_agent_version(void) +int +fio_get_agent_version(void) { fio_header hdr; hdr.cop = FIO_AGENT_VERSION; @@ -254,7 +264,8 @@ int fio_get_agent_version(void) } /* Open input stream. Remote file is fetched to the in-memory buffer and then accessed through Linux fmemopen */ -FILE* fio_open_stream(char const* path, fio_location location) +FILE* +fio_open_stream(char const* path, fio_location location) { FILE* f; if (fio_is_remote(location)) @@ -294,7 +305,8 @@ FILE* fio_open_stream(char const* path, fio_location location) } /* Close input stream */ -int fio_close_stream(FILE* f) +int +fio_close_stream(FILE* f) { if (fio_stdin_buffer) { @@ -305,7 +317,8 @@ int fio_close_stream(FILE* f) } /* Open directory */ -DIR* fio_opendir(char const* path, fio_location location) +DIR* +fio_opendir(char const* path, fio_location location) { DIR* dir; if (fio_is_remote(location)) @@ -346,7 +359,8 @@ DIR* fio_opendir(char const* path, fio_location location) } /* Get next directory entry */ -struct dirent* fio_readdir(DIR *dir) +struct dirent* +fio_readdir(DIR *dir) { if (fio_is_remote_file((FILE*)dir)) { @@ -374,7 +388,8 @@ struct dirent* fio_readdir(DIR *dir) } /* Close directory */ -int fio_closedir(DIR *dir) +int +fio_closedir(DIR *dir) { if (fio_is_remote_file((FILE*)dir)) { @@ -394,7 +409,8 @@ int fio_closedir(DIR *dir) } /* Open file */ -int fio_open(char const* path, int mode, fio_location location) +int +fio_open(char const* path, int mode, fio_location location) { int fd; if (fio_is_remote(location)) @@ -461,7 +477,8 @@ fio_disconnect(void) } /* Open stdio file */ -FILE* fio_fopen(char const* path, char const* mode, fio_location location) +FILE* +fio_fopen(char const* path, char const* mode, fio_location location) { FILE *f = NULL; @@ -506,7 +523,8 @@ FILE* fio_fopen(char const* path, char const* mode, fio_location location) } /* Format output to file stream */ -int fio_fprintf(FILE* f, char const* format, ...) +int +fio_fprintf(FILE* f, char const* format, ...) { int rc; va_list args; @@ -532,7 +550,8 @@ int fio_fprintf(FILE* f, char const* format, ...) } /* Flush stream data (does nothing for remote file) */ -int fio_fflush(FILE* f) +int +fio_fflush(FILE* f) { int rc = 0; if (!fio_is_remote_file(f)) @@ -541,13 +560,15 @@ int fio_fflush(FILE* f) } /* Sync file to the disk (does nothing for remote file) */ -int fio_flush(int fd) +int +fio_flush(int fd) { return fio_is_remote_fd(fd) ? 
0 : fsync(fd); } /* Close output stream */ -int fio_fclose(FILE* f) +int +fio_fclose(FILE* f) { return fio_is_remote_file(f) ? fio_close(fio_fileno(f)) @@ -555,7 +576,8 @@ int fio_fclose(FILE* f) } /* Close file */ -int fio_close(int fd) +int +fio_close(int fd) { if (fio_is_remote_fd(fd)) { @@ -578,7 +600,8 @@ int fio_close(int fd) } /* Truncate stdio file */ -int fio_ftruncate(FILE* f, off_t size) +int +fio_ftruncate(FILE* f, off_t size) { return fio_is_remote_file(f) ? fio_truncate(fio_fileno(f), size) @@ -588,7 +611,8 @@ int fio_ftruncate(FILE* f, off_t size) /* Truncate file * TODO: make it synchronous */ -int fio_truncate(int fd, off_t size) +int +fio_truncate(int fd, off_t size) { if (fio_is_remote_fd(fd)) { @@ -613,7 +637,8 @@ int fio_truncate(int fd, off_t size) /* * Read file from specified location. */ -int fio_pread(FILE* f, void* buf, off_t offs) +int +fio_pread(FILE* f, void* buf, off_t offs) { if (fio_is_remote_file(f)) { @@ -649,7 +674,8 @@ int fio_pread(FILE* f, void* buf, off_t offs) } /* Set position in stdio file */ -int fio_fseek(FILE* f, off_t offs) +int +fio_fseek(FILE* f, off_t offs) { return fio_is_remote_file(f) ? fio_seek(fio_fileno(f), offs) @@ -658,7 +684,8 @@ int fio_fseek(FILE* f, off_t offs) /* Set position in file */ /* TODO: make it synchronous or check async error */ -int fio_seek(int fd, off_t offs) +int +fio_seek(int fd, off_t offs) { if (fio_is_remote_fd(fd)) { @@ -699,7 +726,8 @@ fio_seek_impl(int fd, off_t offs) } /* Write data to stdio file */ -size_t fio_fwrite(FILE* f, void const* buf, size_t size) +size_t +fio_fwrite(FILE* f, void const* buf, size_t size) { if (fio_is_remote_file(f)) return fio_write(fio_fileno(f), buf, size); @@ -708,7 +736,8 @@ size_t fio_fwrite(FILE* f, void const* buf, size_t size) } /* Write data to the file synchronously */ -ssize_t fio_write(int fd, void const* buf, size_t size) +ssize_t +fio_write(int fd, void const* buf, size_t size) { if (fio_is_remote_fd(fd)) { @@ -759,7 +788,8 @@ fio_write_impl(int fd, void const* buf, size_t size, int out) return; } -size_t fio_fwrite_async(FILE* f, void const* buf, size_t size) +size_t +fio_fwrite_async(FILE* f, void const* buf, size_t size) { return fio_is_remote_file(f) ? 
fio_write_async(fio_fileno(f), buf, size) @@ -768,7 +798,8 @@ size_t fio_fwrite_async(FILE* f, void const* buf, size_t size) /* Write data to the file */ /* TODO: support async report error */ -ssize_t fio_write_async(int fd, void const* buf, size_t size) +ssize_t +fio_write_async(int fd, void const* buf, size_t size) { if (size == 0) return 0; @@ -836,7 +867,8 @@ fio_decompress(void* dst, void const* src, size_t size, int compress_alg, char * } /* Write data to the file */ -ssize_t fio_fwrite_async_compressed(FILE* f, void const* buf, size_t size, int compress_alg) +ssize_t +fio_fwrite_async_compressed(FILE* f, void const* buf, size_t size, int compress_alg) { if (fio_is_remote_file(f)) { @@ -976,7 +1008,8 @@ fio_get_async_error_impl(int out) } /* Read data from stdio file */ -ssize_t fio_fread(FILE* f, void* buf, size_t size) +ssize_t +fio_fread(FILE* f, void* buf, size_t size) { size_t rc; if (fio_is_remote_file(f)) @@ -986,7 +1019,8 @@ ssize_t fio_fread(FILE* f, void* buf, size_t size) } /* Read data from file */ -ssize_t fio_read(int fd, void* buf, size_t size) +ssize_t +fio_read(int fd, void* buf, size_t size) { if (fio_is_remote_fd(fd)) { @@ -1012,7 +1046,8 @@ ssize_t fio_read(int fd, void* buf, size_t size) } /* Get information about file */ -int fio_stat(char const* path, struct stat* st, bool follow_symlink, fio_location location) +int +fio_stat(char const* path, struct stat* st, bool follow_symlink, fio_location location) { if (fio_is_remote(location)) { @@ -1045,7 +1080,8 @@ int fio_stat(char const* path, struct stat* st, bool follow_symlink, fio_locatio } /* Check presence of the file */ -int fio_access(char const* path, int mode, fio_location location) +int +fio_access(char const* path, int mode, fio_location location) { if (fio_is_remote(location)) { @@ -1076,7 +1112,8 @@ int fio_access(char const* path, int mode, fio_location location) } /* Create symbolic link */ -int fio_symlink(char const* target, char const* link_path, bool overwrite, fio_location location) +int +fio_symlink(char const* target, char const* link_path, bool overwrite, fio_location location) { if (fio_is_remote(location)) { @@ -1103,7 +1140,8 @@ int fio_symlink(char const* target, char const* link_path, bool overwrite, fio_l } } -static void fio_symlink_impl(int out, char *buf, bool overwrite) +static void +fio_symlink_impl(int out, char *buf, bool overwrite) { char *linked_path = buf; char *link_path = buf + strlen(buf) + 1; @@ -1117,7 +1155,8 @@ static void fio_symlink_impl(int out, char *buf, bool overwrite) } /* Rename file */ -int fio_rename(char const* old_path, char const* new_path, fio_location location) +int +fio_rename(char const* old_path, char const* new_path, fio_location location) { if (fio_is_remote(location)) { @@ -1143,7 +1182,8 @@ int fio_rename(char const* old_path, char const* new_path, fio_location location } /* Sync file to disk */ -int fio_sync(char const* path, fio_location location) +int +fio_sync(char const* path, fio_location location) { if (fio_is_remote(location)) { @@ -1185,7 +1225,8 @@ int fio_sync(char const* path, fio_location location) } /* Get crc32 of file */ -pg_crc32 fio_get_crc32(const char *file_path, fio_location location, bool decompress) +pg_crc32 +fio_get_crc32(const char *file_path, fio_location location, bool decompress) { if (fio_is_remote(location)) { @@ -1216,7 +1257,8 @@ pg_crc32 fio_get_crc32(const char *file_path, fio_location location, bool decomp } /* Remove file */ -int fio_unlink(char const* path, fio_location location) +int +fio_unlink(char const* 
path, fio_location location) { if (fio_is_remote(location)) { @@ -1241,7 +1283,8 @@ int fio_unlink(char const* path, fio_location location) /* Create directory * TODO: add strict flag */ -int fio_mkdir(char const* path, int mode, fio_location location) +int +fio_mkdir(char const* path, int mode, fio_location location) { if (fio_is_remote(location)) { @@ -1267,7 +1310,8 @@ int fio_mkdir(char const* path, int mode, fio_location location) } /* Change file mode */ -int fio_chmod(char const* path, int mode, fio_location location) +int +fio_chmod(char const* path, int mode, fio_location location) { if (fio_is_remote(location)) { @@ -1562,7 +1606,8 @@ fio_gzclose(gzFile f) } } -int fio_gzeof(gzFile f) +int +fio_gzeof(gzFile f) { if ((size_t)f & FIO_GZ_REMOTE_MARKER) { @@ -1575,7 +1620,8 @@ int fio_gzeof(gzFile f) } } -const char* fio_gzerror(gzFile f, int *errnum) +const char* +fio_gzerror(gzFile f, int *errnum) { if ((size_t)f & FIO_GZ_REMOTE_MARKER) { @@ -1590,7 +1636,8 @@ const char* fio_gzerror(gzFile f, int *errnum) } } -z_off_t fio_gzseek(gzFile f, z_off_t offset, int whence) +z_off_t +fio_gzseek(gzFile f, z_off_t offset, int whence) { Assert(!((size_t)f & FIO_GZ_REMOTE_MARKER)); return gzseek(f, offset, whence); @@ -1602,7 +1649,8 @@ z_off_t fio_gzseek(gzFile f, z_off_t offset, int whence) /* Send file content * Note: it should not be used for large files. */ -static void fio_load_file(int out, char const* path) +static void +fio_load_file(int out, char const* path) { int fd = open(path, O_RDONLY); fio_header hdr; @@ -1644,7 +1692,8 @@ static void fio_load_file(int out, char const* path) * In case of DELTA mode horizonLsn must be a valid lsn, * otherwise it should be set to InvalidXLogRecPtr. */ -int fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, +int +fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version, bool use_pagemap, BlockNumber* err_blknum, char **errormsg, BackupPageHeader2 **headers) @@ -1804,7 +1853,8 @@ int fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *f * FIO_SEND_FILE_CORRUPTION * FIO_SEND_FILE_EOF */ -static void fio_send_pages_impl(int out, char* buf) +static void +fio_send_pages_impl(int out, char* buf) { FILE *in = NULL; BlockNumber blknum = 0; @@ -2074,7 +2124,8 @@ static void fio_send_pages_impl(int out, char* buf) * ZLIB_ERROR (-5) * REMOTE_ERROR (-6) */ -int fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, char **errormsg) +int +fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, char **errormsg) { fio_header hdr; int exit_code = SEND_OK; @@ -2234,7 +2285,8 @@ int fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* o * OPEN_FAILED and READ_FAIL should also set errormsg. * If pgFile is not NULL then we must calculate crc and read_size for it. 
*/ -int fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, +int +fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, pgFile *file, char **errormsg) { fio_header hdr; @@ -2315,7 +2367,8 @@ int fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, * FIO_SEND_FILE_EOF * */ -static void fio_send_file_impl(int out, char const* path) +static void +fio_send_file_impl(int out, char const* path) { FILE *fp; fio_header hdr; @@ -2406,7 +2459,8 @@ static void fio_send_file_impl(int out, char const* path) } /* Compile the array of files located on remote machine in directory root */ -static void fio_list_dir_internal(parray *files, const char *root, bool exclude, +static void +fio_list_dir_internal(parray *files, const char *root, bool exclude, bool follow_symlink, bool add_root, bool backup_logs, bool skip_hidden, int external_dir_num) { @@ -2499,7 +2553,8 @@ static void fio_list_dir_internal(parray *files, const char *root, bool exclude, * * TODO: replace FIO_SEND_FILE and FIO_SEND_FILE_EOF with dedicated messages */ -static void fio_list_dir_impl(int out, char* buf) +static void +fio_list_dir_impl(int out, char* buf) { int i; fio_header hdr; @@ -2565,7 +2620,8 @@ static void fio_list_dir_impl(int out, char* buf) } /* Wrapper for directory listing */ -void fio_list_dir(parray *files, const char *root, bool exclude, +void +fio_list_dir(parray *files, const char *root, bool exclude, bool follow_symlink, bool add_root, bool backup_logs, bool skip_hidden, int external_dir_num) { @@ -2620,7 +2676,8 @@ fio_get_checksum_map(const char *fullpath, uint32 checksum_version, int n_blocks } } -static void fio_get_checksum_map_impl(int out, char *buf) +static void +fio_get_checksum_map_impl(int out, char *buf) { fio_header hdr; PageState *checksum_map = NULL; @@ -2687,7 +2744,8 @@ fio_get_lsn_map(const char *fullpath, uint32 checksum_version, return lsn_map; } -static void fio_get_lsn_map_impl(int out, char *buf) +static void +fio_get_lsn_map_impl(int out, char *buf) { fio_header hdr; datapagemap_t *lsn_map = NULL; @@ -2713,11 +2771,60 @@ static void fio_get_lsn_map_impl(int out, char *buf) } } +/* + * Return pid of postmaster process running in given pgdata on local machine. + * Return 0 if there is none. + * Return 1 if postmaster.pid is mangled. + */ +static pid_t +local_check_postmaster(const char *pgdata) +{ + FILE *fp; + pid_t pid; + char pid_file[MAXPGPATH]; + + join_path_components(pid_file, pgdata, "postmaster.pid"); + + fp = fopen(pid_file, "r"); + if (fp == NULL) + { + /* No pid file, acceptable*/ + if (errno == ENOENT) + return 0; + else + elog(ERROR, "Cannot open file \"%s\": %s", + pid_file, strerror(errno)); + } + + if (fscanf(fp, "%i", &pid) != 1) + { + /* something is wrong with the file content */ + pid = 1; + } + + if (pid > 1) + { + if (kill(pid, 0) != 0) + { + /* process no longer exists */ + if (errno == ESRCH) + pid = 0; + else + elog(ERROR, "Failed to send signal 0 to a process %d: %s", + pid, strerror(errno)); + } + } + + fclose(fp); + return pid; +} + /* * Go to the remote host and get postmaster pid from file postmaster.pid * and check that process is running, if process is running, return its pid number. 
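 * (For a local location this is just local_check_postmaster() above; for a
 *  remote location the agent runs the same check on its side and returns the
 *  pid in hdr.arg, so callers can treat both cases identically.)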
*/ -pid_t fio_check_postmaster(const char *pgdata, fio_location location) +pid_t +fio_check_postmaster(const char *pgdata, fio_location location) { if (fio_is_remote(location)) { @@ -2734,16 +2841,17 @@ pid_t fio_check_postmaster(const char *pgdata, fio_location location) return hdr.arg; } else - return check_postmaster(pgdata); + return local_check_postmaster(pgdata); } -static void fio_check_postmaster_impl(int out, char *buf) +static void +fio_check_postmaster_impl(int out, char *buf) { fio_header hdr; pid_t postmaster_pid; char *pgdata = (char*) buf; - postmaster_pid = check_postmaster(pgdata); + postmaster_pid = local_check_postmaster(pgdata); /* send arrays of checksums to main process */ hdr.arg = postmaster_pid; @@ -2782,7 +2890,8 @@ fio_delete_impl(mode_t mode, char *buf) } /* Execute commands at remote host */ -void fio_communicate(int in, int out) +void +fio_communicate(int in, int out) { /* * Map of file and directory descriptors. @@ -2990,4 +3099,3 @@ void fio_communicate(int in, int out) exit(EXIT_FAILURE); } } - diff --git a/src/utils/file.h b/src/utils/file.h index 1eafe543d..ad65b9901 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -145,4 +145,3 @@ extern const char* fio_gzerror(gzFile file, int *errnum); #endif #endif - From 8ae217bae5ab6a6b02943f3279f87f914d685acf Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 14 Jun 2021 13:34:05 +0300 Subject: [PATCH 156/525] [Issue #394] correctly detect ENOSPC when using write(): durable_write is implemented --- src/archive.c | 1 + src/utils/file.c | 42 ++++++++++++++++++++++++++++++++---------- 2 files changed, 33 insertions(+), 10 deletions(-) diff --git a/src/archive.c b/src/archive.c index 2d858a64c..6ac1062b8 100644 --- a/src/archive.c +++ b/src/archive.c @@ -568,6 +568,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d } /* copy content */ + errno = 0; for (;;) { size_t read_len = 0; diff --git a/src/utils/file.c b/src/utils/file.c index 6dcf2288e..19e71fab2 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -796,6 +796,34 @@ fio_fwrite_async(FILE* f, void const* buf, size_t size) : fwrite(buf, 1, size, f); } +/* + * Write buffer to descriptor by calling write(), + * If size of written data is less than buffer size, + * then try to write what is left. + * We do this to get honest errno if there are some problems + * with filesystem, since writing less than buffer size + * is not considered an error. 
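+ * (Hypothetical example: on a nearly full filesystem a single write() of an
+ *  8 kB buffer may return only 4 kB with no errno set; by retrying the
+ *  remaining bytes here, the follow-up write() fails and the caller sees a
+ *  real ENOSPC instead of a silently short file, which is the failure mode
+ *  targeted by issue #394.)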
+ */ +static ssize_t +durable_write(int fd, const char* buf, size_t size) +{ + off_t current_pos = 0; + size_t bytes_left = size; + + while (bytes_left > 0) + { + int rc = write(fd, buf + current_pos, bytes_left); + + if (rc <= 0) + return rc; + + bytes_left -= rc; + current_pos += rc; + } + + return size; +} + /* Write data to the file */ /* TODO: support async report error */ ssize_t @@ -814,27 +842,21 @@ fio_write_async(int fd, void const* buf, size_t size) IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, buf, size), size); - - return size; } else - { - return write(fd, buf, size); - } + return durable_write(fd, buf, size); + + return size; } static void fio_write_async_impl(int fd, void const* buf, size_t size, int out) { - int rc; - /* Quick exit if agent is tainted */ if (async_errormsg) return; - rc = write(fd, buf, size); - - if (rc <= 0) + if (durable_write(fd, buf, size) <= 0) { async_errormsg = pgut_malloc(ERRMSG_MAX_LEN); snprintf(async_errormsg, ERRMSG_MAX_LEN, "%s", strerror(errno)); From da2cbcb2e789154f33cea3e409bd41b454a28566 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 14 Jun 2021 13:59:49 +0300 Subject: [PATCH 157/525] [Issue #394] fio_close is now synchronous --- src/utils/file.c | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index ed72d6dc0..f341b7a37 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -607,7 +607,15 @@ fio_close(int fd) fio_fdset &= ~(1 << hdr.handle); IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - /* Note, that file is closed without waiting for confirmation */ + + /* Wait for response */ + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + + if (hdr.arg != 0) + { + errno = hdr.arg; + return -1; + } return 0; } @@ -617,6 +625,22 @@ fio_close(int fd) } } +/* Close remote file implementation */ +static void +fio_close_impl(int fd, int out) +{ + fio_header hdr; + + hdr.cop = FIO_CLOSE; + hdr.arg = 0; + + if (close(fd) != 0) + hdr.arg = errno; + + /* send header */ + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); +} + /* Truncate stdio file */ int fio_ftruncate(FILE* f, off_t size) @@ -3000,7 +3024,7 @@ fio_communicate(int in, int out) IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); break; case FIO_CLOSE: /* Close file */ - SYS_CHECK(close(fd[hdr.handle])); + fio_close_impl(fd[hdr.handle], out); break; case FIO_WRITE: /* Write to the current position in file */ // IO_CHECK(fio_write_all(fd[hdr.handle], buf, hdr.size), hdr.size); From d1106e5571c8f2e13d03759fc32fa774467bc8bb Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 14 Jun 2021 14:16:07 +0300 Subject: [PATCH 158/525] [Issue #394] use durable_write in fio_write --- src/utils/file.c | 81 +++++++++++++++++++++++------------------------- 1 file changed, 39 insertions(+), 42 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 19e71fab2..ef322997f 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -735,6 +735,34 @@ fio_fwrite(FILE* f, void const* buf, size_t size) return fwrite(buf, 1, size, f); } +/* + * Write buffer to descriptor by calling write(), + * If size of written data is less than buffer size, + * then try to write what is left. + * We do this to get honest errno if there are some problems + * with filesystem, since writing less than buffer size + * is not considered an error. 
+ */ +static ssize_t +durable_write(int fd, const char* buf, size_t size) +{ + off_t current_pos = 0; + size_t bytes_left = size; + + while (bytes_left > 0) + { + int rc = write(fd, buf + current_pos, bytes_left); + + if (rc <= 0) + return rc; + + bytes_left -= rc; + current_pos += rc; + } + + return size; +} + /* Write data to the file synchronously */ ssize_t fio_write(int fd, void const* buf, size_t size) @@ -764,7 +792,7 @@ fio_write(int fd, void const* buf, size_t size) } else { - return write(fd, buf, size); + return durable_write(fd, buf, size); } } @@ -774,7 +802,7 @@ fio_write_impl(int fd, void const* buf, size_t size, int out) int rc; fio_header hdr; - rc = write(fd, buf, size); + rc = durable_write(fd, buf, size); hdr.arg = 0; hdr.size = 0; @@ -796,34 +824,6 @@ fio_fwrite_async(FILE* f, void const* buf, size_t size) : fwrite(buf, 1, size, f); } -/* - * Write buffer to descriptor by calling write(), - * If size of written data is less than buffer size, - * then try to write what is left. - * We do this to get honest errno if there are some problems - * with filesystem, since writing less than buffer size - * is not considered an error. - */ -static ssize_t -durable_write(int fd, const char* buf, size_t size) -{ - off_t current_pos = 0; - size_t bytes_left = size; - - while (bytes_left > 0) - { - int rc = write(fd, buf + current_pos, bytes_left); - - if (rc <= 0) - return rc; - - bytes_left -= rc; - current_pos += rc; - } - - return size; -} - /* Write data to the file */ /* TODO: support async report error */ ssize_t @@ -908,23 +908,22 @@ fio_fwrite_async_compressed(FILE* f, void const* buf, size_t size, int compress_ } else { - char uncompressed_buf[BLCKSZ]; char *errormsg = NULL; - int32 uncompressed_size = fio_decompress(uncompressed_buf, buf, size, compress_alg, &errormsg); + char decompressed_buf[BLCKSZ]; + int32 decompressed_size = fio_decompress(decompressed_buf, buf, size, compress_alg, &errormsg); - if (uncompressed_size < 0) + if (decompressed_size < 0) elog(ERROR, "%s", errormsg); - return fwrite(uncompressed_buf, 1, uncompressed_size, f); + return fwrite(decompressed_buf, 1, decompressed_size, f); } } static void fio_write_compressed_impl(int fd, void const* buf, size_t size, int compress_alg) { - int rc; - int32 uncompressed_size; - char uncompressed_buf[BLCKSZ]; + int32 decompressed_size; + char decompressed_buf[BLCKSZ]; /* If the previous command already have failed, * then there is no point in bashing a head against the wall @@ -933,14 +932,12 @@ fio_write_compressed_impl(int fd, void const* buf, size_t size, int compress_alg return; /* decompress chunk */ - uncompressed_size = fio_decompress(uncompressed_buf, buf, size, compress_alg, &async_errormsg); + decompressed_size = fio_decompress(decompressed_buf, buf, size, compress_alg, &async_errormsg); - if (uncompressed_size < 0) + if (decompressed_size < 0) return; - rc = write(fd, uncompressed_buf, uncompressed_size); - - if (rc <= 0) + if (durable_write(fd, decompressed_buf, decompressed_size) <= 0) { async_errormsg = pgut_malloc(ERRMSG_MAX_LEN); snprintf(async_errormsg, ERRMSG_MAX_LEN, "%s", strerror(errno)); From 6e8e948fd6d784b58b90f4f05bdb9eccee7b104e Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 17 Jun 2021 02:32:36 +0300 Subject: [PATCH 159/525] [Issue #313] added test coverage --- tests/helpers/ptrack_helpers.py | 3 ++ tests/restore.py | 66 ++++++++++++++++++++++++++++++++- 2 files changed, 68 insertions(+), 1 deletion(-) diff --git a/tests/helpers/ptrack_helpers.py 
b/tests/helpers/ptrack_helpers.py index 3caba25df..b6dc6d028 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1950,6 +1950,9 @@ def stopped_in_breakpoint(self): return True return False + def quit(self): + self.proc.terminate() + # use for breakpoint, run, continue def _execute(self, cmd, running=True): output = [] diff --git a/tests/restore.py b/tests/restore.py index 61aae9285..8ccffa44c 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -9,7 +9,8 @@ import shutil import json from shutil import copyfile -from testgres import QueryException +from testgres import QueryException, StartNodeException +from stat import S_ISDIR module_name = 'restore' @@ -3856,3 +3857,66 @@ def test_concurrent_restore(self): # Clean after yourself self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_restore_issue_313(self): + """ + Check that partially restored PostgreSQL instance cannot be started + """ + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + node.cleanup() + + count = 0 + filelist = self.get_backup_filelist(backup_dir, 'node', backup_id) + for file in filelist: + # count only nondata files + if int(filelist[file]['is_datafile']) == 0 and int(filelist[file]['size']) > 0: + count += 1 + + node_restored = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node_restored')) + node_restored.cleanup() + self.restore_node(backup_dir, 'node', node_restored) + + gdb = self.restore_node(backup_dir, 'node', node, gdb=True, options=['--progress']) + gdb.verbose = False + gdb.set_breakpoint('restore_non_data_file') + gdb.run_until_break() + gdb.continue_execution_until_break(count - 2) + gdb.quit() + + # emulate the user or HA taking care of PG configuration + for fname in os.listdir(node_restored.data_dir): + if fname.endswith('.conf'): + os.rename( + os.path.join(node_restored.data_dir, fname), + os.path.join(node.data_dir, fname)) + + try: + node.slow_start() + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because backup is not fully restored") + except StartNodeException as e: + self.assertIn( + 'Cannot start node', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # Clean after yourself + self.del_test_dir(module_name, fname) From cce59bc824262656b4ad1396465dcfd40a56c3de Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Thu, 17 Jun 2021 19:11:36 +0300 Subject: [PATCH 160/525] Remove ptrack-1.* support (#398) * Remove ptrack-1.* support * [PR #398] review feedback * [PR #398] remove conn_arg from backup_files_arg Co-authored-by: Grigory Smolkin --- doc/pgprobackup.xml | 30 +-- src/backup.c | 71 ++---- src/data.c | 239 +++++++----------- src/dir.c | 10 - src/merge.c | 2 +- src/pg_probackup.h | 27 +- src/ptrack.c | 448 +-------------------------------- src/utils/file.c | 3 - tests/auth_test.py | 4 +- tests/backup.py | 1 + tests/false_positive.py | 186 -------------- tests/ptrack.py | 536 +++++++++++++++++----------------------- tests/restore.py | 2 +- 13 files changed, 352 insertions(+), 1207 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 2bf197814..b1ddd0032 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -1162,7 +1162,7 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; - PTRACK versions lower than 2.0 are deprecated. Postgres Pro Standard and Postgres Pro Enterprise + PTRACK versions lower than 2.0 are deprecated and not supported. Postgres Pro Standard and Postgres Pro Enterprise versions starting with 11.9.1 contain PTRACK 2.0. Upgrade your server to avoid issues in backups that you will take in future and be sure to take fresh backups of your clusters with the upgraded PTRACK since the backups taken with PTRACK 1.x might be corrupt. @@ -1218,34 +1218,6 @@ CREATE EXTENSION ptrack; - - For older PostgreSQL versions, - PTRACK required taking backups in the exclusive mode - to provide exclusive access to bitmaps with changed blocks. - To set up PTRACK backups for PostgreSQL 10 - or lower, do the following: - - - - - Set the ptrack_enable parameter to - on. - - - - - Grant the right to execute PTRACK - functions to the backup role - in every database of the - cluster: - - -GRANT EXECUTE ON FUNCTION pg_catalog.pg_ptrack_clear() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_ptrack_get_and_clear(oid, oid) TO backup; - - - - diff --git a/src/backup.c b/src/backup.c index 46e4f1ea7..738b6dcf2 100644 --- a/src/backup.c +++ b/src/backup.c @@ -125,10 +125,6 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, check_external_for_tablespaces(external_dirs, backup_conn); } - /* Clear ptrack files for not PTRACK backups */ - if (current.backup_mode != BACKUP_MODE_DIFF_PTRACK && nodeInfo->is_ptrack_enable) - pg_ptrack_clear(backup_conn, nodeInfo->ptrack_version_num); - /* notify start of backup to PostgreSQL server */ time2iso(label, lengthof(label), current.start_time, false); strncat(label, " with pg_probackup", lengthof(label) - @@ -217,29 +213,14 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, { XLogRecPtr ptrack_lsn = get_last_ptrack_lsn(backup_conn, nodeInfo); - if (nodeInfo->ptrack_version_num < 200) + // new ptrack (>=2.0) is more robust and checks Start LSN + if (ptrack_lsn > prev_backup->start_lsn || ptrack_lsn == InvalidXLogRecPtr) { - // backward compatibility kludge: use Stop LSN for ptrack 1.x, - if (ptrack_lsn > prev_backup->stop_lsn || ptrack_lsn == InvalidXLogRecPtr) - { - elog(ERROR, "LSN from ptrack_control %X/%X differs from Stop LSN of previous backup %X/%X.\n" - "Create new full backup before an incremental one.", - (uint32) (ptrack_lsn >> 32), (uint32) (ptrack_lsn), - (uint32) (prev_backup->stop_lsn >> 32), - (uint32) (prev_backup->stop_lsn)); - } - } - else - { - // new ptrack is more robust and checks Start LSN - if (ptrack_lsn > 
prev_backup->start_lsn || ptrack_lsn == InvalidXLogRecPtr) - { - elog(ERROR, "LSN from ptrack_control %X/%X is greater than Start LSN of previous backup %X/%X.\n" - "Create new full backup before an incremental one.", - (uint32) (ptrack_lsn >> 32), (uint32) (ptrack_lsn), - (uint32) (prev_backup->start_lsn >> 32), - (uint32) (prev_backup->start_lsn)); - } + elog(ERROR, "LSN from ptrack_control %X/%X is greater than Start LSN of previous backup %X/%X.\n" + "Create new full backup before an incremental one.", + (uint32) (ptrack_lsn >> 32), (uint32) (ptrack_lsn), + (uint32) (prev_backup->start_lsn >> 32), + (uint32) (prev_backup->start_lsn)); } } @@ -407,15 +388,10 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, /* * Build the page map from ptrack information. */ - if (nodeInfo->ptrack_version_num >= 200) - make_pagemap_from_ptrack_2(backup_files_list, backup_conn, - nodeInfo->ptrack_schema, - nodeInfo->ptrack_version_num, - prev_backup_start_lsn); - else if (nodeInfo->ptrack_version_num == 105 || - nodeInfo->ptrack_version_num == 106 || - nodeInfo->ptrack_version_num == 107) - make_pagemap_from_ptrack_1(backup_files_list, backup_conn); + make_pagemap_from_ptrack_2(backup_files_list, backup_conn, + nodeInfo->ptrack_schema, + nodeInfo->ptrack_version_num, + prev_backup_start_lsn); } time(&end_time); @@ -490,8 +466,6 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, arg->files_list = backup_files_list; arg->prev_filelist = prev_backup_filelist; arg->prev_start_lsn = prev_backup_start_lsn; - arg->conn_arg.conn = NULL; - arg->conn_arg.cancel_conn = NULL; arg->hdr_map = &(current.hdr_map); arg->thread_num = i+1; /* By default there are some error */ @@ -816,6 +790,7 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK) { + /* ptrack_version_num < 2.0 was already checked in get_ptrack_version() */ if (nodeInfo.ptrack_version_num == 0) elog(ERROR, "This PostgreSQL instance does not support ptrack"); else @@ -2085,15 +2060,15 @@ backup_files(void *arg) /* backup file */ if (file->is_datafile && !file->is_cfs) { - backup_data_file(&(arguments->conn_arg), file, from_fullpath, to_fullpath, - arguments->prev_start_lsn, - current.backup_mode, - instance_config.compress_alg, - instance_config.compress_level, - arguments->nodeInfo->checksum_version, - arguments->nodeInfo->ptrack_version_num, - arguments->nodeInfo->ptrack_schema, - arguments->hdr_map, false); + backup_data_file(file, from_fullpath, to_fullpath, + arguments->prev_start_lsn, + current.backup_mode, + instance_config.compress_alg, + instance_config.compress_level, + arguments->nodeInfo->checksum_version, + arguments->nodeInfo->ptrack_version_num, + arguments->nodeInfo->ptrack_schema, + arguments->hdr_map, false); } else { @@ -2117,10 +2092,6 @@ backup_files(void *arg) /* ssh connection to longer needed */ fio_disconnect(); - /* Close connection */ - if (arguments->conn_arg.conn) - pgut_disconnect(arguments->conn_arg.conn); - /* Data files transferring is successful */ arguments->ret = 0; diff --git a/src/data.c b/src/data.c index 9d8bfc584..314490585 100644 --- a/src/data.c +++ b/src/data.c @@ -276,8 +276,7 @@ get_checksum_errormsg(Page page, char **errormsg, BlockNumber absolute_blkno) * return it to the caller */ static int32 -prepare_page(ConnectionArgs *conn_arg, - pgFile *file, XLogRecPtr prev_backup_start_lsn, +prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, BlockNumber blknum, FILE *in, BackupMode backup_mode, 
Page page, bool strict, @@ -290,6 +289,7 @@ prepare_page(ConnectionArgs *conn_arg, int try_again = PAGE_READ_ATTEMPTS; bool page_is_valid = false; BlockNumber absolute_blknum = file->segno * RELSEG_SIZE + blknum; + int rc = 0; /* check for interrupt */ if (interrupted || thread_interrupted) @@ -300,161 +300,97 @@ prepare_page(ConnectionArgs *conn_arg, * Under high write load it's possible that we've read partly * flushed page, so try several times before throwing an error. */ - if (backup_mode != BACKUP_MODE_DIFF_PTRACK || ptrack_version_num >= 200) + while (!page_is_valid && try_again--) { - int rc = 0; - while (!page_is_valid && try_again--) - { - /* read the block */ - int read_len = fio_pread(in, page, blknum * BLCKSZ); + /* read the block */ + int read_len = fio_pread(in, page, blknum * BLCKSZ); - /* The block could have been truncated. It is fine. */ - if (read_len == 0) - { - elog(VERBOSE, "Cannot read block %u of \"%s\": " - "block truncated", blknum, from_fullpath); - return PageIsTruncated; - } - else if (read_len < 0) - elog(ERROR, "Cannot read block %u of \"%s\": %s", - blknum, from_fullpath, strerror(errno)); - else if (read_len != BLCKSZ) - elog(WARNING, "Cannot read block %u of \"%s\": " - "read %i of %d, try again", - blknum, from_fullpath, read_len, BLCKSZ); - else + /* The block could have been truncated. It is fine. */ + if (read_len == 0) + { + elog(VERBOSE, "Cannot read block %u of \"%s\": " + "block truncated", blknum, from_fullpath); + return PageIsTruncated; + } + else if (read_len < 0) + elog(ERROR, "Cannot read block %u of \"%s\": %s", + blknum, from_fullpath, strerror(errno)); + else if (read_len != BLCKSZ) + elog(WARNING, "Cannot read block %u of \"%s\": " + "read %i of %d, try again", + blknum, from_fullpath, read_len, BLCKSZ); + else + { + /* We have BLCKSZ of raw data, validate it */ + rc = validate_one_page(page, absolute_blknum, + InvalidXLogRecPtr, page_st, + checksum_version); + switch (rc) { - /* We have BLCKSZ of raw data, validate it */ - rc = validate_one_page(page, absolute_blknum, - InvalidXLogRecPtr, page_st, - checksum_version); - switch (rc) - { - case PAGE_IS_ZEROED: - elog(VERBOSE, "File: \"%s\" blknum %u, empty page", from_fullpath, blknum); + case PAGE_IS_ZEROED: + elog(VERBOSE, "File: \"%s\" blknum %u, empty page", from_fullpath, blknum); + return PageIsOk; + + case PAGE_IS_VALID: + /* in DELTA or PTRACK modes we must compare lsn */ + if (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) + page_is_valid = true; + else return PageIsOk; - - case PAGE_IS_VALID: - /* in DELTA or PTRACK modes we must compare lsn */ - if (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) - page_is_valid = true; - else - return PageIsOk; - break; - - case PAGE_HEADER_IS_INVALID: - elog(VERBOSE, "File: \"%s\" blknum %u have wrong page header, try again", - from_fullpath, blknum); - break; - - case PAGE_CHECKSUM_MISMATCH: - elog(VERBOSE, "File: \"%s\" blknum %u have wrong checksum, try again", - from_fullpath, blknum); - break; - default: - Assert(false); - } + break; + + case PAGE_HEADER_IS_INVALID: + elog(VERBOSE, "File: \"%s\" blknum %u have wrong page header, try again", + from_fullpath, blknum); + break; + + case PAGE_CHECKSUM_MISMATCH: + elog(VERBOSE, "File: \"%s\" blknum %u have wrong checksum, try again", + from_fullpath, blknum); + break; + default: + Assert(false); } } - - /* - * If page is not valid after 100 attempts to read it - * throw an error. 
- */ - if (!page_is_valid) - { - int elevel = ERROR; - char *errormsg = NULL; - - /* Get the details of corruption */ - if (rc == PAGE_HEADER_IS_INVALID) - get_header_errormsg(page, &errormsg); - else if (rc == PAGE_CHECKSUM_MISMATCH) - get_checksum_errormsg(page, &errormsg, - file->segno * RELSEG_SIZE + blknum); - - /* Error out in case of merge or backup without ptrack support; - * issue warning in case of checkdb or backup with ptrack support - */ - if (!strict) - elevel = WARNING; - - if (errormsg) - elog(elevel, "Corruption detected in file \"%s\", block %u: %s", - from_fullpath, blknum, errormsg); - else - elog(elevel, "Corruption detected in file \"%s\", block %u", - from_fullpath, blknum); - - pg_free(errormsg); - return PageIsCorrupted; - } - - /* Checkdb not going futher */ - if (!strict) - return PageIsOk; } /* - * Get page via ptrack interface from PostgreSQL shared buffer. - * We do this only in the cases of PTRACK 1.x versions backup + * If page is not valid after PAGE_READ_ATTEMPTS attempts to read it + * throw an error. */ - if (backup_mode == BACKUP_MODE_DIFF_PTRACK - && (ptrack_version_num >= 105 && ptrack_version_num < 200)) + if (!page_is_valid) { - int rc = 0; - size_t page_size = 0; - Page ptrack_page = NULL; - ptrack_page = (Page) pg_ptrack_get_block(conn_arg, file->dbOid, file->tblspcOid, - file->relOid, absolute_blknum, &page_size, - ptrack_version_num, ptrack_schema); - - if (ptrack_page == NULL) - /* This block was truncated.*/ - return PageIsTruncated; - - if (page_size != BLCKSZ) - elog(ERROR, "File: \"%s\", block %u, expected block size %d, but read %zu", - from_fullpath, blknum, BLCKSZ, page_size); - - /* - * We need to copy the page that was successfully - * retrieved from ptrack into our output "page" parameter. - */ - memcpy(page, ptrack_page, BLCKSZ); - pg_free(ptrack_page); - - /* - * UPD: It apprears that is possible to get zeroed page or page with invalid header - * from shared buffer. - * Note, that getting page with wrong checksumm from shared buffer is - * acceptable. - */ - rc = validate_one_page(page, absolute_blknum, - InvalidXLogRecPtr, page_st, - checksum_version); - - /* It is ok to get zeroed page */ - if (rc == PAGE_IS_ZEROED) - return PageIsOk; + int elevel = ERROR; + char *errormsg = NULL; - /* Getting page with invalid header from shared buffers is unacceptable */ + /* Get the details of corruption */ if (rc == PAGE_HEADER_IS_INVALID) - { - char *errormsg = NULL; get_header_errormsg(page, &errormsg); - elog(ERROR, "Corruption detected in file \"%s\", block %u: %s", - from_fullpath, blknum, errormsg); - } + else if (rc == PAGE_CHECKSUM_MISMATCH) + get_checksum_errormsg(page, &errormsg, + file->segno * RELSEG_SIZE + blknum); - /* - * We must set checksum here, because it is outdated - * in the block recieved from shared buffers. + /* Error out in case of merge or backup without ptrack support; + * issue warning in case of checkdb or backup with ptrack support */ - if (checksum_version) - page_st->checksum = ((PageHeader) page)->pd_checksum = pg_checksum_page(page, absolute_blknum); + if (!strict) + elevel = WARNING; + + if (errormsg) + elog(elevel, "Corruption detected in file \"%s\", block %u: %s", + from_fullpath, blknum, errormsg); + else + elog(elevel, "Corruption detected in file \"%s\", block %u", + from_fullpath, blknum); + + pg_free(errormsg); + return PageIsCorrupted; } + /* Checkdb not going futher */ + if (!strict) + return PageIsOk; + /* * Skip page if page lsn is less than START_LSN of parent backup. 
* Nullified pages must be copied by DELTA backup, just to be safe. @@ -531,8 +467,7 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum, * backup with special header. */ void -backup_data_file(ConnectionArgs* conn_arg, pgFile *file, - const char *from_fullpath, const char *to_fullpath, +backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath, XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode, CompressAlg calg, int clevel, uint32 checksum_version, int ptrack_version_num, const char *ptrack_schema, @@ -614,7 +549,7 @@ backup_data_file(ConnectionArgs* conn_arg, pgFile *file, else { /* TODO: stop handling errors internally */ - rc = send_pages(conn_arg, to_fullpath, from_fullpath, file, + rc = send_pages(to_fullpath, from_fullpath, file, /* send prev backup START_LSN */ (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr, @@ -1563,10 +1498,10 @@ check_data_file(ConnectionArgs *arguments, pgFile *file, for (blknum = 0; blknum < nblocks; blknum++) { PageState page_st; - page_state = prepare_page(NULL, file, InvalidXLogRecPtr, - blknum, in, BACKUP_MODE_FULL, - curr_page, false, checksum_version, - 0, NULL, from_fullpath, &page_st); + page_state = prepare_page(file, InvalidXLogRecPtr, + blknum, in, BACKUP_MODE_FULL, + curr_page, false, checksum_version, + 0, NULL, from_fullpath, &page_st); if (page_state == PageIsTruncated) break; @@ -1994,7 +1929,7 @@ open_local_file_rw(const char *to_fullpath, char **out_buf, uint32 buf_size) /* backup local file */ int -send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const char *from_fullpath, +send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel, uint32 checksum_version, bool use_pagemap, BackupPageHeader2 **headers, BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema) @@ -2052,11 +1987,11 @@ send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const char *from_f while (blknum < file->n_blocks) { PageState page_st; - int rc = prepare_page(conn_arg, file, prev_backup_start_lsn, - blknum, in, backup_mode, curr_page, - true, checksum_version, - ptrack_version_num, ptrack_schema, - from_fullpath, &page_st); + int rc = prepare_page(file, prev_backup_start_lsn, + blknum, in, backup_mode, curr_page, + true, checksum_version, + ptrack_version_num, ptrack_schema, + from_fullpath, &page_st); if (rc == PageIsTruncated) break; diff --git a/src/dir.c b/src/dir.c index dfcddd7d0..ce255d0ad 100644 --- a/src/dir.c +++ b/src/dir.c @@ -677,26 +677,16 @@ dir_check_file(pgFile *file, bool backup_logs) */ if (sscanf_res == 2 && strcmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY) != 0) return CHECK_FALSE; - - if (sscanf_res == 3 && S_ISDIR(file->mode) && - strcmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY) == 0) - file->is_database = true; } else if (path_is_prefix_of_path("global", file->rel_path)) { file->tblspcOid = GLOBALTABLESPACE_OID; - - if (S_ISDIR(file->mode) && strcmp(file->name, "global") == 0) - file->is_database = true; } else if (path_is_prefix_of_path("base", file->rel_path)) { file->tblspcOid = DEFAULTTABLESPACE_OID; sscanf(file->rel_path, "base/%u/", &(file->dbOid)); - - if (S_ISDIR(file->mode) && strcmp(file->name, "base") != 0) - file->is_database = true; } /* Do not backup ptrack_init files */ diff --git a/src/merge.c b/src/merge.c index 6e0e74940..e59b359fe 100644 --- a/src/merge.c +++ 
b/src/merge.c @@ -1253,7 +1253,7 @@ merge_data_file(parray *parent_chain, pgBackup *full_backup, * 2 backups of old versions, where n_blocks is missing. */ - backup_data_file(NULL, tmp_file, to_fullpath_tmp1, to_fullpath_tmp2, + backup_data_file(tmp_file, to_fullpath_tmp1, to_fullpath_tmp2, InvalidXLogRecPtr, BACKUP_MODE_FULL, dest_backup->compress_alg, dest_backup->compress_level, dest_backup->checksum_version, 0, NULL, diff --git a/src/pg_probackup.h b/src/pg_probackup.h index f5fd0f672..ccbf803fd 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -264,7 +264,6 @@ typedef struct pgFile int segno; /* Segment number for ptrack */ int n_blocks; /* number of blocks in the data file in data directory */ bool is_cfs; /* Flag to distinguish files compressed by CFS*/ - bool is_database; /* Flag used strictly by ptrack 1.x backup */ int external_dir_num; /* Number of external directory. 0 if not external */ bool exists_in_prev; /* Mark files, both data and regular, that exists in previous backup */ CompressAlg compress_alg; /* compression algorithm applied to the file */ @@ -589,7 +588,6 @@ typedef struct parray *external_dirs; XLogRecPtr prev_start_lsn; - ConnectionArgs conn_arg; int thread_num; HeaderMap *hdr_map; @@ -842,10 +840,6 @@ extern const char *deparse_backup_mode(BackupMode mode); extern void process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno); -extern char *pg_ptrack_get_block(ConnectionArgs *arguments, - Oid dbOid, Oid tblsOid, Oid relOid, - BlockNumber blknum, size_t *result_size, - int ptrack_version_num, const char *ptrack_schema); /* in restore.c */ extern int do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, @@ -1067,12 +1061,11 @@ extern void pfilearray_clear_locks(parray *file_list); extern bool check_data_file(ConnectionArgs *arguments, pgFile *file, const char *from_fullpath, uint32 checksum_version); -extern void backup_data_file(ConnectionArgs* conn_arg, pgFile *file, - const char *from_fullpath, const char *to_fullpath, - XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode, - CompressAlg calg, int clevel, uint32 checksum_version, - int ptrack_version_num, const char *ptrack_schema, - HeaderMap *hdr_map, bool missing_ok); +extern void backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath, + XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode, + CompressAlg calg, int clevel, uint32 checksum_version, + int ptrack_version_num, const char *ptrack_schema, + HeaderMap *hdr_map, bool missing_ok); extern void backup_non_data_file(pgFile *file, pgFile *prev_file, const char *from_fullpath, const char *to_fullpath, BackupMode backup_mode, time_t parent_backup_time, @@ -1172,20 +1165,12 @@ extern void check_system_identifiers(PGconn *conn, char *pgdata); extern void parse_filelist_filenames(parray *files, const char *root); /* in ptrack.c */ -extern void make_pagemap_from_ptrack_1(parray* files, PGconn* backup_conn); extern void make_pagemap_from_ptrack_2(parray* files, PGconn* backup_conn, const char *ptrack_schema, int ptrack_version_num, XLogRecPtr lsn); -extern void pg_ptrack_clear(PGconn *backup_conn, int ptrack_version_num); extern void get_ptrack_version(PGconn *backup_conn, PGNodeInfo *nodeInfo); extern bool pg_ptrack_enable(PGconn *backup_conn, int ptrack_version_num); -extern bool pg_ptrack_get_and_clear_db(Oid dbOid, Oid tblspcOid, PGconn *backup_conn); -extern char *pg_ptrack_get_and_clear(Oid tablespace_oid, - Oid db_oid, - Oid rel_oid, - size_t *result_size, - PGconn 
*backup_conn); extern XLogRecPtr get_last_ptrack_lsn(PGconn *backup_conn, PGNodeInfo *nodeInfo); extern parray * pg_ptrack_get_pagemapset(PGconn *backup_conn, const char *ptrack_schema, int ptrack_version_num, XLogRecPtr lsn); @@ -1193,7 +1178,7 @@ extern parray * pg_ptrack_get_pagemapset(PGconn *backup_conn, const char *ptrack /* open local file to writing */ extern FILE* open_local_file_rw(const char *to_fullpath, char **out_buf, uint32 buf_size); -extern int send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const char *from_fullpath, +extern int send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel, uint32 checksum_version, bool use_pagemap, BackupPageHeader2 **headers, BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema); diff --git a/src/ptrack.c b/src/ptrack.c index 5a2b9f046..6825686c6 100644 --- a/src/ptrack.c +++ b/src/ptrack.c @@ -2,7 +2,7 @@ * * ptrack.c: support functions for ptrack backups * - * Copyright (c) 2019 Postgres Professional + * Copyright (c) 2021 Postgres Professional * *------------------------------------------------------------------------- */ @@ -21,124 +21,6 @@ #define PTRACK_BITS_PER_HEAPBLOCK 1 #define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / PTRACK_BITS_PER_HEAPBLOCK) -/* - * Given a list of files in the instance to backup, build a pagemap for each - * data file that has ptrack. Result is saved in the pagemap field of pgFile. - * NOTE we rely on the fact that provided parray is sorted by file->rel_path. - */ -void -make_pagemap_from_ptrack_1(parray *files, PGconn *backup_conn) -{ - size_t i; - Oid dbOid_with_ptrack_init = 0; - Oid tblspcOid_with_ptrack_init = 0; - char *ptrack_nonparsed = NULL; - size_t ptrack_nonparsed_size = 0; - - for (i = 0; i < parray_num(files); i++) - { - pgFile *file = (pgFile *) parray_get(files, i); - size_t start_addr; - - /* - * If there is a ptrack_init file in the database, - * we must backup all its files, ignoring ptrack files for relations. - */ - if (file->is_database) - { - /* - * The function pg_ptrack_get_and_clear_db returns true - * if there was a ptrack_init file. - * Also ignore ptrack files for global tablespace, - * to avoid any possible specific errors. - */ - if ((file->tblspcOid == GLOBALTABLESPACE_OID) || - pg_ptrack_get_and_clear_db(file->dbOid, file->tblspcOid, backup_conn)) - { - dbOid_with_ptrack_init = file->dbOid; - tblspcOid_with_ptrack_init = file->tblspcOid; - } - } - - if (file->is_datafile) - { - if (file->tblspcOid == tblspcOid_with_ptrack_init && - file->dbOid == dbOid_with_ptrack_init) - { - /* ignore ptrack if ptrack_init exists */ - elog(VERBOSE, "Ignoring ptrack because of ptrack_init for file: %s", file->rel_path); - file->pagemap_isabsent = true; - continue; - } - - /* get ptrack bitmap once for all segments of the file */ - if (file->segno == 0) - { - /* release previous value */ - pg_free(ptrack_nonparsed); - ptrack_nonparsed_size = 0; - - ptrack_nonparsed = pg_ptrack_get_and_clear(file->tblspcOid, file->dbOid, - file->relOid, &ptrack_nonparsed_size, backup_conn); - } - - if (ptrack_nonparsed != NULL) - { - /* - * pg_ptrack_get_and_clear() returns ptrack with VARHDR cut out. - * Compute the beginning of the ptrack map related to this segment - * - * HEAPBLOCKS_PER_BYTE. Number of heap pages one ptrack byte can track: 8 - * RELSEG_SIZE. Number of Pages per segment: 131072 - * RELSEG_SIZE/HEAPBLOCKS_PER_BYTE. 
number of bytes in ptrack file needed - * to keep track on one relsegment: 16384 - */ - start_addr = (RELSEG_SIZE/HEAPBLOCKS_PER_BYTE)*file->segno; - - /* - * If file segment was created after we have read ptrack, - * we won't have a bitmap for this segment. - */ - if (start_addr > ptrack_nonparsed_size) - { - elog(VERBOSE, "Ptrack is missing for file: %s", file->rel_path); - file->pagemap_isabsent = true; - } - else - { - - if (start_addr + RELSEG_SIZE/HEAPBLOCKS_PER_BYTE > ptrack_nonparsed_size) - { - file->pagemap.bitmapsize = ptrack_nonparsed_size - start_addr; - elog(VERBOSE, "pagemap size: %i", file->pagemap.bitmapsize); - } - else - { - file->pagemap.bitmapsize = RELSEG_SIZE/HEAPBLOCKS_PER_BYTE; - elog(VERBOSE, "pagemap size: %i", file->pagemap.bitmapsize); - } - - file->pagemap.bitmap = pg_malloc(file->pagemap.bitmapsize); - memcpy(file->pagemap.bitmap, ptrack_nonparsed+start_addr, file->pagemap.bitmapsize); - } - } - else - { - /* - * If ptrack file is missing, try to copy the entire file. - * It can happen in two cases: - * - files were created by commands that bypass buffer manager - * and, correspondingly, ptrack mechanism. - * i.e. CREATE DATABASE - * - target relation was deleted. - */ - elog(VERBOSE, "Ptrack is missing for file: %s", file->rel_path); - file->pagemap_isabsent = true; - } - } - } -} - /* * Parse a string like "2.1" into int * result: int by formula major_number * 100 + minor_number @@ -218,7 +100,7 @@ get_ptrack_version(PGconn *backup_conn, PGNodeInfo *nodeInfo) nodeInfo->ptrack_version_num = ptrack_version_num; /* ptrack 1.X is buggy, so fall back to DELTA backup strategy for safety */ - if (nodeInfo->ptrack_version_num >= 105 && nodeInfo->ptrack_version_num < 200) + if (nodeInfo->ptrack_version_num < 200) { if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK) { @@ -241,12 +123,7 @@ pg_ptrack_enable(PGconn *backup_conn, int ptrack_version_num) PGresult *res_db; bool result = false; - if (ptrack_version_num < 200) - { - res_db = pgut_execute(backup_conn, "SHOW ptrack_enable", 0, NULL); - result = strcmp(PQgetvalue(res_db, 0, 0), "on") == 0; - } - else if (ptrack_version_num == 200) + if (ptrack_version_num == 200) { res_db = pgut_execute(backup_conn, "SHOW ptrack_map_size", 0, NULL); result = strcmp(PQgetvalue(res_db, 0, 0), "0") != 0; @@ -262,214 +139,6 @@ pg_ptrack_enable(PGconn *backup_conn, int ptrack_version_num) return result; } - -/* ---------------------------- - * Ptrack 1.* support functions - * ---------------------------- - */ - -/* Clear ptrack files in all databases of the instance we connected to */ -void -pg_ptrack_clear(PGconn *backup_conn, int ptrack_version_num) -{ - PGresult *res_db, - *res; - const char *dbname; - int i; - Oid dbOid, tblspcOid; - char *params[2]; - - // FIXME Perform this check on caller's side - if (ptrack_version_num >= 200) - return; - - params[0] = palloc(64); - params[1] = palloc(64); - res_db = pgut_execute(backup_conn, "SELECT datname, oid, dattablespace FROM pg_database", - 0, NULL); - - for(i = 0; i < PQntuples(res_db); i++) - { - PGconn *tmp_conn; - - dbname = PQgetvalue(res_db, i, 0); - if (strcmp(dbname, "template0") == 0) - continue; - - dbOid = atoll(PQgetvalue(res_db, i, 1)); - tblspcOid = atoll(PQgetvalue(res_db, i, 2)); - - tmp_conn = pgut_connect(instance_config.conn_opt.pghost, instance_config.conn_opt.pgport, - dbname, - instance_config.conn_opt.pguser); - - res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_clear()", - 0, NULL); - PQclear(res); - - sprintf(params[0], "%i", dbOid); - 
sprintf(params[1], "%i", tblspcOid); - res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear_db($1, $2)", - 2, (const char **)params); - PQclear(res); - - pgut_disconnect(tmp_conn); - } - - pfree(params[0]); - pfree(params[1]); - PQclear(res_db); -} - -bool -pg_ptrack_get_and_clear_db(Oid dbOid, Oid tblspcOid, PGconn *backup_conn) -{ - char *params[2]; - char *dbname; - PGresult *res_db; - PGresult *res; - bool result; - - params[0] = palloc(64); - params[1] = palloc(64); - - sprintf(params[0], "%i", dbOid); - res_db = pgut_execute(backup_conn, - "SELECT datname FROM pg_database WHERE oid=$1", - 1, (const char **) params); - /* - * If database is not found, it's not an error. - * It could have been deleted since previous backup. - */ - if (PQntuples(res_db) != 1 || PQnfields(res_db) != 1) - return false; - - dbname = PQgetvalue(res_db, 0, 0); - - /* Always backup all files from template0 database */ - if (strcmp(dbname, "template0") == 0) - { - PQclear(res_db); - return true; - } - PQclear(res_db); - - sprintf(params[0], "%i", dbOid); - sprintf(params[1], "%i", tblspcOid); - res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear_db($1, $2)", - 2, (const char **)params); - - if (PQnfields(res) != 1) - elog(ERROR, "cannot perform pg_ptrack_get_and_clear_db()"); - - if (!parse_bool(PQgetvalue(res, 0, 0), &result)) - elog(ERROR, - "result of pg_ptrack_get_and_clear_db() is invalid: %s", - PQgetvalue(res, 0, 0)); - - PQclear(res); - pfree(params[0]); - pfree(params[1]); - - return result; -} - -/* Read and clear ptrack files of the target relation. - * Result is a bytea ptrack map of all segments of the target relation. - * case 1: we know a tablespace_oid, db_oid, and rel_filenode - * case 2: we know db_oid and rel_filenode (no tablespace_oid, because file in pg_default) - * case 3: we know only rel_filenode (because file in pg_global) - */ -char * -pg_ptrack_get_and_clear(Oid tablespace_oid, Oid db_oid, Oid rel_filenode, - size_t *result_size, PGconn *backup_conn) -{ - PGconn *tmp_conn; - PGresult *res_db, - *res; - char *params[2]; - char *result; - char *val; - - params[0] = palloc(64); - params[1] = palloc(64); - - /* regular file (not in directory 'global') */ - if (db_oid != 0) - { - char *dbname; - - sprintf(params[0], "%i", db_oid); - res_db = pgut_execute(backup_conn, - "SELECT datname FROM pg_database WHERE oid=$1", - 1, (const char **) params); - /* - * If database is not found, it's not an error. - * It could have been deleted since previous backup. - */ - if (PQntuples(res_db) != 1 || PQnfields(res_db) != 1) - return NULL; - - dbname = PQgetvalue(res_db, 0, 0); - - if (strcmp(dbname, "template0") == 0) - { - PQclear(res_db); - return NULL; - } - - tmp_conn = pgut_connect(instance_config.conn_opt.pghost, instance_config.conn_opt.pgport, - dbname, - instance_config.conn_opt.pguser); - sprintf(params[0], "%i", tablespace_oid); - sprintf(params[1], "%i", rel_filenode); - res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear($1, $2)", - 2, (const char **)params); - - if (PQnfields(res) != 1) - elog(ERROR, "cannot get ptrack file from database \"%s\" by tablespace oid %u and relation oid %u", - dbname, tablespace_oid, rel_filenode); - PQclear(res_db); - pgut_disconnect(tmp_conn); - } - /* file in directory 'global' */ - else - { - /* - * execute ptrack_get_and_clear for relation in pg_global - * Use backup_conn, cause we can do it from any database. 
- */ - sprintf(params[0], "%i", tablespace_oid); - sprintf(params[1], "%i", rel_filenode); - res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear($1, $2)", - 2, (const char **)params); - - if (PQnfields(res) != 1) - elog(ERROR, "cannot get ptrack file from pg_global tablespace and relation oid %u", - rel_filenode); - } - - val = PQgetvalue(res, 0, 0); - - /* TODO Now pg_ptrack_get_and_clear() returns bytea ending with \x. - * It should be fixed in future ptrack releases, but till then we - * can parse it. - */ - if (strcmp("x", val+1) == 0) - { - /* Ptrack file is missing */ - return NULL; - } - - result = (char *) PQunescapeBytea((unsigned char *) PQgetvalue(res, 0, 0), - result_size); - PQclear(res); - pfree(params[0]); - pfree(params[1]); - - return result; -} - /* * Get lsn of the moment when ptrack was enabled the last time. */ @@ -482,20 +151,14 @@ get_last_ptrack_lsn(PGconn *backup_conn, PGNodeInfo *nodeInfo) uint32 lsn_lo; XLogRecPtr lsn; - if (nodeInfo->ptrack_version_num < 200) - res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_ptrack_control_lsn()", - 0, NULL); - else - { - char query[128]; + char query[128]; - if (nodeInfo->ptrack_version_num == 200) - sprintf(query, "SELECT %s.pg_ptrack_control_lsn()", nodeInfo->ptrack_schema); - else - sprintf(query, "SELECT %s.ptrack_init_lsn()", nodeInfo->ptrack_schema); + if (nodeInfo->ptrack_version_num == 200) + sprintf(query, "SELECT %s.pg_ptrack_control_lsn()", nodeInfo->ptrack_schema); + else + sprintf(query, "SELECT %s.ptrack_init_lsn()", nodeInfo->ptrack_schema); - res = pgut_execute(backup_conn, query, 0, NULL); - } + res = pgut_execute(backup_conn, query, 0, NULL); /* Extract timeline and LSN from results of pg_start_backup() */ XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo); @@ -506,99 +169,6 @@ get_last_ptrack_lsn(PGconn *backup_conn, PGNodeInfo *nodeInfo) return lsn; } -char * -pg_ptrack_get_block(ConnectionArgs *arguments, - Oid dbOid, - Oid tblsOid, - Oid relOid, - BlockNumber blknum, - size_t *result_size, - int ptrack_version_num, - const char *ptrack_schema) -{ - PGresult *res; - char *params[4]; - char *result; - - params[0] = palloc(64); - params[1] = palloc(64); - params[2] = palloc(64); - params[3] = palloc(64); - - /* - * Use tmp_conn, since we may work in parallel threads. - * We can connect to any database. 
- */ - sprintf(params[0], "%i", tblsOid); - sprintf(params[1], "%i", dbOid); - sprintf(params[2], "%i", relOid); - sprintf(params[3], "%u", blknum); - - if (arguments->conn == NULL) - { - arguments->conn = pgut_connect(instance_config.conn_opt.pghost, - instance_config.conn_opt.pgport, - instance_config.conn_opt.pgdatabase, - instance_config.conn_opt.pguser); - } - - if (arguments->cancel_conn == NULL) - arguments->cancel_conn = PQgetCancel(arguments->conn); - - // elog(LOG, "db %i pg_ptrack_get_block(%i, %i, %u)",dbOid, tblsOid, relOid, blknum); - - if (ptrack_version_num < 200) - res = pgut_execute_parallel(arguments->conn, - arguments->cancel_conn, - "SELECT pg_catalog.pg_ptrack_get_block_2($1, $2, $3, $4)", - 4, (const char **)params, true, false, false); - else - { - char query[128]; - - /* sanity */ - if (!ptrack_schema) - elog(ERROR, "Schema name of ptrack extension is missing"); - - if (ptrack_version_num == 200) - sprintf(query, "SELECT %s.pg_ptrack_get_block($1, $2, $3, $4)", ptrack_schema); - else - elog(ERROR, "ptrack >= 2.1.0 does not support pg_ptrack_get_block()"); - // sprintf(query, "SELECT %s.ptrack_get_block($1, $2, $3, $4)", ptrack_schema); - - res = pgut_execute_parallel(arguments->conn, - arguments->cancel_conn, - query, 4, (const char **)params, - true, false, false); - } - - if (PQnfields(res) != 1) - { - elog(VERBOSE, "cannot get file block for relation oid %u", - relOid); - return NULL; - } - - if (PQgetisnull(res, 0, 0)) - { - elog(VERBOSE, "cannot get file block for relation oid %u", - relOid); - return NULL; - } - - result = (char *) PQunescapeBytea((unsigned char *) PQgetvalue(res, 0, 0), - result_size); - - PQclear(res); - - pfree(params[0]); - pfree(params[1]); - pfree(params[2]); - pfree(params[3]); - - return result; -} - /* ---------------------------- * Ptrack 2.* support functions * ---------------------------- diff --git a/src/utils/file.c b/src/utils/file.c index d40817aed..e9792dd9c 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -48,7 +48,6 @@ typedef struct size_t size; time_t mtime; bool is_datafile; - bool is_database; Oid tblspcOid; Oid dbOid; Oid relOid; @@ -2571,7 +2570,6 @@ fio_list_dir_internal(parray *files, const char *root, bool exclude, file->size = fio_file.size; file->mtime = fio_file.mtime; file->is_datafile = fio_file.is_datafile; - file->is_database = fio_file.is_database; file->tblspcOid = fio_file.tblspcOid; file->dbOid = fio_file.dbOid; file->relOid = fio_file.relOid; @@ -2645,7 +2643,6 @@ fio_list_dir_impl(int out, char* buf) fio_file.size = file->size; fio_file.mtime = file->mtime; fio_file.is_datafile = file->is_datafile; - fio_file.is_database = file->is_database; fio_file.tblspcOid = file->tblspcOid; fio_file.dbOid = file->dbOid; fio_file.relOid = file->relOid; diff --git a/tests/auth_test.py b/tests/auth_test.py index c84fdb981..78af21be9 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -190,9 +190,7 @@ def setUpClass(cls): "GRANT EXECUTE ON FUNCTION pg_switch_xlog() TO backup; " "GRANT EXECUTE ON FUNCTION txid_current() TO backup; " "GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_ptrack_clear() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_ptrack_get_and_clear(oid, oid) TO backup;") + "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;") cls.pgpass_file = os.path.join(os.path.expanduser('~'), '.pgpass') @classmethod diff --git a/tests/backup.py b/tests/backup.py 
index 53790ad03..8c537dbc3 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -2106,6 +2106,7 @@ def test_backup_with_least_privileges_role(self): if self.ptrack: if node.major_version < 12: + # Reviewer, NB: skip this test in case of old ptrack? for fname in [ 'pg_catalog.oideq(oid, oid)', 'pg_catalog.ptrack_version()', diff --git a/tests/false_positive.py b/tests/false_positive.py index d4e7ccf0d..a101f8107 100644 --- a/tests/false_positive.py +++ b/tests/false_positive.py @@ -107,192 +107,6 @@ def test_incremental_backup_corrupt_full_1(self): # Clean after yourself self.del_test_dir(module_name, fname) - @unittest.expectedFailure - def test_ptrack_concurrent_get_and_clear_1(self): - """make node, make full and ptrack stream backups," - " restore them and check data correctness""" - - if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') - - if self.pg_config_version > self.version_to_num('11.0'): - return unittest.skip('You need PostgreSQL =< 11 for this test') - - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "create table t_heap as select i" - " as id from generate_series(0,1) i" - ) - - self.backup_node(backup_dir, 'node', node, options=['--stream']) - gdb = self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', - options=['--stream'], - gdb=True - ) - - gdb.set_breakpoint('make_pagemap_from_ptrack') - gdb.run_until_break() - - node.safe_psql( - "postgres", - "update t_heap set id = 100500") - - tablespace_oid = node.safe_psql( - "postgres", - "select oid from pg_tablespace where spcname = 'pg_default'").rstrip() - - relfilenode = node.safe_psql( - "postgres", - "select 't_heap'::regclass::oid").rstrip() - - node.safe_psql( - "postgres", - "SELECT pg_ptrack_get_and_clear({0}, {1})".format( - tablespace_oid, relfilenode)) - - gdb.continue_execution_until_exit() - - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['--stream'] - ) - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - result = node.safe_psql("postgres", "SELECT * FROM t_heap") - node.cleanup() - self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) - - # Physical comparison - if self.paranoia: - pgdata_restored = self.pgdata_content( - node.data_dir, ignore_ptrack=False) - self.compare_pgdata(pgdata, pgdata_restored) - - node.slow_start() - # Logical comparison - self.assertEqual( - result, - node.safe_psql("postgres", "SELECT * FROM t_heap")) - - # Clean after yourself - self.del_test_dir(module_name, fname) - - @unittest.expectedFailure - def test_ptrack_concurrent_get_and_clear_2(self): - """make node, make full and ptrack stream backups," - " restore them and check data correctness""" - - if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') - - if self.pg_config_version > self.version_to_num('11.0'): - return unittest.skip('You need PostgreSQL =< 11 for this test') - - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), - 
set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "create table t_heap as select i" - " as id from generate_series(0,1) i" - ) - - self.backup_node(backup_dir, 'node', node, options=['--stream']) - gdb = self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', - options=['--stream'], - gdb=True - ) - - gdb.set_breakpoint('pthread_create') - gdb.run_until_break() - - node.safe_psql( - "postgres", - "update t_heap set id = 100500") - - tablespace_oid = node.safe_psql( - "postgres", - "select oid from pg_tablespace " - "where spcname = 'pg_default'").rstrip() - - relfilenode = node.safe_psql( - "postgres", - "select 't_heap'::regclass::oid").rstrip() - - node.safe_psql( - "postgres", - "SELECT pg_ptrack_get_and_clear({0}, {1})".format( - tablespace_oid, relfilenode)) - - gdb._execute("delete breakpoints") - gdb.continue_execution_until_exit() - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['--stream'] - ) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of LSN mismatch from ptrack_control " - "and previous backup ptrack_lsn.\n" - " Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: LSN from ptrack_control' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - result = node.safe_psql("postgres", "SELECT * FROM t_heap") - node.cleanup() - self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) - - # Physical comparison - if self.paranoia: - pgdata_restored = self.pgdata_content( - node.data_dir, ignore_ptrack=False) - self.compare_pgdata(pgdata, pgdata_restored) - - node.slow_start() - # Logical comparison - self.assertEqual( - result, - node.safe_psql("postgres", "SELECT * FROM t_heap") - ) - - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") @unittest.expectedFailure def test_pg_10_waldir(self): diff --git a/tests/ptrack.py b/tests/ptrack.py index 011f8754a..fb530a691 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -14,6 +14,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase): + def setUp(self): + if self.pg_config_version < self.version_to_num('11.0'): + return unittest.skip('You need PostgreSQL >= 11 for this test') + self.fname = self.id().split('.')[3] # @unittest.skip("skip") def test_ptrack_stop_pg(self): @@ -22,10 +26,9 @@ def test_ptrack_stop_pg(self): restart node, check that ptrack backup can be taken """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -52,7 +55,7 @@ def test_ptrack_stop_pg(self): backup_type='ptrack', options=['--stream']) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_ptrack_multi_timeline_backup(self): @@ -60,10 +63,9 @@ def 
test_ptrack_multi_timeline_backup(self): t2 /------P2 t1 ------F---*-----P1 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -130,7 +132,7 @@ def test_ptrack_multi_timeline_backup(self): self.assertEqual('0', balance) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_ptrack_multi_timeline_backup_1(self): @@ -142,10 +144,9 @@ def test_ptrack_multi_timeline_backup_1(self): t2 /------P2 t1 ---F--------* """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -206,17 +207,16 @@ def test_ptrack_multi_timeline_backup_1(self): self.assertEqual('0', balance) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_ptrack_eat_my_data(self): """ PGPRO-4051 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -236,7 +236,7 @@ def test_ptrack_eat_my_data(self): self.backup_node(backup_dir, 'node', node) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(module_name, self.fname, 'node_restored')) pgbench = node.pgbench(options=['-T', '300', '-c', '1', '--no-vacuum']) @@ -287,16 +287,15 @@ def test_ptrack_eat_my_data(self): 'Data loss') # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_ptrack_simple(self): """make node, make full and ptrack stream backups," " restore them and check data correctness""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -335,7 +334,7 @@ def test_ptrack_simple(self): result = node.safe_psql("postgres", "SELECT * FROM t_heap") node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -358,15 +357,14 @@ def test_ptrack_simple(self): node_restored.safe_psql("postgres", "SELECT * FROM t_heap")) # Clean after yourself - self.del_test_dir(module_name, fname) + 
self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_ptrack_unprivileged(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -484,7 +482,8 @@ def test_ptrack_unprivileged(self): ) if node.major_version < 11: - fnames = [ + # Reviewer, NB: skip this test in case of old ptrack? + self.fnames = [ 'pg_catalog.oideq(oid, oid)', 'pg_catalog.ptrack_version()', 'pg_catalog.pg_ptrack_clear()', @@ -494,7 +493,7 @@ def test_ptrack_unprivileged(self): 'pg_catalog.pg_ptrack_get_block_2(oid, oid, oid, bigint)' ] - for fname in fnames: + for self.fname in self.fnames: node.safe_psql( "backupdb", "GRANT EXECUTE ON FUNCTION {0} TO backup".format(fname)) @@ -536,10 +535,9 @@ def test_ptrack_unprivileged(self): # @unittest.expectedFailure def test_ptrack_enable(self): """make ptrack without full backup, should result in error""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', @@ -577,7 +575,7 @@ def test_ptrack_enable(self): ) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") # @unittest.expectedFailure @@ -587,10 +585,9 @@ def test_ptrack_disable(self): enable ptrack, restart postgresql, take ptrack backup which should fail """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -650,15 +647,14 @@ def test_ptrack_disable(self): ) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_ptrack_uncommitted_xact(self): """make ptrack backup while there is uncommitted open transaction""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -689,7 +685,7 @@ def test_ptrack_uncommitted_xact(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -710,16 +706,15 @@ def test_ptrack_uncommitted_xact(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - 
self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_ptrack_vacuum_full(self): """make node, make full and ptrack stream backups, restore them and check data correctness""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -773,7 +768,7 @@ def test_ptrack_vacuum_full(self): process.join() node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(module_name, self.fname, 'node_restored')) node_restored.cleanup() old_tablespace = self.get_tblspace_path(node, 'somedata') @@ -797,7 +792,7 @@ def test_ptrack_vacuum_full(self): node_restored.slow_start() # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_ptrack_vacuum_truncate(self): @@ -805,10 +800,9 @@ def test_ptrack_vacuum_truncate(self): delete last 3 pages, vacuum relation, take ptrack backup, take second ptrack backup, restore last ptrack backup and check data correctness""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -856,7 +850,7 @@ def test_ptrack_vacuum_truncate(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(module_name, self.fname, 'node_restored')) node_restored.cleanup() old_tablespace = self.get_tblspace_path(node, 'somedata') @@ -882,16 +876,17 @@ def test_ptrack_vacuum_truncate(self): node_restored.slow_start() # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_ptrack_get_block(self): - """make node, make full and ptrack stream backups," - " restore them and check data correctness""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + """ + make node, make full and ptrack stream backups, + restore them and check data correctness + """ + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -900,11 +895,9 @@ def test_ptrack_get_block(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - self.skipTest("skip --- we do not need ptrack_get_block for ptrack 2.*") - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") node.safe_psql( "postgres", @@ -917,10 +910,7 @@ def test_ptrack_get_block(self): options=['--stream'], gdb=True) - if node.major_version > 11: - 
gdb.set_breakpoint('make_pagemap_from_ptrack_2') - else: - gdb.set_breakpoint('make_pagemap_from_ptrack_1') + gdb.set_breakpoint('make_pagemap_from_ptrack_2') gdb.run_until_break() node.safe_psql( @@ -950,21 +940,18 @@ def test_ptrack_get_block(self): # Logical comparison self.assertEqual( result, - node.safe_psql("postgres", "SELECT * FROM t_heap") - ) + node.safe_psql("postgres", "SELECT * FROM t_heap")) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_ptrack_stream(self): """make node, make full and ptrack stream backups, restore them and check data correctness""" - self.maxDiff = None - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -975,10 +962,9 @@ def test_ptrack_stream(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") # FULL BACKUP node.safe_psql("postgres", "create sequence t_seq") @@ -1045,17 +1031,15 @@ def test_ptrack_stream(self): self.assertEqual(ptrack_result, ptrack_result_new) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_ptrack_archive(self): """make archive node, make full and ptrack backups, check data correctness in restored instance""" - self.maxDiff = None - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1067,10 +1051,9 @@ def test_ptrack_archive(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") # FULL BACKUP node.safe_psql( @@ -1158,20 +1141,18 @@ def test_ptrack_archive(self): node.cleanup() # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") + @unittest.skip("skip") def test_ptrack_pgpro417(self): - """Make node, take full backup, take ptrack backup, - delete ptrack backup. Try to take ptrack backup, - which should fail. Actual only for PTRACK 1.x""" - if self.pg_config_version > self.version_to_num('11.0'): - return unittest.skip('You need PostgreSQL =< 11 for this test') - - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + """ + Make node, take full backup, take ptrack backup, + delete ptrack backup. Try to take ptrack backup, + which should fail. 
Actual only for PTRACK 1.x + """ + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1238,22 +1219,18 @@ def test_ptrack_pgpro417(self): repr(e.message), self.cmd)) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") + @unittest.skip("skip") def test_page_pgpro417(self): """ Make archive node, take full backup, take page backup, delete page backup. Try to take ptrack backup, which should fail. Actual only for PTRACK 1.x """ - if self.pg_config_version > self.version_to_num('11.0'): - return unittest.skip('You need PostgreSQL =< 11 for this test') - - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1308,22 +1285,18 @@ def test_page_pgpro417(self): repr(e.message), self.cmd)) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") + @unittest.skip("skip") def test_full_pgpro417(self): """ Make node, take two full backups, delete full second backup. Try to take ptrack backup, which should fail. Relevant only for PTRACK 1.x """ - if self.pg_config_version > self.version_to_num('11.0'): - return unittest.skip('You need PostgreSQL =< 11 for this test') - - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1384,7 +1357,7 @@ def test_full_pgpro417(self): repr(e.message), self.cmd)) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_create_db(self): @@ -1392,10 +1365,9 @@ def test_create_db(self): Make node, take full backup, create database db1, take ptrack backup, restore database and check it presense """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1406,10 +1378,9 @@ def test_create_db(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") # FULL BACKUP node.safe_psql( @@ -1439,7 +1410,7 @@ def test_create_db(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(module_name, self.fname, 
'node_restored')) node_restored.cleanup() self.restore_node( @@ -1501,7 +1472,7 @@ def test_create_db(self): repr(e.message), self.cmd)) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_create_db_on_replica(self): @@ -1511,10 +1482,9 @@ def test_create_db_on_replica(self): create database db1, take ptrack backup from replica, restore database and check it presense """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1537,7 +1507,7 @@ def test_create_db_on_replica(self): "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(module_name, self.fname, 'replica')) replica.cleanup() self.backup_node( @@ -1590,7 +1560,7 @@ def test_create_db_on_replica(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -1604,16 +1574,15 @@ def test_create_db_on_replica(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_alter_table_set_tablespace_ptrack(self): """Make node, create tablespace with table, take full backup, alter tablespace location, take ptrack backup, restore database.""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1661,7 +1630,7 @@ def test_alter_table_set_tablespace_ptrack(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -1696,17 +1665,16 @@ def test_alter_table_set_tablespace_ptrack(self): # self.assertEqual(result, result_new, 'lost some data after restore') # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_alter_database_set_tablespace_ptrack(self): """Make node, create tablespace with database," " take full backup, alter tablespace location," " take ptrack backup, restore database.""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1744,7 +1712,7 @@ def test_alter_database_set_tablespace_ptrack(self): # RESTORE node_restored = 
self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( backup_dir, 'node', @@ -1766,7 +1734,7 @@ def test_alter_database_set_tablespace_ptrack(self): node_restored.slow_start() # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_drop_tablespace(self): @@ -1774,10 +1742,9 @@ def test_drop_tablespace(self): Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1862,7 +1829,7 @@ def test_drop_tablespace(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_ptrack_alter_tablespace(self): @@ -1870,10 +1837,9 @@ def test_ptrack_alter_tablespace(self): Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1920,7 +1886,7 @@ def test_ptrack_alter_tablespace(self): # Restore ptrack backup restored_node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'restored_node')) + base_dir=os.path.join(module_name, self.fname, 'restored_node')) restored_node.cleanup() tblspc_path_new = self.get_tblspace_path( restored_node, 'somedata_restored') @@ -1979,7 +1945,7 @@ def test_ptrack_alter_tablespace(self): self.assertEqual(result, result_new) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_ptrack_multiple_segments(self): @@ -1987,10 +1953,9 @@ def test_ptrack_multiple_segments(self): Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -2056,7 +2021,7 @@ def test_ptrack_multiple_segments(self): # RESTORE NODE restored_node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'restored_node')) + base_dir=os.path.join(module_name, self.fname, 'restored_node')) restored_node.cleanup() tblspc_path = self.get_tblspace_path(node, 'somedata') tblspc_path_new = self.get_tblspace_path( @@ -2090,28 +2055,23 @@ def 
test_ptrack_multiple_segments(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") - # @unittest.expectedFailure + @unittest.skip("skip") def test_atexit_fail(self): """ Take backups of every available types and check that PTRACK is clean. Relevant only for PTRACK 1.x """ - if self.pg_config_version > self.version_to_num('11.0'): - return unittest.skip('You need PostgreSQL =< 11 for this test') - - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ 'max_connections': '15'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2147,26 +2107,22 @@ def test_atexit_fail(self): "f") # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") + @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_clean(self): """ Take backups of every available types and check that PTRACK is clean Relevant only for PTRACK 1.x """ - if self.pg_config_version > self.version_to_num('11.0'): - return unittest.skip('You need PostgreSQL =< 11 for this test') - - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2259,29 +2215,24 @@ def test_ptrack_clean(self): self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") - # @unittest.expectedFailure + @unittest.skip("skip") def test_ptrack_clean_replica(self): """ Take backups of every available types from master and check that PTRACK on replica is clean. 
Relevant only for PTRACK 1.x """ - if self.pg_config_version > self.version_to_num('11.0'): - return unittest.skip('You need PostgreSQL =< 11 for this test') - - fname = self.id().split('.')[3] master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ 'archive_timeout': '30s'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -2289,7 +2240,7 @@ def test_ptrack_clean_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -2402,18 +2353,17 @@ def test_ptrack_clean_replica(self): self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_cluster_on_btree(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2467,18 +2417,17 @@ def test_ptrack_cluster_on_btree(self): self.check_ptrack_map_sanity(node, idx_ptrack) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_ptrack_cluster_on_gist(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2540,18 +2489,17 @@ def test_ptrack_cluster_on_gist(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_ptrack_cluster_on_btree_replica(self): - fname = self.id().split('.')[3] master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -2564,7 +2512,7 @@ def test_ptrack_cluster_on_btree_replica(self): self.backup_node(backup_dir, 'master', master, 
options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -2628,7 +2576,7 @@ def test_ptrack_cluster_on_btree_replica(self): pgdata = self.pgdata_content(replica.data_dir) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node')) + base_dir=os.path.join(module_name, self.fname, 'node')) node.cleanup() self.restore_node(backup_dir, 'replica', node) @@ -2637,17 +2585,16 @@ def test_ptrack_cluster_on_btree_replica(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_ptrack_cluster_on_gist_replica(self): - fname = self.id().split('.')[3] master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -2660,7 +2607,7 @@ def test_ptrack_cluster_on_gist_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -2730,7 +2677,7 @@ def test_ptrack_cluster_on_gist_replica(self): pgdata = self.pgdata_content(replica.data_dir) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node')) + base_dir=os.path.join(module_name, self.fname, 'node')) node.cleanup() self.restore_node(backup_dir, 'replica', node) @@ -2740,20 +2687,19 @@ def test_ptrack_cluster_on_gist_replica(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_empty(self): """Take backups of every available types and check that PTRACK is clean""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2792,7 +2738,7 @@ def test_ptrack_empty(self): node.safe_psql('postgres', 'checkpoint') node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(module_name, self.fname, 'node_restored')) node_restored.cleanup() tblspace1 = self.get_tblspace_path(node, 'somedata') @@ -2818,7 +2764,7 @@ def test_ptrack_empty(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") # @unittest.expectedFailure @@ -2827,14 +2773,13 @@ def test_ptrack_empty_replica(self): Take backups of every 
available types from master and check that PTRACK on replica is clean """ - fname = self.id().split('.')[3] master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -2847,7 +2792,7 @@ def test_ptrack_empty_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -2903,7 +2848,7 @@ def test_ptrack_empty_replica(self): pgdata = self.pgdata_content(replica.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -2915,19 +2860,18 @@ def test_ptrack_empty_replica(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_truncate(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2998,13 +2942,12 @@ def test_ptrack_truncate(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_basic_ptrack_truncate_replica(self): - fname = self.id().split('.')[3] master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -3013,7 +2956,7 @@ def test_basic_ptrack_truncate_replica(self): 'archive_timeout': '10s', 'checkpoint_timeout': '5min'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -3026,7 +2969,7 @@ def test_basic_ptrack_truncate_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -3108,7 +3051,7 @@ def test_basic_ptrack_truncate_replica(self): pgdata = self.pgdata_content(replica.data_dir) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node')) + base_dir=os.path.join(module_name, self.fname, 'node')) 
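# Illustrative aside (not part of the suite): why these hunks can replace the
# per-test 'fname = self.id().split('.')[3]' with a single self.fname set in
# setUp().  unittest's TestCase.id() returns the dotted path of the running
# test (for this suite something like 'tests.backup.BackupTest.test_name'),
# so element [3] is the bare test name used to build the per-test directories.
# Minimal self-contained demo; it uses [-1] instead of [3] only because its
# own dotted path has three components rather than four.
import unittest

class FnameDemo(unittest.TestCase):
    def setUp(self):
        # e.g. '__main__.FnameDemo.test_fname_matches_method_name'
        self.fname = self.id().split('.')[-1]

    def test_fname_matches_method_name(self):
        self.assertEqual(self.fname, 'test_fname_matches_method_name')

if __name__ == '__main__':
    unittest.main()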
node.cleanup() self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir) @@ -3127,19 +3070,18 @@ def test_basic_ptrack_truncate_replica(self): 'select 1') # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3215,20 +3157,19 @@ def test_ptrack_vacuum(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_ptrack_vacuum_replica(self): - fname = self.id().split('.')[3] master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -3241,7 +3182,7 @@ def test_ptrack_vacuum_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -3314,7 +3255,7 @@ def test_ptrack_vacuum_replica(self): pgdata = self.pgdata_content(replica.data_dir) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node')) + base_dir=os.path.join(module_name, self.fname, 'node')) node.cleanup() self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir) @@ -3323,19 +3264,18 @@ def test_ptrack_vacuum_replica(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_bits_frozen(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3403,18 +3343,17 @@ def test_ptrack_vacuum_bits_frozen(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_ptrack_vacuum_bits_frozen_replica(self): - fname = self.id().split('.')[3] master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 
'master'), + base_dir=os.path.join(module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -3427,7 +3366,7 @@ def test_ptrack_vacuum_bits_frozen_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -3502,19 +3441,18 @@ def test_ptrack_vacuum_bits_frozen_replica(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_bits_visibility(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3582,18 +3520,17 @@ def test_ptrack_vacuum_bits_visibility(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_full(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3661,19 +3598,18 @@ def test_ptrack_vacuum_full(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_full_replica(self): - fname = self.id().split('.')[3] master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -3685,7 +3621,7 @@ def test_ptrack_vacuum_full_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -3763,19 +3699,18 @@ def 
test_ptrack_vacuum_full_replica(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_truncate(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3832,7 +3767,7 @@ def test_ptrack_vacuum_truncate(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -3841,19 +3776,18 @@ def test_ptrack_vacuum_truncate(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_truncate_replica(self): - fname = self.id().split('.')[3] master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -3866,7 +3800,7 @@ def test_ptrack_vacuum_truncate_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -3938,7 +3872,7 @@ def test_ptrack_vacuum_truncate_replica(self): pgdata = self.pgdata_content(replica.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'replica', node_restored) @@ -3947,22 +3881,21 @@ def test_ptrack_vacuum_truncate_replica(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") - # @unittest.expectedFailure + @unittest.skip("skip") def test_ptrack_recovery(self): - if self.pg_config_version > self.version_to_num('11.0'): - return unittest.skip('You need PostgreSQL =< 11 for this test') - - fname = self.id().split('.')[3] + """ + Check that ptrack map contain correct bits after recovery. 
+ Actual only for PTRACK 1.x + """ node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -4009,17 +3942,13 @@ def test_ptrack_recovery(self): self.check_ptrack_recovery(idx_ptrack[i]) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_recovery_1(self): - if self.pg_config_version < self.version_to_num('12.0'): - return unittest.skip('You need PostgreSQL >= 12 for this test') - - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -4027,7 +3956,7 @@ def test_ptrack_recovery_1(self): 'shared_buffers': '512MB', 'max_wal_size': '3GB'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -4067,9 +3996,9 @@ def test_ptrack_recovery_1(self): 'postgres', "create extension pg_buffercache") - print(node.safe_psql( - 'postgres', - "SELECT count(*) FROM pg_buffercache WHERE isdirty")) + #print(node.safe_psql( + # 'postgres', + # "SELECT count(*) FROM pg_buffercache WHERE isdirty")) if self.verbose: print('Killing postmaster. 
Losing Ptrack changes') @@ -4088,7 +4017,7 @@ def test_ptrack_recovery_1(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -4098,19 +4027,18 @@ def test_ptrack_recovery_1(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_zero_changes(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -4144,14 +4072,13 @@ def test_ptrack_zero_changes(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_pg_resetxlog(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -4159,7 +4086,7 @@ def test_ptrack_pg_resetxlog(self): 'shared_buffers': '512MB', 'max_wal_size': '3GB'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -4259,7 +4186,7 @@ def test_ptrack_pg_resetxlog(self): # pgdata = self.pgdata_content(node.data_dir) # # node_restored = self.make_simple_node( -# base_dir=os.path.join(module_name, fname, 'node_restored')) +# base_dir=os.path.join(module_name, self.fname, 'node_restored')) # node_restored.cleanup() # # self.restore_node( @@ -4269,23 +4196,18 @@ def test_ptrack_pg_resetxlog(self): # self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") # @unittest.expectedFailure def test_corrupt_ptrack_map(self): - - if self.pg_config_version < self.version_to_num('12.0'): - return unittest.skip('You need PostgreSQL >= 12 for this test') - - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -4388,11 +4310,8 @@ def test_corrupt_ptrack_map(self): node.stop(['-m', 'immediate', '-D', node.data_dir]) self.set_auto_conf(node, {'ptrack.map_size': '32', 'shared_preload_libraries': 'ptrack'}) - node.slow_start() - sleep(1) - try: self.backup_node( backup_dir, 'node', 
node, @@ -4410,8 +4329,6 @@ def test_corrupt_ptrack_map(self): '\n Unexpected Error Message: {0}\n' ' CMD: {1}'.format(repr(e.message), self.cmd)) - sleep(1) - self.backup_node( backup_dir, 'node', node, backup_type='delta', options=['--stream']) @@ -4435,26 +4352,21 @@ def test_corrupt_ptrack_map(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") def test_horizon_lsn_ptrack(self): """ https://github.com/postgrespro/pg_probackup/pull/386 """ - - if self.pg_config_version < self.version_to_num('11.0'): - return unittest.skip("You need PostgreSQL >= 11 for this test") - self.assertLessEqual( self.version_to_num(self.old_probackup_version), self.version_to_num('2.4.15'), 'You need pg_probackup old_binary =< 2.4.15 for this test') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -4510,4 +4422,4 @@ def test_horizon_lsn_ptrack(self): self.assertEqual(delta_bytes, ptrack_bytes) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(module_name, self.fname) diff --git a/tests/restore.py b/tests/restore.py index 8ccffa44c..4e4b5c926 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -3300,6 +3300,7 @@ def test_missing_database_map(self): if self.ptrack: fnames = [] if node.major_version < 12: + # Reviewer, NB: skip this test in case of old ptrack? fnames += [ 'pg_catalog.oideq(oid, oid)', 'pg_catalog.ptrack_version()', @@ -3314,7 +3315,6 @@ def test_missing_database_map(self): # fnames += [ # 'pg_ptrack_get_pagemapset(pg_lsn)', # 'pg_ptrack_control_lsn()', -# 'pg_ptrack_get_block(oid, oid, oid, bigint)' # ] node.safe_psql( "backupdb", From 7c3f49a7eb992d1de510b549206116b6d9e33bcf Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 17 Jun 2021 19:28:48 +0300 Subject: [PATCH 161/525] travis: run on every branch, do not tolerate job failures --- .travis.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.travis.yml b/.travis.yml index 457835d3c..eb01d7ae4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -42,11 +42,11 @@ env: - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE -jobs: - allow_failures: - - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) +#jobs: +# allow_failures: +# - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) # Only run CI for master branch commits to limit our travis usage -branches: - only: - - master +#branches: +# only: +# - master From 4c27a93e8dd8a442afc350fcc70ced375bfada48 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 17 Jun 2021 19:44:08 +0300 Subject: [PATCH 162/525] tests: minor fix in ptrack.py --- tests/ptrack.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/ptrack.py b/tests/ptrack.py index fb530a691..611c6c8b0 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -483,7 +483,7 @@ def test_ptrack_unprivileged(self): if node.major_version < 11: # Reviewer, NB: skip this test in case of old ptrack? 
- self.fnames = [ + fnames = [ 'pg_catalog.oideq(oid, oid)', 'pg_catalog.ptrack_version()', 'pg_catalog.pg_ptrack_clear()', @@ -493,7 +493,7 @@ def test_ptrack_unprivileged(self): 'pg_catalog.pg_ptrack_get_block_2(oid, oid, oid, bigint)' ] - for self.fname in self.fnames: + for fname in fnames: node.safe_psql( "backupdb", "GRANT EXECUTE ON FUNCTION {0} TO backup".format(fname)) From 5f54e623c957f0798be1763a9a6b414155600398 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 17 Jun 2021 21:23:34 +0300 Subject: [PATCH 163/525] travis: start sshd in container --- travis/run_tests.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 1bb3a6fde..9aa49bae4 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -4,6 +4,13 @@ # Copyright (c) 2019-2020, Postgres Professional # +sudo su -c 'apt-get update -y' +sudo su -c 'apt-get install openssh-client openssh-server -y' +/etc/init.d/ssh start + +ssh-keygen -t rsa -q -N "" +cat ~/.ssh/id_rsa.pub > ~/.ssh/authorized_keys +ssh-keyscan -H localhost >> ~/.ssh/known_hosts PG_SRC=$PWD/postgres From 88bea549d33b6837275efb95027ad67e57bf1a25 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 17 Jun 2021 21:31:30 +0300 Subject: [PATCH 164/525] travis: move to python3 --- travis/Dockerfile.in | 4 ++-- travis/run_tests.sh | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/travis/Dockerfile.in b/travis/Dockerfile.in index a3c858ee2..a1f30d7f6 100644 --- a/travis/Dockerfile.in +++ b/travis/Dockerfile.in @@ -2,11 +2,11 @@ FROM ololobus/postgres-dev:stretch USER root RUN apt-get update -RUN apt-get -yq install python python-pip +RUN apt-get -yq install python3 python3-pip # RUN curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py # RUN python2 get-pip.py -RUN python2 -m pip install virtualenv +RUN python3 -m pip install virtualenv # Environment ENV PG_MAJOR=${PG_VERSION} PG_BRANCH=${PG_BRANCH} diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 9aa49bae4..149111c60 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -67,17 +67,17 @@ make USE_PGXS=1 top_srcdir=$PG_SRC install # Setup python environment echo "############### Setting up python env:" -python2 -m virtualenv pyenv +python3 -m virtualenv pyenv source pyenv/bin/activate -pip install testgres==1.8.2 +pip3 install testgres echo "############### Testing:" if [ "$MODE" = "basic" ]; then export PG_PROBACKUP_TEST_BASIC=ON - python -m unittest -v tests - python -m unittest -v tests.init + python3 -m unittest -v tests + python3 -m unittest -v tests.init else - python -m unittest -v tests.$MODE + python3 -m unittest -v tests.$MODE fi # Generate *.gcov files From 365c959f58e1fb509019b2bd7400bf1f6fd3f9eb Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 17 Jun 2021 21:40:46 +0300 Subject: [PATCH 165/525] travis: some more fixes --- travis/run_tests.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 149111c60..b985208ab 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -3,7 +3,9 @@ # # Copyright (c) 2019-2020, Postgres Professional # +set -xe +sudo su -c 'mkdir /run/sshd' sudo su -c 'apt-get update -y' sudo su -c 'apt-get install openssh-client openssh-server -y' /etc/init.d/ssh start From 423ffcaf38e272b689c2a9a360db375a98f49f72 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 17 Jun 2021 21:23:34 +0300 Subject: [PATCH 166/525] travis: start sshd in container --- travis/run_tests.sh | 7 +++++++ 
1 file changed, 7 insertions(+) diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 1bb3a6fde..9aa49bae4 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -4,6 +4,13 @@ # Copyright (c) 2019-2020, Postgres Professional # +sudo su -c 'apt-get update -y' +sudo su -c 'apt-get install openssh-client openssh-server -y' +/etc/init.d/ssh start + +ssh-keygen -t rsa -q -N "" +cat ~/.ssh/id_rsa.pub > ~/.ssh/authorized_keys +ssh-keyscan -H localhost >> ~/.ssh/known_hosts PG_SRC=$PWD/postgres From 6506eade264700c2525e8cbf920b8322b584e6ba Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 17 Jun 2021 21:31:30 +0300 Subject: [PATCH 167/525] travis: move to python3 --- travis/Dockerfile.in | 4 ++-- travis/run_tests.sh | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/travis/Dockerfile.in b/travis/Dockerfile.in index a3c858ee2..a1f30d7f6 100644 --- a/travis/Dockerfile.in +++ b/travis/Dockerfile.in @@ -2,11 +2,11 @@ FROM ololobus/postgres-dev:stretch USER root RUN apt-get update -RUN apt-get -yq install python python-pip +RUN apt-get -yq install python3 python3-pip # RUN curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py # RUN python2 get-pip.py -RUN python2 -m pip install virtualenv +RUN python3 -m pip install virtualenv # Environment ENV PG_MAJOR=${PG_VERSION} PG_BRANCH=${PG_BRANCH} diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 9aa49bae4..149111c60 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -67,17 +67,17 @@ make USE_PGXS=1 top_srcdir=$PG_SRC install # Setup python environment echo "############### Setting up python env:" -python2 -m virtualenv pyenv +python3 -m virtualenv pyenv source pyenv/bin/activate -pip install testgres==1.8.2 +pip3 install testgres echo "############### Testing:" if [ "$MODE" = "basic" ]; then export PG_PROBACKUP_TEST_BASIC=ON - python -m unittest -v tests - python -m unittest -v tests.init + python3 -m unittest -v tests + python3 -m unittest -v tests.init else - python -m unittest -v tests.$MODE + python3 -m unittest -v tests.$MODE fi # Generate *.gcov files From 318cb6e5b84cf0d16c828a6a9cc97dca48ad6dab Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 17 Jun 2021 21:40:46 +0300 Subject: [PATCH 168/525] travis: some more fixes --- travis/run_tests.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 149111c60..b985208ab 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -3,7 +3,9 @@ # # Copyright (c) 2019-2020, Postgres Professional # +set -xe +sudo su -c 'mkdir /run/sshd' sudo su -c 'apt-get update -y' sudo su -c 'apt-get install openssh-client openssh-server -y' /etc/init.d/ssh start From cf8fb8c9deea49d045e756f551bf7d33f6d1986e Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 17 Jun 2021 21:59:04 +0300 Subject: [PATCH 169/525] travis: run only smoke suit --- .travis.yml | 21 +++++++++++---------- travis/run_tests.sh | 4 ++-- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/.travis.yml b/.travis.yml index eb01d7ae4..462534a7f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,21 +26,22 @@ notifications: # Default MODE is basic, i.e. 
all tests with PG_PROBACKUP_TEST_BASIC=ON env: + - PG_VERSION=12 PG_BRANCH=REL_13_STABLE - PG_VERSION=12 PG_BRANCH=REL_12_STABLE - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=archive - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=backup - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=compression - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=delta - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=locking - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=merge - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=page - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=replica - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=retention - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=restore - PG_VERSION=11 PG_BRANCH=REL_11_STABLE - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE +# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=archive +# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=backup +# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=compression +# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=delta +# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=locking +# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=merge +# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=page +# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=replica +# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=retention +# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=restore #jobs: # allow_failures: diff --git a/travis/run_tests.sh b/travis/run_tests.sh index b985208ab..748a06d59 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -8,9 +8,9 @@ set -xe sudo su -c 'mkdir /run/sshd' sudo su -c 'apt-get update -y' sudo su -c 'apt-get install openssh-client openssh-server -y' -/etc/init.d/ssh start +sudo su -c '/etc/init.d/ssh start' -ssh-keygen -t rsa -q -N "" +ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N "" cat ~/.ssh/id_rsa.pub > ~/.ssh/authorized_keys ssh-keyscan -H localhost >> ~/.ssh/known_hosts From dbbfee4a635a5688521a9d35398d7b2801a9fa50 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 17 Jun 2021 22:29:55 +0300 Subject: [PATCH 170/525] tests: disable ptrack testing for PG < 11 --- tests/helpers/ptrack_helpers.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 0a87eb1b6..5eeeeaa91 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -312,6 +312,12 @@ def __init__(self, *args, **kwargs): os.environ["PGAPPNAME"] = "pg_probackup" + if self.ptrack: + self.assertGreaterEqual( + self.pg_config_version, + self.version_to_num('11.0'), + "ptrack testing require PostgreSQL >= 11") + @property def pg_config_version(self): return self.version_to_num( From d65ae6ccbdbe98cde690c88f64e87f47fc50c775 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Fri, 18 Jun 2021 01:15:43 +0300 Subject: [PATCH 171/525] travis: build only required PG objects to speed up test --- travis/run_tests.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 748a06d59..fd5779e1f 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -36,7 +36,10 @@ git clone https://github.com/postgres/postgres.git -b $PG_BRANCH --depth=1 echo "############### Compiling Postgres:" cd postgres # Go to postgres dir ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests -make -s -j$(nproc) install +#make -s -j$(nproc) install +make -s -j$(nproc) -C 'src/common' install +make -s 
-j$(nproc) -C 'src/port' install +make -s -j$(nproc) -C 'src/interfaces' install make -s -j$(nproc) -C contrib/ install # Override default Postgres instance From 469c5a1736cf909d1b92ff07b55954cca0379af4 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Fri, 18 Jun 2021 10:38:15 +0300 Subject: [PATCH 172/525] travis: some more fixes --- .travis.yml | 2 +- travis/run_tests.sh | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index 462534a7f..c66cf6439 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,7 +26,7 @@ notifications: # Default MODE is basic, i.e. all tests with PG_PROBACKUP_TEST_BASIC=ON env: - - PG_VERSION=12 PG_BRANCH=REL_13_STABLE + - PG_VERSION=13 PG_BRANCH=REL_13_STABLE - PG_VERSION=12 PG_BRANCH=REL_12_STABLE - PG_VERSION=11 PG_BRANCH=REL_11_STABLE - PG_VERSION=10 PG_BRANCH=REL_10_STABLE diff --git a/travis/run_tests.sh b/travis/run_tests.sh index fd5779e1f..635b9f422 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -36,10 +36,10 @@ git clone https://github.com/postgres/postgres.git -b $PG_BRANCH --depth=1 echo "############### Compiling Postgres:" cd postgres # Go to postgres dir ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests -#make -s -j$(nproc) install -make -s -j$(nproc) -C 'src/common' install -make -s -j$(nproc) -C 'src/port' install -make -s -j$(nproc) -C 'src/interfaces' install +make -s -j$(nproc) install +#make -s -j$(nproc) -C 'src/common' install +#make -s -j$(nproc) -C 'src/port' install +#make -s -j$(nproc) -C 'src/interfaces' install make -s -j$(nproc) -C contrib/ install # Override default Postgres instance From 7ca590c6cc39df44e2e73d3e363447aca24a0fef Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Fri, 18 Jun 2021 12:12:37 +0300 Subject: [PATCH 173/525] tests: drop ptrack1 suppport --- tests/backup.py | 231 +++---------------- tests/cfs_backup.py | 7 +- tests/compatibility.py | 7 +- tests/delete.py | 7 +- tests/helpers/ptrack_helpers.py | 10 +- tests/merge.py | 7 +- tests/ptrack.py | 381 +++++++++++++++++++------------- tests/restore.py | 68 ++---- 8 files changed, 296 insertions(+), 422 deletions(-) diff --git a/tests/backup.py b/tests/backup.py index 8c537dbc3..45fd137eb 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -18,9 +18,6 @@ class BackupTest(ProbackupTest, unittest.TestCase): # PGPRO-707 def test_backup_modes_archive(self): """standart backup modes with ARCHIVE WAL method""" - if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') - fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -33,12 +30,7 @@ def test_backup_modes_archive(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 12: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - backup_id = self.backup_node(backup_dir, 'node', node) + full_backup_id = self.backup_node(backup_dir, 'node', node) show_backup = self.show_pb(backup_dir, 'node')[0] self.assertEqual(show_backup['status'], "OK") @@ -47,7 +39,7 @@ def test_backup_modes_archive(self): # postmaster.pid and postmaster.opts shouldn't be copied excluded = True db_dir = os.path.join( - backup_dir, "backups", 'node', backup_id, "database") + backup_dir, "backups", 'node', full_backup_id, "database") for f in os.listdir(db_dir): if ( @@ -64,31 +56,30 @@ def test_backup_modes_archive(self): page_backup_id = self.backup_node( backup_dir, 'node', 
node, backup_type="page") - # print self.show_pb(node) - show_backup = self.show_pb(backup_dir, 'node')[1] + show_backup_1 = self.show_pb(backup_dir, 'node')[1] self.assertEqual(show_backup['status'], "OK") self.assertEqual(show_backup['backup-mode'], "PAGE") + # delta backup mode + delta_backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="delta") + + show_backup_2 = self.show_pb(backup_dir, 'node')[2] + self.assertEqual(show_backup['status'], "OK") + self.assertEqual(show_backup['backup-mode'], "DELTA") + # Check parent backup self.assertEqual( - backup_id, + full_backup_id, self.show_pb( backup_dir, 'node', - backup_id=show_backup['id'])["parent-backup-id"]) - - # ptrack backup mode - self.backup_node(backup_dir, 'node', node, backup_type="ptrack") + backup_id=show_backup_1['id'])["parent-backup-id"]) - show_backup = self.show_pb(backup_dir, 'node')[2] - self.assertEqual(show_backup['status'], "OK") - self.assertEqual(show_backup['backup-mode'], "PTRACK") - - # Check parent backup self.assertEqual( page_backup_id, self.show_pb( backup_dir, 'node', - backup_id=show_backup['id'])["parent-backup-id"]) + backup_id=show_backup_2['id'])["parent-backup-id"]) # Clean after yourself self.del_test_dir(module_name, fname) @@ -118,10 +109,7 @@ def test_smooth_checkpoint(self): # @unittest.skip("skip") def test_incremental_backup_without_full(self): - """page-level backup without validated full backup""" - if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') - + """page backup without validated full backup""" fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -134,11 +122,6 @@ def test_incremental_backup_without_full(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 12: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - try: self.backup_node(backup_dir, 'node', node, backup_type="page") # we should die here because exception is what we expect to happen @@ -154,29 +137,10 @@ def test_incremental_backup_without_full(self): "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - try: - self.backup_node(backup_dir, 'node', node, backup_type="ptrack") - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be possible " - "without valid full backup.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "WARNING: Valid full backup on current timeline 1 is not found" in e.message and - "ERROR: Create new full backup before an incremental one" in e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - self.assertEqual( self.show_pb(backup_dir, 'node')[0]['status'], "ERROR") - self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['status'], - "ERROR") - # Clean after yourself self.del_test_dir(module_name, fname) @@ -242,64 +206,19 @@ def test_incremental_backup_corrupt_full(self): self.del_test_dir(module_name, fname) # @unittest.skip("skip") - def test_ptrack_threads(self): - """ptrack multi thread backup mode""" - if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') - - fname = self.id().split('.')[3] - node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - ptrack_enable=True) - - backup_dir = 
os.path.join(self.tmp_path, module_name, fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - if node.major_version >= 12: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4"]) - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") - - self.backup_node( - backup_dir, 'node', node, - backup_type="ptrack", options=["-j", "4"]) - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") - - # Clean after yourself - self.del_test_dir(module_name, fname) - - # @unittest.skip("skip") - def test_ptrack_threads_stream(self): - """ptrack multi thread backup mode and stream""" - if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') - + def test_delta_threads_stream(self): + """delta multi thread backup mode and stream""" fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums'], - ptrack_enable=True) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 12: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - self.backup_node( backup_dir, 'node', node, backup_type="full", options=["-j", "4", "--stream"]) @@ -307,7 +226,7 @@ def test_ptrack_threads_stream(self): self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") self.backup_node( backup_dir, 'node', node, - backup_type="ptrack", options=["-j", "4", "--stream"]) + backup_type="delta", options=["-j", "4", "--stream"]) self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK") # Clean after yourself @@ -1459,76 +1378,6 @@ def test_drop_rel_during_backup_page(self): # Clean after yourself self.del_test_dir(module_name, fname) - # @unittest.skip("skip") - def test_drop_rel_during_backup_ptrack(self): - """""" - if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') - - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), - set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - if node.major_version >= 12: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - node.safe_psql( - "postgres", - "create table t_heap as select i" - " as id from generate_series(0,100) i") - - relative_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - - absolute_path = os.path.join(node.data_dir, relative_path) - - # FULL backup - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - # PTRACK backup - gdb = self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', - gdb=True, options=['--log-level-file=LOG']) - - gdb.set_breakpoint('backup_files') - gdb.run_until_break() - - # REMOVE file - os.remove(absolute_path) - - # File removed, we can proceed with backup - gdb.continue_execution_until_exit() - - pgdata = self.pgdata_content(node.data_dir) - - with 
open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: - log_content = f.read() - self.assertTrue( - 'LOG: File not found: "{0}"'.format(absolute_path) in log_content, - 'File "{0}" should be deleted but it`s not'.format(absolute_path)) - - node.cleanup() - self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) - - # Physical comparison - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_persistent_slot_for_stream_backup(self): """""" @@ -1992,10 +1841,11 @@ def test_backup_with_least_privileges_role(self): 'postgres', 'CREATE DATABASE backupdb') - if self.ptrack and node.major_version >= 12: + if self.ptrack: node.safe_psql( "backupdb", - "CREATE EXTENSION ptrack WITH SCHEMA pg_catalog") + "CREATE SCHEMA ptrack; " + "CREATE EXTENSION ptrack WITH SCHEMA ptrack") # PG 9.5 if self.get_version(node) < 90600: @@ -2105,33 +1955,14 @@ def test_backup_with_least_privileges_role(self): ) if self.ptrack: - if node.major_version < 12: - # Reviewer, NB: skip this test in case of old ptrack? - for fname in [ - 'pg_catalog.oideq(oid, oid)', - 'pg_catalog.ptrack_version()', - 'pg_catalog.pg_ptrack_clear()', - 'pg_catalog.pg_ptrack_control_lsn()', - 'pg_catalog.pg_ptrack_get_and_clear_db(oid, oid)', - 'pg_catalog.pg_ptrack_get_and_clear(oid, oid)', - 'pg_catalog.pg_ptrack_get_block_2(oid, oid, oid, bigint)', - 'pg_catalog.pg_stop_backup()']: - - node.safe_psql( - "backupdb", - "GRANT EXECUTE ON FUNCTION {0} " - "TO backup".format(fname)) - else: - fnames = [ - 'pg_catalog.ptrack_get_pagemapset(pg_lsn)', - 'pg_catalog.ptrack_init_lsn()' - ] - - for fname in fnames: - node.safe_psql( - "backupdb", - "GRANT EXECUTE ON FUNCTION {0} " - "TO backup".format(fname)) + node.safe_psql( + "backupdb", + "GRANT USAGE ON SCHEMA ptrack TO backup") + + node.safe_psql( + "backupdb", + "GRANT EXECUTE ON FUNCTION ptrack.ptrack_get_pagemapset(pg_lsn) TO backup; " + "GRANT EXECUTE ON FUNCTION 'ptrack.ptrack_init_lsn()' TO backup; ") if ProbackupTest.enterprise: node.safe_psql( @@ -2391,7 +2222,7 @@ def test_backup_with_less_privileges_role(self): 'postgres', 'CREATE DATABASE backupdb') - if self.ptrack and node.major_version >= 12: + if self.ptrack: node.safe_psql( 'backupdb', 'CREATE EXTENSION ptrack') diff --git a/tests/cfs_backup.py b/tests/cfs_backup.py index 2e686d46c..d820360fe 100644 --- a/tests/cfs_backup.py +++ b/tests/cfs_backup.py @@ -35,10 +35,9 @@ def setUp(self): self.node.slow_start() - if self.node.major_version >= 12: - self.node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + self.node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") self.create_tblspace_in_node(self.node, tblspace_name, cfs=True) diff --git a/tests/compatibility.py b/tests/compatibility.py index d0fae2528..e274c22be 100644 --- a/tests/compatibility.py +++ b/tests/compatibility.py @@ -304,10 +304,9 @@ def test_backward_compatibility_ptrack(self): self.set_archiving(backup_dir, 'node', node, old_binary=True) node.slow_start() - if node.major_version >= 12: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") node.pgbench_init(scale=10) diff --git a/tests/delete.py b/tests/delete.py index 8ebd7d13a..345a70284 100644 --- a/tests/delete.py +++ b/tests/delete.py @@ -203,10 +203,9 @@ def test_delete_increment_ptrack(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if 
node.major_version >= 12: - node.safe_psql( - 'postgres', - 'CREATE EXTENSION ptrack') + node.safe_psql( + 'postgres', + 'CREATE EXTENSION ptrack') # full backup mode self.backup_node(backup_dir, 'node', node) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 5eeeeaa91..ea661d158 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -388,11 +388,8 @@ def make_simple_node( options['max_wal_senders'] = 10 if ptrack_enable: - if node.major_version >= 11: - options['ptrack.map_size'] = '128' - options['shared_preload_libraries'] = 'ptrack' - else: - options['ptrack_enable'] = 'on' + options['ptrack.map_size'] = '128' + options['shared_preload_libraries'] = 'ptrack' if node.major_version >= 13: options['wal_keep_size'] = '200MB' @@ -622,9 +619,6 @@ def get_ptrack_bits_per_page_for_fork(self, node, file, size=[]): return ptrack_bits_for_fork def check_ptrack_map_sanity(self, node, idx_ptrack): - if node.major_version >= 12: - return - success = True for i in idx_ptrack: # get new size of heap and indexes. size calculated in pages diff --git a/tests/merge.py b/tests/merge.py index 668691fc8..fe0927f49 100644 --- a/tests/merge.py +++ b/tests/merge.py @@ -811,10 +811,9 @@ def test_merge_ptrack_truncate(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 12: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") self.create_tblspace_in_node(node, 'somedata') diff --git a/tests/ptrack.py b/tests/ptrack.py index 611c6c8b0..bcc8dc20a 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -19,6 +19,136 @@ def setUp(self): return unittest.skip('You need PostgreSQL >= 11 for this test') self.fname = self.id().split('.')[3] + # @unittest.skip("skip") + def test_drop_rel_during_backup_ptrack(self): + """ + drop relation during ptrack backup + """ + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + node.safe_psql( + "postgres", + "create table t_heap as select i" + " as id from generate_series(0,100) i") + + relative_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + + absolute_path = os.path.join(node.data_dir, relative_path) + + # FULL backup + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + # PTRACK backup + gdb = self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', + gdb=True, options=['--log-level-file=LOG']) + + gdb.set_breakpoint('backup_files') + gdb.run_until_break() + + # REMOVE file + os.remove(absolute_path) + + # File removed, we can proceed with backup + gdb.continue_execution_until_exit() + + pgdata = self.pgdata_content(node.data_dir) + + with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: + log_content = f.read() + self.assertTrue( + 'LOG: File not found: "{0}"'.format(absolute_path) in log_content, + 'File "{0}" should be deleted but it`s not'.format(absolute_path)) + + node.cleanup() + self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) + + # Physical comparison + pgdata_restored = 
self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + self.del_test_dir(module_name, self.fname) + + # @unittest.skip("skip") + def test_ptrack_without_full(self): + """ptrack backup without validated full backup""" + node = self.make_simple_node( + base_dir=os.path.join(module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + ptrack_enable=True) + + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + try: + self.backup_node(backup_dir, 'node', node, backup_type="ptrack") + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because page backup should not be possible " + "without valid full backup.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + "WARNING: Valid full backup on current timeline 1 is not found" in e.message and + "ERROR: Create new full backup before an incremental one" in e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertEqual( + self.show_pb(backup_dir, 'node')[0]['status'], + "ERROR") + + # Clean after yourself + self.del_test_dir(module_name, self.fname) + + # @unittest.skip("skip") + def test_ptrack_threads(self): + """ptrack multi thread backup mode""" + node = self.make_simple_node( + base_dir=os.path.join(module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + ptrack_enable=True) + + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.backup_node( + backup_dir, 'node', node, + backup_type="full", options=["-j", "4"]) + self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") + + self.backup_node( + backup_dir, 'node', node, + backup_type="ptrack", options=["-j", "4"]) + self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") + + # Clean after yourself + self.del_test_dir(module_name, self.fname) + # @unittest.skip("skip") def test_ptrack_stop_pg(self): """ @@ -37,10 +167,9 @@ def test_ptrack_stop_pg(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") node.pgbench_init(scale=1) @@ -75,10 +204,9 @@ def test_ptrack_multi_timeline_backup(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") node.pgbench_init(scale=5) @@ -156,10 +284,9 @@ def test_ptrack_multi_timeline_backup_1(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") node.pgbench_init(scale=5) @@ -226,10 +353,9 @@ def test_ptrack_eat_my_data(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") 
node.pgbench_init(scale=50) @@ -304,10 +430,9 @@ def test_ptrack_simple(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") self.backup_node(backup_dir, 'node', node, options=['--stream']) @@ -481,33 +606,15 @@ def test_ptrack_unprivileged(self): "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" ) - if node.major_version < 11: - # Reviewer, NB: skip this test in case of old ptrack? - fnames = [ - 'pg_catalog.oideq(oid, oid)', - 'pg_catalog.ptrack_version()', - 'pg_catalog.pg_ptrack_clear()', - 'pg_catalog.pg_ptrack_control_lsn()', - 'pg_catalog.pg_ptrack_get_and_clear_db(oid, oid)', - 'pg_catalog.pg_ptrack_get_and_clear(oid, oid)', - 'pg_catalog.pg_ptrack_get_block_2(oid, oid, oid, bigint)' - ] - - for fname in fnames: - node.safe_psql( - "backupdb", - "GRANT EXECUTE ON FUNCTION {0} TO backup".format(fname)) - - else: - node.safe_psql( - "backupdb", - "CREATE SCHEMA ptrack") - node.safe_psql( - "backupdb", - "CREATE EXTENSION ptrack WITH SCHEMA ptrack") - node.safe_psql( - "backupdb", - "GRANT USAGE ON SCHEMA ptrack TO backup") + node.safe_psql( + "backupdb", + "CREATE SCHEMA ptrack") + node.safe_psql( + "backupdb", + "CREATE EXTENSION ptrack WITH SCHEMA ptrack") + node.safe_psql( + "backupdb", + "GRANT USAGE ON SCHEMA ptrack TO backup") node.safe_psql( "backupdb", @@ -547,10 +654,9 @@ def test_ptrack_enable(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") # PTRACK BACKUP try: @@ -597,28 +703,21 @@ def test_ptrack_disable(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") # FULL BACKUP self.backup_node(backup_dir, 'node', node, options=['--stream']) # DISABLE PTRACK - if node.major_version >= 11: - node.safe_psql('postgres', "alter system set ptrack.map_size to 0") - else: - node.safe_psql('postgres', "alter system set ptrack_enable to off") + node.safe_psql('postgres', "alter system set ptrack.map_size to 0") node.stop() node.slow_start() # ENABLE PTRACK - if node.major_version >= 11: - node.safe_psql('postgres', "alter system set ptrack.map_size to '128'") - node.safe_psql('postgres', "alter system set shared_preload_libraries to 'ptrack'") - else: - node.safe_psql('postgres', "alter system set ptrack_enable to on") + node.safe_psql('postgres', "alter system set ptrack.map_size to '128'") + node.safe_psql('postgres', "alter system set shared_preload_libraries to 'ptrack'") node.stop() node.slow_start() @@ -665,10 +764,9 @@ def test_ptrack_uncommitted_xact(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") self.backup_node(backup_dir, 'node', node, options=['--stream']) @@ -725,10 +823,9 @@ def test_ptrack_vacuum_full(self): self.create_tblspace_in_node(node, 'somedata') - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") self.backup_node(backup_dir, 'node', node, 
options=['--stream']) @@ -813,10 +910,9 @@ def test_ptrack_vacuum_truncate(self): self.create_tblspace_in_node(node, 'somedata') - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") node.safe_psql( "postgres", @@ -1495,10 +1591,9 @@ def test_create_db_on_replica(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") # FULL BACKUP node.safe_psql( @@ -1593,10 +1688,9 @@ def test_alter_table_set_tablespace_ptrack(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") # FULL BACKUP self.create_tblspace_in_node(node, 'somedata') @@ -1685,10 +1779,9 @@ def test_alter_database_set_tablespace_ptrack(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") # FULL BACKUP self.backup_node(backup_dir, 'node', node, options=["--stream"]) @@ -1755,10 +1848,9 @@ def test_drop_tablespace(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") self.create_tblspace_in_node(node, 'somedata') @@ -1850,10 +1942,9 @@ def test_ptrack_alter_tablespace(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") self.create_tblspace_in_node(node, 'somedata') tblspc_path = self.get_tblspace_path(node, 'somedata') @@ -1966,10 +2057,9 @@ def test_ptrack_multiple_segments(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") self.create_tblspace_in_node(node, 'somedata') @@ -2368,10 +2458,9 @@ def test_ptrack_cluster_on_btree(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") self.create_tblspace_in_node(node, 'somedata') @@ -2432,10 +2521,9 @@ def test_ptrack_cluster_on_gist(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") # Create table and indexes node.safe_psql( @@ -2704,10 +2792,9 @@ def test_ptrack_empty(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") self.create_tblspace_in_node(node, 'somedata') @@ -2876,10 +2963,9 @@ def test_ptrack_truncate(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + 
"postgres", + "CREATE EXTENSION ptrack") self.create_tblspace_in_node(node, 'somedata') @@ -3086,10 +3172,9 @@ def test_ptrack_vacuum(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") self.create_tblspace_in_node(node, 'somedata') @@ -3280,10 +3365,9 @@ def test_ptrack_vacuum_bits_frozen(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") self.create_tblspace_in_node(node, 'somedata') @@ -3457,10 +3541,9 @@ def test_ptrack_vacuum_bits_visibility(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") self.create_tblspace_in_node(node, 'somedata') @@ -3535,10 +3618,9 @@ def test_ptrack_vacuum_full(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") self.create_tblspace_in_node(node, 'somedata') @@ -3715,10 +3797,9 @@ def test_ptrack_vacuum_truncate(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") # Create table and indexes res = node.safe_psql( @@ -4043,10 +4124,9 @@ def test_ptrack_zero_changes(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") # Create table node.safe_psql( @@ -4091,10 +4171,9 @@ def test_ptrack_pg_resetxlog(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") # Create table node.safe_psql( @@ -4212,10 +4291,9 @@ def test_corrupt_ptrack_map(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") # Create table node.safe_psql( @@ -4375,10 +4453,9 @@ def test_horizon_lsn_ptrack(self): self.add_instance(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 11: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") # TODO: ptrack version must be 2.1 ptrack_version = node.safe_psql( diff --git a/tests/restore.py b/tests/restore.py index 4e4b5c926..e59a1b0ec 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -512,10 +512,9 @@ def test_restore_full_ptrack_archive(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 12: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") node.pgbench_init(scale=2) @@ -567,10 +566,9 @@ def test_restore_ptrack(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 12: - 
node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") node.pgbench_init(scale=2) @@ -630,10 +628,9 @@ def test_restore_full_ptrack_stream(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 12: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") node.pgbench_init(scale=2) @@ -689,10 +686,9 @@ def test_restore_full_ptrack_under_load(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 12: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") node.pgbench_init(scale=2) @@ -759,10 +755,9 @@ def test_restore_full_under_load_ptrack(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if node.major_version >= 12: - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") # wal_segment_size = self.guc_wal_segment_size(node) node.pgbench_init(scale=2) @@ -3298,32 +3293,13 @@ def test_missing_database_map(self): ) if self.ptrack: - fnames = [] - if node.major_version < 12: - # Reviewer, NB: skip this test in case of old ptrack? - fnames += [ - 'pg_catalog.oideq(oid, oid)', - 'pg_catalog.ptrack_version()', - 'pg_catalog.pg_ptrack_clear()', - 'pg_catalog.pg_ptrack_control_lsn()', - 'pg_catalog.pg_ptrack_get_and_clear_db(oid, oid)', - 'pg_catalog.pg_ptrack_get_and_clear(oid, oid)', - 'pg_catalog.pg_ptrack_get_block_2(oid, oid, oid, bigint)' - ] - else: - # TODO why backup works without these grants ? -# fnames += [ -# 'pg_ptrack_get_pagemapset(pg_lsn)', -# 'pg_ptrack_control_lsn()', -# ] - node.safe_psql( - "backupdb", - "CREATE EXTENSION ptrack WITH SCHEMA pg_catalog") - - for fname in fnames: - node.safe_psql( - "backupdb", - "GRANT EXECUTE ON FUNCTION {0} TO backup".format(fname)) + # TODO why backup works without these grants ? + # 'pg_ptrack_get_pagemapset(pg_lsn)', + # 'pg_ptrack_control_lsn()', + # because PUBLIC + node.safe_psql( + "backupdb", + "CREATE EXTENSION ptrack WITH SCHEMA pg_catalog") if ProbackupTest.enterprise: node.safe_psql( From e68132d10c0b2fe604e3a0c95b6d6d9e02a22778 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Fri, 18 Jun 2021 12:20:58 +0300 Subject: [PATCH 174/525] tests: fix tests.archive.ArchiveTest.test_basic_master_and_replica_concurrent_archiving --- tests/archive.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/archive.py b/tests/archive.py index a7bc04e13..44fd7bcfb 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -895,6 +895,9 @@ def test_basic_master_and_replica_concurrent_archiving(self): set replica with archiving, make sure that archiving on both node is working. """ + if self.pg_config_version < self.version_to_num('9.6.0'): + return unittest.skip('You need PostgreSQL >= 9.6 for this test') + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( From d55838e80de51743ea3b644deeccfa34132f5efd Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Fri, 18 Jun 2021 13:39:11 +0300 Subject: [PATCH 175/525] fix test restore.RestoreTest.test_missing_database_map for PGPROEE11 compatibility --- tests/restore.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/restore.py b/tests/restore.py index e59a1b0ec..a76272b12 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -3299,7 +3299,9 @@ def test_missing_database_map(self): # because PUBLIC node.safe_psql( "backupdb", - "CREATE EXTENSION ptrack WITH SCHEMA pg_catalog") + "CREATE SCHEMA ptrack; " + "GRANT USAGE ON SCHEMA ptrack TO backup; " + "CREATE EXTENSION ptrack WITH SCHEMA ptrack") if ProbackupTest.enterprise: node.safe_psql( From ad4fc967d6a2b36a91ef12bf849dbc4a6632d89f Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Fri, 18 Jun 2021 20:04:39 +0300 Subject: [PATCH 176/525] fix segfault in get_index_list --- src/checkdb.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/checkdb.c b/src/checkdb.c index 5d7d6652b..4ea1d0800 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -455,6 +455,7 @@ get_index_list(const char *dbname, bool first_db_with_amcheck, ind->heapallindexed_is_supported = heapallindexed_is_supported; ind->amcheck_nspname = pgut_malloc(strlen(amcheck_nspname) + 1); strcpy(ind->amcheck_nspname, amcheck_nspname); + pg_atomic_clear_flag(&ind->lock); if (index_list == NULL) index_list = parray_new(); @@ -462,8 +463,6 @@ get_index_list(const char *dbname, bool first_db_with_amcheck, parray_append(index_list, ind); } - pfilearray_clear_locks(index_list); - PQclear(res); return index_list; From e27a6a91e0444bf70c3203b1d7efef6585917df7 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 20 Jun 2021 02:13:35 +0300 Subject: [PATCH 177/525] [Issue #400] PostgreSQL 14 support --- src/backup.c | 4 ++-- src/catalog.c | 4 ++-- src/merge.c | 2 +- src/parsexlog.c | 12 ++++++++++++ src/show.c | 4 ++-- src/utils/pgut.c | 15 ++++++++++++++- tests/helpers/ptrack_helpers.py | 4 ++++ 7 files changed, 37 insertions(+), 8 deletions(-) diff --git a/src/backup.c b/src/backup.c index 6f1aa867a..83785c1cb 100644 --- a/src/backup.c +++ b/src/backup.c @@ -725,7 +725,7 @@ pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo) elog(WARNING, "Current PostgreSQL role is superuser. 
" "It is not recommended to run backup or checkdb as superuser."); - StrNCpy(current.server_version, nodeInfo->server_version_str, + strlcpy(current.server_version, nodeInfo->server_version_str, sizeof(current.server_version)); return cur_conn; @@ -761,7 +761,7 @@ do_backup(pgSetBackupParams *set_backup_params, current.status = BACKUP_STATUS_RUNNING; current.start_time = current.backup_id; - StrNCpy(current.program_version, PROGRAM_VERSION, + strlcpy(current.program_version, PROGRAM_VERSION, sizeof(current.program_version)); current.compress_alg = instance_config.compress_alg; diff --git a/src/catalog.c b/src/catalog.c index 3ea4d9bca..3ba17e9fd 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -2648,14 +2648,14 @@ readBackupControlFile(const char *path) if (program_version) { - StrNCpy(backup->program_version, program_version, + strlcpy(backup->program_version, program_version, sizeof(backup->program_version)); pfree(program_version); } if (server_version) { - StrNCpy(backup->server_version, server_version, + strlcpy(backup->server_version, server_version, sizeof(backup->server_version)); pfree(server_version); } diff --git a/src/merge.c b/src/merge.c index 3fd5b13ae..f351975d3 100644 --- a/src/merge.c +++ b/src/merge.c @@ -735,7 +735,7 @@ merge_chain(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup, * We cannot set backup status to OK just yet, * because it still has old start_time. */ - StrNCpy(full_backup->program_version, PROGRAM_VERSION, + strlcpy(full_backup->program_version, PROGRAM_VERSION, sizeof(full_backup->program_version)); full_backup->parent_backup = INVALID_BACKUP_ID; full_backup->start_lsn = dest_backup->start_lsn; diff --git a/src/parsexlog.c b/src/parsexlog.c index 8dfb2c78c..19078fb64 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -1798,6 +1798,18 @@ extractPageInfo(XLogReaderState *record, XLogReaderData *reader_data, * source system. */ } + else if (rmid == RM_XACT_ID && + ((rminfo & XLOG_XACT_OPMASK) == XLOG_XACT_COMMIT || + (rminfo & XLOG_XACT_OPMASK) == XLOG_XACT_COMMIT_PREPARED || + (rminfo & XLOG_XACT_OPMASK) == XLOG_XACT_ABORT || + (rminfo & XLOG_XACT_OPMASK) == XLOG_XACT_ABORT_PREPARED)) + { + /* + * These records can include "dropped rels". We can safely ignore + * them, we will see that they are missing and copy them from the + * source. 
+ */ + } else if (info & XLR_SPECIAL_REL_UPDATE) { /* diff --git a/src/show.c b/src/show.c index c1482772e..496c9d833 100644 --- a/src/show.c +++ b/src/show.c @@ -552,7 +552,7 @@ show_instance_plain(const char *instance_name, parray *backup_list, bool show_na time2iso(row->recovery_time, lengthof(row->recovery_time), backup->recovery_time, false); else - StrNCpy(row->recovery_time, "----", sizeof(row->recovery_time)); + strlcpy(row->recovery_time, "----", sizeof(row->recovery_time)); widths[cur] = Max(widths[cur], strlen(row->recovery_time)); cur++; @@ -587,7 +587,7 @@ show_instance_plain(const char *instance_name, parray *backup_list, bool show_na pretty_time_interval(difftime(backup->end_time, backup->start_time), row->duration, lengthof(row->duration)); else - StrNCpy(row->duration, "----", sizeof(row->duration)); + strlcpy(row->duration, "----", sizeof(row->duration)); widths[cur] = Max(widths[cur], strlen(row->duration)); cur++; diff --git a/src/utils/pgut.c b/src/utils/pgut.c index e1e52b24b..1d8845c23 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -16,6 +16,10 @@ #include "libpq/pqsignal.h" #include "pqexpbuffer.h" +#if PG_VERSION_NUM >= 140000 +#include "common/string.h" +#endif + #include #include "pgut.h" @@ -75,7 +79,16 @@ prompt_for_password(const char *username) password = NULL; } -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 140000 + if (username == NULL) + password = simple_prompt("Password: ", false); + else + { + char message[256]; + snprintf(message, lengthof(message), "Password for user %s: ", username); + password = simple_prompt(message , false); + } +#elif PG_VERSION_NUM >= 100000 password = (char *) pgut_malloc(sizeof(char) * 100 + 1); if (username == NULL) simple_prompt("Password: ", password, 100, false); diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index b6dc6d028..bf84f266e 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -139,6 +139,10 @@ def slow_start(self, replica=False): except testgres.QueryException as e: if 'database system is starting up' in e.message: pass + elif 'FATAL: the database system is not accepting connections' in e.message: + pass + elif replica and 'Hot standby mode is disabled' in e.message: + raise e else: raise e From b395b3c942ce6cf49ed7d299fa1764eef2a9b0f8 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 20 Jun 2021 02:28:59 +0300 Subject: [PATCH 178/525] Readme: update --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 49b9351df..b7e170cf5 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ `pg_probackup` is a utility to manage backup and recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure. The utility is compatible with: -* PostgreSQL 9.5, 9.6, 10, 11, 12, 13; +* PostgreSQL 9.5, 9.6, 10, 11, 12, 13, 14; As compared to other backup solutions, `pg_probackup` offers the following benefits that can help you implement different backup strategies and deal with large amounts of data: * Incremental backup: page-level incremental backup allows you to save disk space, speed up backup and restore. With three different incremental modes, you can plan the backup strategy in accordance with your data flow. 
@@ -41,9 +41,9 @@ Regardless of the chosen backup type, all backups taken with `pg_probackup` supp ## ptrack support `PTRACK` backup support provided via following options: -* vanilla PostgreSQL 12,13 with [ptrack extension](https://github.com/postgrespro/ptrack) -* Postgres Pro Standard 9.6, 10, 11, 12 -* Postgres Pro Enterprise 9.6, 10, 11, 12 +* vanilla PostgreSQL 11, 12, 13, 14 with [ptrack extension](https://github.com/postgrespro/ptrack) +* Postgres Pro Standard 9.6, 10, 11, 12, 13 +* Postgres Pro Enterprise 9.6, 10, 11, 12, 13 ## Limitations From 7de728496d1080cd34d957af254eed556fc163d3 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Mon, 21 Jun 2021 11:45:10 +0300 Subject: [PATCH 179/525] Catchup command implementation (#392) [ PR #392] New command "catchup" is added, it allows fallen-behind standby to "catch up" with master, or create standby from scratch without resorting to restore from backup Co-authored-by: Grigory Smolkin Co-authored-by: anastasia Co-authored-by: Elena Indrupskaya --- Makefile | 2 +- doc/pgprobackup.xml | 282 ++++++++- src/archive.c | 2 +- src/backup.c | 78 ++- src/catalog.c | 2 +- src/catchup.c | 1020 +++++++++++++++++++++++++++++++ src/data.c | 314 +++++++++- src/dir.c | 9 +- src/help.c | 63 +- src/init.c | 2 +- src/pg_probackup.c | 47 +- src/pg_probackup.h | 42 +- src/ptrack.c | 2 +- src/restore.c | 16 +- src/stream.c | 30 +- src/util.c | 38 +- src/utils/configuration.c | 1 + src/utils/configuration.h | 3 +- src/utils/file.c | 256 +++++++- src/utils/file.h | 4 +- src/utils/parray.c | 7 + src/utils/parray.h | 1 + tests/__init__.py | 4 +- tests/catchup.py | 977 +++++++++++++++++++++++++++++ tests/helpers/ptrack_helpers.py | 46 +- 25 files changed, 3146 insertions(+), 102 deletions(-) create mode 100644 src/catchup.c create mode 100644 tests/catchup.py diff --git a/Makefile b/Makefile index 1431be4ef..5173aa38f 100644 --- a/Makefile +++ b/Makefile @@ -7,7 +7,7 @@ OBJS = src/utils/configuration.o src/utils/json.o src/utils/logger.o \ OBJS += src/archive.o src/backup.o src/catalog.o src/checkdb.o src/configure.o src/data.o \ src/delete.o src/dir.o src/fetch.o src/help.o src/init.o src/merge.o \ src/parsexlog.o src/ptrack.o src/pg_probackup.o src/restore.o src/show.o src/stream.o \ - src/util.o src/validate.o src/datapagemap.o + src/util.o src/validate.o src/datapagemap.o src/catchup.o # borrowed files OBJS += src/pg_crc.o src/receivelog.o src/streamutil.o \ diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index b1ddd0032..f7814c2d2 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -143,6 +143,14 @@ doc/src/sgml/pgprobackup.sgml wal_file_name option + + pg_probackup + + catchup_mode + =path_to_pgdata_on_remote_server + =path_to_local_dir + option + @@ -283,6 +291,11 @@ doc/src/sgml/pgprobackup.sgml Partial restore: restoring only the specified databases. + + + Catchup: cloning a PostgreSQL instance for a fallen-behind standby server to catch up with master. + + To manage backup data, pg_probackup creates a @@ -1076,7 +1089,8 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; mode: , , , - , + , + , and . @@ -1431,6 +1445,7 @@ pg_probackup backup -B backup_dir --instance + Performing Cluster Verification @@ -1506,6 +1521,7 @@ pg_probackup checkdb --amcheck --skip-block-validation [connection_ higher cost of CPU, memory, and I/O consumption. 
+ Validating a Backup @@ -2073,6 +2089,7 @@ pg_probackup restore -B backup_dir --instance , , , + , and processes can be executed on several parallel threads. This can significantly @@ -3390,6 +3407,148 @@ pg_probackup delete -B backup_dir --instance + + + Cloning <productname>PostgreSQL</productname> Instance + + pg_probackup can create a copy of a PostgreSQL + instance directly, without using the backup catalog. This allows you + to add a new standby server in a parallel mode or to have a standby + server that has fallen behind catch up with master. + + + + Cloning a PostgreSQL instance is different from other pg_probackup + operations: + + + + The backup catalog is not required. + + + + + STREAM WAL delivery mode is only supported. + + + + + Copying external directories + is not supported. + + + + + No SQL commands involving tablespaces, such as + CREATE TABLESPACE/DROP TABLESPACE, + can be run simultaneously with catchup. + + + + + catchup takes configuration files, such as + postgresql.conf, postgresql.auto.conf, + or pg_hba.conf, from the source server and overwrites them + on the target server. + + + + + + + Before cloning a PostgreSQL instance, set up the source database server as follows: + + + + Configure + the database cluster for the instance to copy. + + + + + To copy from a remote server, configure the remote mode. + + + + + To use the PTRACK catchup mode, set up PTRACK backups. + + + + + + + To clone a PostgreSQL instance, ensure that the source + database server is running and accepting connections and + on the server with the destination database, run the following command: + + +pg_probackup catchup -b catchup-mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream [connection_options] [remote_options] + + + Where catchup_mode can take one of the + following values: FULL, DELTA, or PTRACK. + + + + + FULL — creates a full copy of the PostgreSQL instance. + The destination directory must be empty for this mode. + + + + + DELTA — reads all data files in the data directory and + creates an incremental copy for pages that have changed + since the destination database was shut down cleanly. + For this mode, the destination directory must contain a previous + copy of the database that was shut down cleanly. + + + + + PTRACK — tracking page changes on the fly, + only copies pages that have changed since the point of divergence + of the source and destination databases. + For this mode, the destination directory must contain a previous + copy of the database that was shut down cleanly. + + + + + You can use connection_options to specify + the connection to the source database cluster. If it is located on a different server, + also specify remote_options. + If the source database contains tablespaces that must be located in + a different directory, additionally specify the + option: + +pg_probackup catchup -b catchup-mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream --tablespace-mapping=OLDDIR=NEWDIR + + To run the catchup command on parallel threads, specify the number + of threads with the option: + +pg_probackup catchup -b catchup-mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream --threads=num_threads + + + + For example, assume that a remote standby server with the PostgreSQL instance having /replica-pgdata data directory has fallen behind. 
To sync this instance with the one in /master-pgdata data directory, you can run + the catchup command in the PTRACK mode on four parallel threads as follows: + +pg_probackup catchup --source-pgdata=/master-pgdata --destination-pgdata=/replica-pgdata -p 5432 -d postgres -U remote-postgres-user --stream --backup-mode=PTRACK --remote-host=remote-hostname --remote-user=remote-unix-username -j 4 + + + + Another example shows how you can add a new remote standby server with the PostgreSQL data directory /replica-pgdata by running the catchup command in the FULL mode + on four parallel threads: + +pg_probackup catchup --source-pgdata=/master-pgdata --destination-pgdata=/replica-pgdata -p 5432 -d postgres -U remote-postgres-user --stream --backup-mode=FULL --remote-host=remote-hostname --remote-user=remote-unix-username -j 4 + + + @@ -4262,6 +4421,121 @@ pg_probackup archive-get -B backup_dir --instance Archiving Options. + + + catchup + +pg_probackup catchup -b catchup_mode +--source-pgdata=path_to_pgdata_on_remote_server +--destination-pgdata=path_to_local_dir +[--help] [--stream] [-j num_threads] +[-T OLDDIR=NEWDIR] +[connection_options] [remote_options] + + + Creates a copy of a PostgreSQL + instance without using the backup catalog. + + + + + + + + Specifies the catchup mode to use. Possible values are: + + + + + FULL — creates a full copy of the PostgreSQL instance. + + + + + DELTA — reads all data files in the data directory and + creates an incremental copy for pages that have changed + since the destination database was shut down cleanly. + + + + + PTRACK — tracking page changes on the fly, + only copies pages that have changed since the point of divergence + of the source and destination databases. + + + + + + + + + + + + Specifies the path to the data directory of the instance to be copied. The path can be local or remote. + + + + + + + + + Specifies the path to the local data directory to copy to. + + + + + + + + + Makes a STREAM backup, which + includes all the necessary WAL files by streaming them from + the database server via replication protocol. + + + + + + + + + + Sets the number of parallel threads for + catchup process. + + + + + + + + + + Relocates the tablespace from the OLDDIR to the NEWDIR + directory at the time of recovery. Both OLDDIR and NEWDIR must + be absolute paths. If the path contains the equals sign (=), + escape it with a backslash. This option can be specified + multiple times for multiple tablespaces. + + + + + + + + + Additionally, connection + options, remote + mode options can be used. + + + For details on usage, see the section + Cloning PostgreSQL Instance. + + Options @@ -4651,7 +4925,7 @@ pg_probackup archive-get -B backup_dir --instance - Disable the coloring for console log messages of warning and error levels. + Disable coloring for console log messages of warning and error levels. @@ -4804,7 +5078,8 @@ pg_probackup archive-get -B backup_dir --instance Connection Options You can use these options together with - and + + , , and commands. @@ -5095,6 +5370,7 @@ pg_probackup archive-get -B backup_dir --instance , , , + , , , and commands. 
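For quick reference, a minimal sketch of a DELTA-mode invocation assembled from the options documented in the hunks above; the data-directory paths, port, and user name are placeholders for illustration and are not taken from the patch:

pg_probackup catchup -b DELTA --source-pgdata=/master-pgdata --destination-pgdata=/replica-pgdata -p 5432 -d postgres -U postgres --stream -j 4

This mirrors the FULL and PTRACK examples in the Cloning PostgreSQL Instance section; per the mode descriptions above, DELTA additionally requires that the destination directory already contain a copy of the database that was shut down cleanly.
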
diff --git a/src/archive.c b/src/archive.c index 4058cd0d4..7bb8c1c03 100644 --- a/src/archive.c +++ b/src/archive.c @@ -148,7 +148,7 @@ do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *wa elog(ERROR, "getcwd() error"); /* verify that archive-push --instance parameter is valid */ - system_id = get_system_identifier(current_dir); + system_id = get_system_identifier(current_dir, FIO_DB_HOST); if (instance->pgdata == NULL) elog(ERROR, "Cannot read pg_probackup.conf for this instance"); diff --git a/src/backup.c b/src/backup.c index 688afefca..2d834410a 100644 --- a/src/backup.c +++ b/src/backup.c @@ -94,7 +94,6 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, { int i; char external_prefix[MAXPGPATH]; /* Temp value. Used as template */ - char dst_backup_path[MAXPGPATH]; char label[1024]; XLogRecPtr prev_backup_start_lsn = InvalidXLogRecPtr; @@ -137,7 +136,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, #if PG_VERSION_NUM >= 90600 current.tli = get_current_timeline(backup_conn); #else - current.tli = get_current_timeline_from_control(false); + current.tli = get_current_timeline_from_control(instance_config.pgdata, FIO_DB_HOST, false); #endif /* @@ -258,17 +257,19 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, /* start stream replication */ if (current.stream) { - join_path_components(dst_backup_path, current.database_dir, PG_XLOG_DIR); - fio_mkdir(dst_backup_path, DIR_PERMISSION, FIO_BACKUP_HOST); + char stream_xlog_path[MAXPGPATH]; - start_WAL_streaming(backup_conn, dst_backup_path, &instance_config.conn_opt, + join_path_components(stream_xlog_path, current.database_dir, PG_XLOG_DIR); + fio_mkdir(stream_xlog_path, DIR_PERMISSION, FIO_BACKUP_HOST); + + start_WAL_streaming(backup_conn, stream_xlog_path, &instance_config.conn_opt, current.start_lsn, current.tli); /* Make sure that WAL streaming is working * PAGE backup in stream mode is waited twice, first for * segment in WAL archive and then for streamed segment */ - wait_wal_lsn(dst_backup_path, current.start_lsn, true, current.tli, false, true, ERROR, true); + wait_wal_lsn(stream_xlog_path, current.start_lsn, true, current.tli, false, true, ERROR, true); } /* initialize backup's file list */ @@ -315,23 +316,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, elog(ERROR, "PGDATA is almost empty. Either it was concurrently deleted or " "pg_probackup do not possess sufficient permissions to list PGDATA content"); - /* Calculate pgdata_bytes */ - for (i = 0; i < parray_num(backup_files_list); i++) - { - pgFile *file = (pgFile *) parray_get(backup_files_list, i); - - if (file->external_dir_num != 0) - continue; - - if (S_ISDIR(file->mode)) - { - current.pgdata_bytes += 4096; - continue; - } - - current.pgdata_bytes += file->size; - } - + current.pgdata_bytes += calculate_datasize_of_filelist(backup_files_list); pretty_size(current.pgdata_bytes, pretty_bytes, lengthof(pretty_bytes)); elog(INFO, "PGDATA size: %s", pretty_bytes); @@ -697,7 +682,7 @@ pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo) if (nodeInfo->is_superuser) elog(WARNING, "Current PostgreSQL role is superuser. 
" - "It is not recommended to run backup or checkdb as superuser."); + "It is not recommended to run pg_probackup under superuser."); strlcpy(current.server_version, nodeInfo->server_version_str, sizeof(current.server_version)); @@ -786,7 +771,7 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, // elog(WARNING, "ptrack_version_num %d", ptrack_version_num); if (nodeInfo.ptrack_version_num > 0) - nodeInfo.is_ptrack_enable = pg_ptrack_enable(backup_conn, nodeInfo.ptrack_version_num); + nodeInfo.is_ptrack_enabled = pg_is_ptrack_enabled(backup_conn, nodeInfo.ptrack_version_num); if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK) { @@ -795,7 +780,7 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, elog(ERROR, "This PostgreSQL instance does not support ptrack"); else { - if (!nodeInfo.is_ptrack_enable) + if (!nodeInfo.is_ptrack_enabled) elog(ERROR, "Ptrack is disabled"); } } @@ -953,12 +938,12 @@ check_server_version(PGconn *conn, PGNodeInfo *nodeInfo) * All system identifiers must be equal. */ void -check_system_identifiers(PGconn *conn, char *pgdata) +check_system_identifiers(PGconn *conn, const char *pgdata) { uint64 system_id_conn; uint64 system_id_pgdata; - system_id_pgdata = get_system_identifier(pgdata); + system_id_pgdata = get_system_identifier(pgdata, FIO_DB_HOST); system_id_conn = get_remote_system_identifier(conn); /* for checkdb check only system_id_pgdata and system_id_conn */ @@ -1069,7 +1054,7 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup, * Switch to a new WAL segment. It should be called only for master. * For PG 9.5 it should be called only if pguser is superuser. */ -static void +void pg_switch_wal(PGconn *conn) { PGresult *res; @@ -2282,7 +2267,7 @@ process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno) } -static void +void check_external_for_tablespaces(parray *external_list, PGconn *backup_conn) { PGresult *res; @@ -2346,3 +2331,36 @@ check_external_for_tablespaces(parray *external_list, PGconn *backup_conn) } } } + +/* + * Calculate pgdata_bytes + * accepts (parray *) of (pgFile *) + */ +int64 +calculate_datasize_of_filelist(parray *filelist) +{ + int64 bytes = 0; + int i; + + /* parray_num don't check for NULL */ + if (filelist == NULL) + return 0; + + for (i = 0; i < parray_num(filelist); i++) + { + pgFile *file = (pgFile *) parray_get(filelist, i); + + if (file->external_dir_num != 0) + continue; + + if (S_ISDIR(file->mode)) + { + // TODO is a dir always 4K? 
+ bytes += 4096; + continue; + } + + bytes += file->size; + } + return bytes; +} diff --git a/src/catalog.c b/src/catalog.c index f8af2b72e..9775968b8 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -2883,7 +2883,7 @@ pgNodeInit(PGNodeInfo *node) node->server_version_str[0] = '\0'; node->ptrack_version_num = 0; - node->is_ptrack_enable = false; + node->is_ptrack_enabled = false; node->ptrack_schema = NULL; } diff --git a/src/catchup.c b/src/catchup.c new file mode 100644 index 000000000..f80a0f0f9 --- /dev/null +++ b/src/catchup.c @@ -0,0 +1,1020 @@ +/*------------------------------------------------------------------------- + * + * catchup.c: sync DB cluster + * + * Copyright (c) 2021, Postgres Professional + * + *------------------------------------------------------------------------- + */ + +#include "pg_probackup.h" + +#if PG_VERSION_NUM < 110000 +#include "catalog/catalog.h" +#endif +#include "catalog/pg_tablespace.h" +#include "access/timeline.h" +#include "pgtar.h" +#include "streamutil.h" + +#include +#include +#include + +#include "utils/thread.h" +#include "utils/file.h" + +/* + * Catchup routines + */ +static PGconn *catchup_collect_info(PGNodeInfo *source_node_info, const char *source_pgdata, const char *dest_pgdata); +static void catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, const char *source_pgdata, + const char *dest_pgdata); +static void catchup_check_tablespaces_existance_in_tbsmapping(PGconn *conn); +static parray* catchup_get_tli_history(ConnectionOptions *conn_opt, TimeLineID tli); + +//REVIEW The name of this function looks strange to me. +//Maybe catchup_init_state() or catchup_setup() will do better? +//I'd also suggest to wrap all these fields into some CatchupState, but it isn't urgent. +/* + * Prepare for work: fill some globals, open connection to source database + */ +static PGconn * +catchup_collect_info(PGNodeInfo *source_node_info, const char *source_pgdata, const char *dest_pgdata) +{ + PGconn *source_conn; + + /* Initialize PGInfonode */ + pgNodeInit(source_node_info); + + /* Get WAL segments size and system ID of source PG instance */ + instance_config.xlog_seg_size = get_xlog_seg_size(source_pgdata); + instance_config.system_identifier = get_system_identifier(source_pgdata, FIO_DB_HOST); + current.start_time = time(NULL); + + StrNCpy(current.program_version, PROGRAM_VERSION, sizeof(current.program_version)); + + /* Do some compatibility checks and fill basic info about PG instance */ + source_conn = pgdata_basic_setup(instance_config.conn_opt, source_node_info); + +#if PG_VERSION_NUM >= 110000 + if (!RetrieveWalSegSize(source_conn)) + elog(ERROR, "Failed to retrieve wal_segment_size"); +#endif + + get_ptrack_version(source_conn, source_node_info); + if (source_node_info->ptrack_version_num > 0) + source_node_info->is_ptrack_enabled = pg_is_ptrack_enabled(source_conn, source_node_info->ptrack_version_num); + + /* Obtain current timeline */ +#if PG_VERSION_NUM >= 90600 + current.tli = get_current_timeline(source_conn); +#else + instance_config.pgdata = source_pgdata; + current.tli = get_current_timeline_from_control(source_pgdata, FIO_DB_HOST, false); +#endif + + elog(INFO, "Catchup start, pg_probackup version: %s, " + "PostgreSQL version: %s, " + "remote: %s, source-pgdata: %s, destination-pgdata: %s", + PROGRAM_VERSION, source_node_info->server_version_str, + IsSshProtocol() ? 
"true" : "false", + source_pgdata, dest_pgdata); + + if (current.from_replica) + elog(INFO, "Running catchup from standby"); + + return source_conn; +} + +/* + * Check that catchup can be performed on source and dest + * this function is for checks, that can be performed without modification of data on disk + */ +static void +catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, + const char *source_pgdata, const char *dest_pgdata) +{ + /* TODO + * gsmol - fallback to FULL mode if dest PGDATA is empty + * kulaginm -- I think this is a harmful feature. If user requested an incremental catchup, then + * he expects that this will be done quickly and efficiently. If, for example, he made a mistake + * with dest_dir, then he will receive a second full copy instead of an error message, and I think + * that in some cases he would prefer the error. + * I propose in future versions to offer a backup_mode auto, in which we will look to the dest_dir + * and decide which of the modes will be the most effective. + * I.e.: + * if(requested_backup_mode == BACKUP_MODE_DIFF_AUTO) + * { + * if(dest_pgdata_is_empty) + * backup_mode = BACKUP_MODE_FULL; + * else + * if(ptrack supported and applicable) + * backup_mode = BACKUP_MODE_DIFF_PTRACK; + * else + * backup_mode = BACKUP_MODE_DIFF_DELTA; + * } + */ + + if (dir_is_empty(dest_pgdata, FIO_LOCAL_HOST)) + { + if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK || + current.backup_mode == BACKUP_MODE_DIFF_DELTA) + elog(ERROR, "\"%s\" is empty, but incremental catchup mode requested.", + dest_pgdata); + } + else /* dest dir not empty */ + { + if (current.backup_mode == BACKUP_MODE_FULL) + elog(ERROR, "Can't perform full catchup into non-empty directory \"%s\".", + dest_pgdata); + } + + /* check that postmaster is not running in destination */ + if (current.backup_mode != BACKUP_MODE_FULL) + { + pid_t pid; + pid = fio_check_postmaster(dest_pgdata, FIO_LOCAL_HOST); + if (pid == 1) /* postmaster.pid is mangled */ + { + char pid_filename[MAXPGPATH]; + join_path_components(pid_filename, dest_pgdata, "postmaster.pid"); + elog(ERROR, "Pid file \"%s\" is mangled, cannot determine whether postmaster is running or not", + pid_filename); + } + else if (pid > 1) /* postmaster is up */ + { + elog(ERROR, "Postmaster with pid %u is running in destination directory \"%s\"", + pid, dest_pgdata); + } + } + + /* check backup_label absence in dest */ + if (current.backup_mode != BACKUP_MODE_FULL) + { + char backup_label_filename[MAXPGPATH]; + + join_path_components(backup_label_filename, dest_pgdata, PG_BACKUP_LABEL_FILE); + if (fio_access(backup_label_filename, F_OK, FIO_LOCAL_HOST) == 0) + elog(ERROR, "Destination directory contains \"" PG_BACKUP_LABEL_FILE "\" file"); + } + + /* check that destination database is shutdowned cleanly */ + if (current.backup_mode != BACKUP_MODE_FULL) + { + DBState state; + state = get_system_dbstate(dest_pgdata, FIO_LOCAL_HOST); + /* see states in postgres sources (src/include/catalog/pg_control.h) */ + if (state != DB_SHUTDOWNED && state != DB_SHUTDOWNED_IN_RECOVERY) + elog(ERROR, "Postmaster in destination directory \"%s\" must be stopped cleanly", + dest_pgdata); + } + + /* Check that connected PG instance, source and destination PGDATA are the same */ + { + uint64 source_conn_id, source_id, dest_id; + + source_conn_id = get_remote_system_identifier(source_conn); + source_id = get_system_identifier(source_pgdata, FIO_DB_HOST); /* same as instance_config.system_identifier */ + + if (source_conn_id != source_id) + elog(ERROR, 
"Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu", + source_conn_id, source_pgdata, source_id); + + if (current.backup_mode != BACKUP_MODE_FULL) + { + dest_id = get_system_identifier(dest_pgdata, FIO_LOCAL_HOST); + if (source_conn_id != dest_id) + elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu", + source_conn_id, dest_pgdata, dest_id); + } + } + + /* check PTRACK version */ + if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK) + { + if (source_node_info->ptrack_version_num == 0) + elog(ERROR, "This PostgreSQL instance does not support ptrack"); + else if (source_node_info->ptrack_version_num < 200) + elog(ERROR, "ptrack extension is too old.\n" + "Upgrade ptrack to version >= 2"); + else if (!source_node_info->is_ptrack_enabled) + elog(ERROR, "Ptrack is disabled"); + } + + if (current.from_replica && exclusive_backup) + elog(ERROR, "Catchup from standby is only available for PostgreSQL >= 9.6"); + + /* check that we don't overwrite tablespace in source pgdata */ + catchup_check_tablespaces_existance_in_tbsmapping(source_conn); + + /* check timelines */ + if (current.backup_mode != BACKUP_MODE_FULL) + { + RedoParams dest_redo = { 0, InvalidXLogRecPtr, 0 }; + + /* fill dest_redo.lsn and dest_redo.tli */ + get_redo(dest_pgdata, FIO_LOCAL_HOST, &dest_redo); + + if (current.tli != 1) + { + parray *source_timelines; /* parray* of TimeLineHistoryEntry* */ + source_timelines = catchup_get_tli_history(&instance_config.conn_opt, current.tli); + + if (source_timelines == NULL) + elog(ERROR, "Cannot get source timeline history"); + + if (!satisfy_timeline(source_timelines, dest_redo.tli, dest_redo.lsn)) + elog(ERROR, "Destination is not in source timeline history"); + + parray_walk(source_timelines, pfree); + parray_free(source_timelines); + } + else /* special case -- no history files in source */ + { + if (dest_redo.tli != 1) + elog(ERROR, "Source is behind destination in timeline history"); + } + } +} + +/* + * Check that all tablespaces exists in tablespace mapping (--tablespace-mapping option) + * Check that all local mapped directories is empty if it is local FULL catchup + * Emit fatal error if that (not existent in map or not empty) tablespace found + */ +static void +catchup_check_tablespaces_existance_in_tbsmapping(PGconn *conn) +{ + PGresult *res; + int i; + char *tablespace_path = NULL; + const char *linked_path = NULL; + char *query = "SELECT pg_catalog.pg_tablespace_location(oid) " + "FROM pg_catalog.pg_tablespace " + "WHERE pg_catalog.pg_tablespace_location(oid) <> '';"; + + res = pgut_execute(conn, query, 0, NULL); + + if (!res) + elog(ERROR, "Failed to get list of tablespaces"); + + for (i = 0; i < res->ntups; i++) + { + tablespace_path = PQgetvalue(res, i, 0); + Assert (strlen(tablespace_path) > 0); + + canonicalize_path(tablespace_path); + linked_path = get_tablespace_mapping(tablespace_path); + + if (strcmp(tablespace_path, linked_path) == 0) + /* same result -> not found in mapping */ + { + if (!fio_is_remote(FIO_DB_HOST)) + elog(ERROR, "Local catchup executed, but source database contains " + "tablespace (\"%s\"), that is not listed in the map", tablespace_path); + else + elog(WARNING, "Remote catchup executed and source database contains " + "tablespace (\"%s\"), that is not listed in the map", tablespace_path); + } + + if (!is_absolute_path(linked_path)) + elog(ERROR, "Tablespace directory path must be an absolute path: \"%s\"", + linked_path); + + if (current.backup_mode == 
BACKUP_MODE_FULL + && !dir_is_empty(linked_path, FIO_LOCAL_HOST)) + elog(ERROR, "Target mapped tablespace directory (\"%s\") is not empty in FULL catchup", + linked_path); + } + PQclear(res); +} + +/* + * Get timeline history via replication connection + * returns parray* of TimeLineHistoryEntry* + */ +static parray* +catchup_get_tli_history(ConnectionOptions *conn_opt, TimeLineID tli) +{ + PGresult *res; + PGconn *conn; + char *history; + char query[128]; + parray *result = NULL; + + snprintf(query, sizeof(query), "TIMELINE_HISTORY %u", tli); + + /* + * Connect in replication mode to the server. + */ + conn = pgut_connect_replication(conn_opt->pghost, + conn_opt->pgport, + conn_opt->pgdatabase, + conn_opt->pguser, + false); + + if (!conn) + return NULL; + + res = PQexec(conn, query); + PQfinish(conn); + + if (PQresultStatus(res) != PGRES_TUPLES_OK) + { + elog(WARNING, "Could not send replication command \"%s\": %s", + query, PQresultErrorMessage(res)); + PQclear(res); + return NULL; + } + + /* + * The response to TIMELINE_HISTORY is a single row result set + * with two fields: filename and content + */ + if (PQnfields(res) != 2 || PQntuples(res) != 1) + { + elog(ERROR, "Unexpected response to TIMELINE_HISTORY command: " + "got %d rows and %d fields, expected %d rows and %d fields", + PQntuples(res), PQnfields(res), 1, 2); + PQclear(res); + return NULL; + } + + history = pgut_strdup(PQgetvalue(res, 0, 1)); + result = parse_tli_history_buffer(history, tli); + + /* some cleanup */ + pg_free(history); + PQclear(res); + + return result; +} + +/* + * catchup multithreaded copy rountine and helper structure and function + */ + +/* parameters for catchup_thread_runner() passed from catchup_multithreaded_copy() */ +typedef struct +{ + PGNodeInfo *nodeInfo; + const char *from_root; + const char *to_root; + parray *source_filelist; + parray *dest_filelist; + XLogRecPtr sync_lsn; + BackupMode backup_mode; + int thread_num; + bool completed; +} catchup_thread_runner_arg; + +/* Catchup file copier executed in separate thread */ +static void * +catchup_thread_runner(void *arg) +{ + int i; + char from_fullpath[MAXPGPATH]; + char to_fullpath[MAXPGPATH]; + + catchup_thread_runner_arg *arguments = (catchup_thread_runner_arg *) arg; + int n_files = parray_num(arguments->source_filelist); + + /* catchup a file */ + for (i = 0; i < n_files; i++) + { + pgFile *file = (pgFile *) parray_get(arguments->source_filelist, i); + pgFile *dest_file = NULL; + + /* We have already copied all directories */ + if (S_ISDIR(file->mode)) + continue; + + if (!pg_atomic_test_set_flag(&file->lock)) + continue; + + /* check for interrupt */ + if (interrupted || thread_interrupted) + elog(ERROR, "Interrupted during catchup"); + + if (progress) + elog(INFO, "Progress: (%d/%d). 
Process file \"%s\"", + i + 1, n_files, file->rel_path); + + /* construct destination filepath */ + Assert(file->external_dir_num == 0); + join_path_components(from_fullpath, arguments->from_root, file->rel_path); + join_path_components(to_fullpath, arguments->to_root, file->rel_path); + + /* Encountered some strange beast */ + if (!S_ISREG(file->mode)) + elog(WARNING, "Unexpected type %d of file \"%s\", skipping", + file->mode, from_fullpath); + + /* Check that file exist in dest pgdata */ + if (arguments->backup_mode != BACKUP_MODE_FULL) + { + pgFile **dest_file_tmp = NULL; + dest_file_tmp = (pgFile **) parray_bsearch(arguments->dest_filelist, + file, pgFileCompareRelPathWithExternal); + if (dest_file_tmp) + { + /* File exists in destination PGDATA */ + file->exists_in_prev = true; + dest_file = *dest_file_tmp; + } + } + + /* Do actual work */ + if (file->is_datafile && !file->is_cfs) + { + catchup_data_file(file, from_fullpath, to_fullpath, + arguments->sync_lsn, + arguments->backup_mode, + NONE_COMPRESS, + 0, + arguments->nodeInfo->checksum_version, + arguments->nodeInfo->ptrack_version_num, + arguments->nodeInfo->ptrack_schema, + false, + dest_file != NULL ? dest_file->size : 0); + } + else + { + backup_non_data_file(file, dest_file, from_fullpath, to_fullpath, + arguments->backup_mode, current.parent_backup, true); + } + + if (file->write_size == FILE_NOT_FOUND) + continue; + + if (file->write_size == BYTES_INVALID) + { + elog(VERBOSE, "Skipping the unchanged file: \"%s\", read %li bytes", from_fullpath, file->read_size); + continue; + } + + elog(VERBOSE, "File \"%s\". Copied "INT64_FORMAT " bytes", + from_fullpath, file->write_size); + } + + /* ssh connection to longer needed */ + fio_disconnect(); + + /* Data files transferring is successful */ + arguments->completed = true; + + return NULL; +} + +/* + * main multithreaded copier + */ +static bool +catchup_multithreaded_copy(int num_threads, + PGNodeInfo *source_node_info, + const char *source_pgdata_path, + const char *dest_pgdata_path, + parray *source_filelist, + parray *dest_filelist, + XLogRecPtr sync_lsn, + BackupMode backup_mode) +{ + /* arrays with meta info for multi threaded catchup */ + catchup_thread_runner_arg *threads_args; + pthread_t *threads; + + bool all_threads_successful = true; + int i; + + /* init thread args */ + threads_args = (catchup_thread_runner_arg *) palloc(sizeof(catchup_thread_runner_arg) * num_threads); + for (i = 0; i < num_threads; i++) + threads_args[i] = (catchup_thread_runner_arg){ + .nodeInfo = source_node_info, + .from_root = source_pgdata_path, + .to_root = dest_pgdata_path, + .source_filelist = source_filelist, + .dest_filelist = dest_filelist, + .sync_lsn = sync_lsn, + .backup_mode = backup_mode, + .thread_num = i + 1, + .completed = false, + }; + + /* Run threads */ + thread_interrupted = false; + threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads); + for (i = 0; i < num_threads; i++) + { + elog(VERBOSE, "Start thread num: %i", i); + pthread_create(&threads[i], NULL, &catchup_thread_runner, &(threads_args[i])); + } + + /* Wait threads */ + for (i = 0; i < num_threads; i++) + { + pthread_join(threads[i], NULL); + all_threads_successful &= threads_args[i].completed; + } + + free(threads); + free(threads_args); + return all_threads_successful; +} + +/* + * + */ +static void +catchup_sync_destination_files(const char* pgdata_path, fio_location location, parray *filelist, pgFile *pg_control_file) +{ + char fullpath[MAXPGPATH]; + time_t start_time, end_time; + char pretty_time[20]; 
+ int i; + + elog(INFO, "Syncing copied files to disk"); + time(&start_time); + + for (i = 0; i < parray_num(filelist); i++) + { + pgFile *file = (pgFile *) parray_get(filelist, i); + + /* TODO: sync directory ? */ + if (S_ISDIR(file->mode)) + continue; + + Assert(file->external_dir_num == 0); + join_path_components(fullpath, pgdata_path, file->rel_path); + if (fio_sync(fullpath, location) != 0) + elog(ERROR, "Cannot sync file \"%s\": %s", fullpath, strerror(errno)); + } + + /* + * sync pg_control file + */ + join_path_components(fullpath, pgdata_path, pg_control_file->rel_path); + if (fio_sync(fullpath, location) != 0) + elog(ERROR, "Cannot sync file \"%s\": %s", fullpath, strerror(errno)); + + time(&end_time); + pretty_time_interval(difftime(end_time, start_time), + pretty_time, lengthof(pretty_time)); + elog(INFO, "Files are synced, time elapsed: %s", pretty_time); +} + +/* + * Entry point of pg_probackup CATCHUP subcommand. + */ +int +do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, bool sync_dest_files) +{ + PGconn *source_conn = NULL; + PGNodeInfo source_node_info; + bool backup_logs = false; + parray *source_filelist = NULL; + pgFile *source_pg_control_file = NULL; + parray *dest_filelist = NULL; + char dest_xlog_path[MAXPGPATH]; + + RedoParams dest_redo = { 0, InvalidXLogRecPtr, 0 }; + PGStopBackupResult stop_backup_result; + bool catchup_isok = true; + + int i; + + /* for fancy reporting */ + time_t start_time, end_time; + char pretty_time[20]; + char pretty_bytes[20]; + + source_conn = catchup_collect_info(&source_node_info, source_pgdata, dest_pgdata); + catchup_preflight_checks(&source_node_info, source_conn, source_pgdata, dest_pgdata); + + elog(LOG, "Database catchup start"); + + { + char label[1024]; + /* notify start of backup to PostgreSQL server */ + time2iso(label, lengthof(label), current.start_time, false); + strncat(label, " with pg_probackup", lengthof(label) - + strlen(" with pg_probackup")); + + /* Call pg_start_backup function in PostgreSQL connect */ + pg_start_backup(label, smooth_checkpoint, ¤t, &source_node_info, source_conn); + elog(LOG, "pg_start_backup START LSN %X/%X", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn)); + } + + //REVIEW I wonder, if we can move this piece above and call before pg_start backup()? + //It seems to be a part of setup phase. + if (current.backup_mode != BACKUP_MODE_FULL) + { + dest_filelist = parray_new(); + dir_list_file(dest_filelist, dest_pgdata, + true, true, false, backup_logs, true, 0, FIO_LOCAL_HOST); + + // fill dest_redo.lsn and dest_redo.tli + get_redo(dest_pgdata, FIO_LOCAL_HOST, &dest_redo); + elog(INFO, "syncLSN = %X/%X", (uint32) (dest_redo.lsn >> 32), (uint32) dest_redo.lsn); + + /* + * Future improvement to catch partial catchup: + * 1. rename dest pg_control into something like pg_control.pbk + * (so user can't start partial catchup'ed instance from this point) + * 2. try to read by get_redo() pg_control and pg_control.pbk (to detect partial catchup) + * 3. at the end (after copy of correct pg_control), remove pg_control.pbk + */ + } + + //REVIEW I wonder, if we can move this piece above and call before pg_start backup()? + //It seems to be a part of setup phase. 
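+	/*
+	 * PTRACK only records changes made after the LSN stored in ptrack_control,
+	 * so an incremental (PTRACK) catchup is possible only if that LSN is not
+	 * newer than the destination's checkpoint LSN (dest_redo.lsn); otherwise
+	 * some changes needed by the destination may be missing from the map.
+	 */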
+ /* + * TODO: move to separate function to use in both backup.c and catchup.c + */ + if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK) + { + XLogRecPtr ptrack_lsn = get_last_ptrack_lsn(source_conn, &source_node_info); + + // new ptrack is more robust and checks Start LSN + if (ptrack_lsn > dest_redo.lsn || ptrack_lsn == InvalidXLogRecPtr) + elog(ERROR, "LSN from ptrack_control in source %X/%X is greater than checkpoint LSN in destination %X/%X.\n" + "You can perform only FULL catchup.", + (uint32) (ptrack_lsn >> 32), (uint32) (ptrack_lsn), + (uint32) (dest_redo.lsn >> 32), + (uint32) (dest_redo.lsn)); + } + + /* Check that dest_redo.lsn is less than current.start_lsn */ + if (current.backup_mode != BACKUP_MODE_FULL && + dest_redo.lsn > current.start_lsn) + elog(ERROR, "Current START LSN %X/%X is lower than SYNC LSN %X/%X, " + "it may indicate that we are trying to catchup with PostgreSQL instance from the past", + (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn), + (uint32) (dest_redo.lsn >> 32), (uint32) (dest_redo.lsn)); + + /* Start stream replication */ + join_path_components(dest_xlog_path, dest_pgdata, PG_XLOG_DIR); + fio_mkdir(dest_xlog_path, DIR_PERMISSION, FIO_LOCAL_HOST); + start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt, + current.start_lsn, current.tli); + + source_filelist = parray_new(); + + /* list files with the logical path. omit $PGDATA */ + if (fio_is_remote(FIO_DB_HOST)) + fio_list_dir(source_filelist, source_pgdata, + true, true, false, backup_logs, true, 0); + else + dir_list_file(source_filelist, source_pgdata, + true, true, false, backup_logs, true, 0, FIO_LOCAL_HOST); + + //REVIEW FIXME. Let's fix that before release. + // TODO filter pg_xlog/wal? + // TODO what if wal is not a dir (symlink to a dir)? + + /* close ssh session in main thread */ + fio_disconnect(); + + //REVIEW Do we want to do similar calculation for dest? + current.pgdata_bytes += calculate_datasize_of_filelist(source_filelist); + pretty_size(current.pgdata_bytes, pretty_bytes, lengthof(pretty_bytes)); + elog(INFO, "Source PGDATA size: %s", pretty_bytes); + + /* + * Sort pathname ascending. It is necessary to create intermediate + * directories sequentially. + * + * For example: + * 1 - create 'base' + * 2 - create 'base/1' + * + * Sorted array is used at least in parse_filelist_filenames(), + * extractPageMap(), make_pagemap_from_ptrack(). 
+ */ + parray_qsort(source_filelist, pgFileCompareRelPathWithExternal); + + /* Extract information about files in source_filelist parsing their names:*/ + parse_filelist_filenames(source_filelist, source_pgdata); + + elog(LOG, "Start LSN (source): %X/%X, TLI: %X", + (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn), + current.tli); + if (current.backup_mode != BACKUP_MODE_FULL) + elog(LOG, "LSN in destination: %X/%X, TLI: %X", + (uint32) (dest_redo.lsn >> 32), (uint32) (dest_redo.lsn), + dest_redo.tli); + + /* Build page mapping in PTRACK mode */ + if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK) + { + time(&start_time); + elog(INFO, "Extracting pagemap of changed blocks"); + + /* Build the page map from ptrack information */ + make_pagemap_from_ptrack_2(source_filelist, source_conn, + source_node_info.ptrack_schema, + source_node_info.ptrack_version_num, + dest_redo.lsn); + time(&end_time); + elog(INFO, "Pagemap successfully extracted, time elapsed: %.0f sec", + difftime(end_time, start_time)); + } + + /* + * Make directories before catchup + */ + /* + * We iterate over source_filelist and for every directory with parent 'pg_tblspc' + * we must lookup this directory name in tablespace map. + * If we got a match, we treat this directory as tablespace. + * It means that we create directory specified in tablespace_map and + * original directory created as symlink to it. + */ + for (i = 0; i < parray_num(source_filelist); i++) + { + pgFile *file = (pgFile *) parray_get(source_filelist, i); + char parent_dir[MAXPGPATH]; + + if (!S_ISDIR(file->mode)) + continue; + + /* + * check if it is fake "directory" and is a tablespace link + * this is because we passed the follow_symlink when building the list + */ + /* get parent dir of rel_path */ + strncpy(parent_dir, file->rel_path, MAXPGPATH); + get_parent_directory(parent_dir); + + /* check if directory is actually link to tablespace */ + if (strcmp(parent_dir, PG_TBLSPC_DIR) != 0) + { + /* if the entry is a regular directory, create it in the destination */ + char dirpath[MAXPGPATH]; + + join_path_components(dirpath, dest_pgdata, file->rel_path); + + elog(VERBOSE, "Create directory '%s'", dirpath); + fio_mkdir(dirpath, DIR_PERMISSION, FIO_LOCAL_HOST); + } + else + { + /* this directory located in pg_tblspc */ + const char *linked_path = NULL; + char to_path[MAXPGPATH]; + + // TODO perform additional check that this is actually symlink? 
+ { /* get full symlink path and map this path to new location */ + char source_full_path[MAXPGPATH]; + char symlink_content[MAXPGPATH]; + join_path_components(source_full_path, source_pgdata, file->rel_path); + fio_readlink(source_full_path, symlink_content, sizeof(symlink_content), FIO_DB_HOST); + /* we checked that mapping exists in preflight_checks for local catchup */ + linked_path = get_tablespace_mapping(symlink_content); + elog(INFO, "Map tablespace full_path: \"%s\" old_symlink_content: \"%s\" new_symlink_content: \"%s\"\n", + source_full_path, + symlink_content, + linked_path); + } + + if (!is_absolute_path(linked_path)) + elog(ERROR, "Tablespace directory path must be an absolute path: %s\n", + linked_path); + + join_path_components(to_path, dest_pgdata, file->rel_path); + + elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"", + linked_path, to_path); + + /* create tablespace directory */ + if (fio_mkdir(linked_path, file->mode, FIO_LOCAL_HOST) != 0) + elog(ERROR, "Could not create tablespace directory \"%s\": %s", + linked_path, strerror(errno)); + + /* create link to linked_path */ + if (fio_symlink(linked_path, to_path, true, FIO_LOCAL_HOST) < 0) + elog(ERROR, "Could not create symbolic link \"%s\" -> \"%s\": %s", + linked_path, to_path, strerror(errno)); + } + } + + /* + * find pg_control file (in already sorted source_filelist) + * and exclude it from list for future special processing + */ + { + int control_file_elem_index; + pgFile search_key; + MemSet(&search_key, 0, sizeof(pgFile)); + /* pgFileCompareRelPathWithExternal uses only .rel_path and .external_dir_num for comparision */ + search_key.rel_path = XLOG_CONTROL_FILE; + search_key.external_dir_num = 0; + control_file_elem_index = parray_bsearch_index(source_filelist, &search_key, pgFileCompareRelPathWithExternal); + if(control_file_elem_index < 0) + elog(ERROR, "\"%s\" not found in \"%s\"\n", XLOG_CONTROL_FILE, source_pgdata); + source_pg_control_file = parray_remove(source_filelist, control_file_elem_index); + } + + /* + * remove absent source files in dest (dropped tables, etc...) + * note: global/pg_control will also be deleted here + */ + if (current.backup_mode != BACKUP_MODE_FULL) + { + elog(INFO, "Removing redundant files in destination directory"); + parray_qsort(dest_filelist, pgFileCompareRelPathWithExternalDesc); + for (i = 0; i < parray_num(dest_filelist); i++) + { + bool redundant = true; + pgFile *file = (pgFile *) parray_get(dest_filelist, i); + + //TODO optimize it and use some merge-like algorithm + //instead of bsearch for each file. + if (parray_bsearch(source_filelist, file, pgFileCompareRelPathWithExternal)) + redundant = false; + + /* pg_filenode.map are always restored, because it's crc cannot be trusted */ + Assert(file->external_dir_num == 0); + if (pg_strcasecmp(file->name, RELMAPPER_FILENAME) == 0) + redundant = true; + + //REVIEW This check seems unneded. Anyway we delete only redundant stuff below. 
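+			/*
+			 * dest_filelist was sorted in descending path order above, so the
+			 * contents of a directory are visited (and removed) before the
+			 * directory itself.
+			 */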
+ /* do not delete the useful internal directories */ + if (S_ISDIR(file->mode) && !redundant) + continue; + + /* if file does not exists in destination list, then we can safely unlink it */ + if (redundant) + { + char fullpath[MAXPGPATH]; + + join_path_components(fullpath, dest_pgdata, file->rel_path); + + fio_delete(file->mode, fullpath, FIO_DB_HOST); + elog(VERBOSE, "Deleted file \"%s\"", fullpath); + + /* shrink pgdata list */ + pgFileFree(file); + parray_remove(dest_filelist, i); + i--; + } + } + } + + /* clear file locks */ + pfilearray_clear_locks(source_filelist); + + /* Sort by size for load balancing */ + parray_qsort(source_filelist, pgFileCompareSizeDesc); + + /* Sort the array for binary search */ + if (dest_filelist) + parray_qsort(dest_filelist, pgFileCompareRelPathWithExternal); + + /* run copy threads */ + elog(INFO, "Start transferring data files"); + time(&start_time); + catchup_isok = catchup_multithreaded_copy(num_threads, &source_node_info, + source_pgdata, dest_pgdata, + source_filelist, dest_filelist, + dest_redo.lsn, current.backup_mode); + + /* at last copy control file */ + if (catchup_isok) + { + char from_fullpath[MAXPGPATH]; + char to_fullpath[MAXPGPATH]; + join_path_components(from_fullpath, source_pgdata, source_pg_control_file->rel_path); + join_path_components(to_fullpath, dest_pgdata, source_pg_control_file->rel_path); + copy_pgcontrol_file(from_fullpath, FIO_DB_HOST, + to_fullpath, FIO_LOCAL_HOST, source_pg_control_file); + } + + time(&end_time); + pretty_time_interval(difftime(end_time, start_time), + pretty_time, lengthof(pretty_time)); + if (catchup_isok) + elog(INFO, "Data files are transferred, time elapsed: %s", + pretty_time); + else + elog(ERROR, "Data files transferring failed, time elapsed: %s", + pretty_time); + + /* Notify end of backup */ + { + //REVIEW Is it relevant to catchup? I suppose it isn't, since catchup is a new code. + //If we do need it, please write a comment explaining that. + /* kludge against some old bug in archive_timeout. TODO: remove in 3.0.0 */ + int timeout = (instance_config.archive_timeout > 0) ? + instance_config.archive_timeout : ARCHIVE_TIMEOUT_DEFAULT; + char *stop_backup_query_text = NULL; + + pg_silent_client_messages(source_conn); + + //REVIEW. Do we want to support pg 9.5? I suppose we never test it... + //Maybe check it and error out early? + /* Create restore point + * Only if backup is from master. + * For PG 9.5 create restore point only if pguser is superuser. 
+ */ + if (!current.from_replica && + !(source_node_info.server_version < 90600 && + !source_node_info.is_superuser)) //TODO: check correctness + pg_create_restore_point(source_conn, current.start_time); + + /* Execute pg_stop_backup using PostgreSQL connection */ + pg_stop_backup_send(source_conn, source_node_info.server_version, current.from_replica, exclusive_backup, &stop_backup_query_text); + + /* + * Wait for the result of pg_stop_backup(), but no longer than + * archive_timeout seconds + */ + pg_stop_backup_consume(source_conn, source_node_info.server_version, exclusive_backup, timeout, stop_backup_query_text, &stop_backup_result); + + /* Cleanup */ + pg_free(stop_backup_query_text); + } + + wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, ¤t); + +#if PG_VERSION_NUM >= 90600 + /* Write backup_label */ + Assert(stop_backup_result.backup_label_content != NULL); + pg_stop_backup_write_file_helper(dest_pgdata, PG_BACKUP_LABEL_FILE, "backup label", + stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, + NULL); + free(stop_backup_result.backup_label_content); + stop_backup_result.backup_label_content = NULL; + stop_backup_result.backup_label_content_len = 0; + + /* tablespace_map */ + if (stop_backup_result.tablespace_map_content != NULL) + { + // TODO what if tablespace is created during catchup? + /* Because we have already created symlinks in pg_tblspc earlier, + * we do not need to write the tablespace_map file. + * So this call is unnecessary: + * pg_stop_backup_write_file_helper(dest_pgdata, PG_TABLESPACE_MAP_FILE, "tablespace map", + * stop_backup_result.tablespace_map_content, stop_backup_result.tablespace_map_content_len, + * NULL); + */ + free(stop_backup_result.tablespace_map_content); + stop_backup_result.tablespace_map_content = NULL; + stop_backup_result.tablespace_map_content_len = 0; + } +#endif + + if(wait_WAL_streaming_end(NULL)) + elog(ERROR, "WAL streaming failed"); + + //REVIEW Please add a comment about these lsns. It is a crutial part of the algorithm. + current.recovery_xid = stop_backup_result.snapshot_xid; + + elog(LOG, "Getting the Recovery Time from WAL"); + + /* iterate over WAL from stop_backup lsn to start_backup lsn */ + if (!read_recovery_info(dest_xlog_path, current.tli, + instance_config.xlog_seg_size, + current.start_lsn, current.stop_lsn, + ¤t.recovery_time)) + { + elog(LOG, "Failed to find Recovery Time in WAL, forced to trust current_timestamp"); + current.recovery_time = stop_backup_result.invocation_time; + } + + /* + * In case of backup from replica >= 9.6 we must fix minRecPoint + */ + if (current.from_replica && !exclusive_backup) + { + set_min_recovery_point(source_pg_control_file, dest_pgdata, current.stop_lsn); + } + + /* close ssh session in main thread */ + fio_disconnect(); + + /* Sync all copied files unless '--no-sync' flag is used */ + if (catchup_isok) + { + if (sync_dest_files) + catchup_sync_destination_files(dest_pgdata, FIO_LOCAL_HOST, source_filelist, source_pg_control_file); + else + elog(WARNING, "Files are not synced to disk"); + } + + /* Cleanup */ + if (dest_filelist) + { + parray_walk(dest_filelist, pgFileFree); + parray_free(dest_filelist); + } + parray_walk(source_filelist, pgFileFree); + parray_free(source_filelist); + pgFileFree(source_pg_control_file); + + //REVIEW: Are we going to do that before release? 
+	/* TODO: show the amount of transferred data in bytes and calculate incremental ratio */
+
+	return 0;
+}
diff --git a/src/data.c b/src/data.c
index 314490585..49b696059 100644
--- a/src/data.c
+++ b/src/data.c
@@ -268,7 +268,7 @@ get_checksum_errormsg(Page page, char **errormsg, BlockNumber absolute_blkno)
  *                      PageIsOk(0) if page was successfully retrieved
  *         PageIsTruncated(-1) if the page was truncated
  *         SkipCurrentPage(-2) if we need to skip this page,
- *                                    only used for DELTA backup
+ *                                    only used for DELTA and PTRACK backup
  *         PageIsCorrupted(-3) if the page checksum mismatch
  *                                or header corruption,
  *                                only used for checkdb
@@ -400,7 +400,12 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn,
 			  page_st->lsn > 0 &&
 			  page_st->lsn < prev_backup_start_lsn)
 		{
-			elog(VERBOSE, "Skipping blknum %u in file: \"%s\"", blknum, from_fullpath);
+			elog(VERBOSE, "Skipping blknum %u in file: \"%s\", file->exists_in_prev: %s, page_st->lsn: %X/%X, prev_backup_start_lsn: %X/%X",
+				blknum, from_fullpath,
+				file->exists_in_prev ? "true" : "false",
+				(uint32) (page_st->lsn >> 32), (uint32) page_st->lsn,
+				(uint32) (prev_backup_start_lsn >> 32), (uint32) prev_backup_start_lsn
+				);
 			return SkipCurrentPage;
 		}
 
@@ -458,6 +463,23 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum,
 	return compressed_size;
 }
 
+/* taken from compress_and_backup_page, but with all the header and compression magic stripped out: a plain 1:1 copy */
+static int
+copy_page(pgFile *file, BlockNumber blknum,
+		  FILE *in, FILE *out, Page page,
+		  const char *to_fullpath)
+{
+	/* write data page */
+	if (fio_fwrite(out, page, BLCKSZ) != BLCKSZ)
+		elog(ERROR, "File: \"%s\", cannot write at block %u: %s",
+			 to_fullpath, blknum, strerror(errno));
+
+	file->write_size += BLCKSZ;
+	file->uncompressed_size += BLCKSZ;
+
+	return BLCKSZ;
+}
+
 /*
  * Backup data file in the from_root directory to the to_root directory with
  * same relative path. If prev_backup_start_lsn is not NULL, only pages with
@@ -623,6 +645,169 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat
 	pg_free(headers);
 }
 
+/*
+ * Copy a data file from the from_root directory to the to_root directory with
+ * the same relative path. If prev_backup_start_lsn is not NULL, only pages with
+ * a higher lsn will be copied.
+ * Not just copy the file, but read it block by block (use the bitmap in case of
+ * incremental catchup), validate checksums and write the pages to the
+ * destination as is, without compression or backup page headers.
+ */
+void
+catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath,
+				  XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode,
+				  CompressAlg calg, int clevel, uint32 checksum_version,
+				  int ptrack_version_num, const char *ptrack_schema,
+				  bool is_merge, size_t prev_size)
+{
+	int			rc;
+	bool		use_pagemap;
+	char	   *errmsg = NULL;
+	BlockNumber	err_blknum = 0;
+	/* page headers */
+	BackupPageHeader2 *headers = NULL;
+
+	/* sanity */
+	if (file->size % BLCKSZ != 0)
+		elog(WARNING, "File: \"%s\", invalid file size %zu", from_fullpath, file->size);
+
+	/*
+	 * Compute expected number of blocks in the file.
+	 * NOTE This is a normal situation, if the file size has changed
+	 * since the moment we computed it.
+	 */
+	file->n_blocks = file->size/BLCKSZ;
+
+	/*
+	 * Skip unchanged file only if it exists in previous backup.
+	 * This way we can correctly handle null-sized files which are
+	 * not tracked by pagemap and thus always marked as unchanged.
+ */ + if (backup_mode == BACKUP_MODE_DIFF_PTRACK && + file->pagemap.bitmapsize == PageBitmapIsEmpty && + file->exists_in_prev && file->size == prev_size && !file->pagemap_isabsent) + { + /* + * There are no changed blocks since last backup. We want to make + * incremental backup, so we should exit. + */ + file->write_size = BYTES_INVALID; + return; + } + + /* reset size summary */ + file->read_size = 0; + file->write_size = 0; + file->uncompressed_size = 0; + INIT_FILE_CRC32(true, file->crc); + + /* + * Read each page, verify checksum and write it to backup. + * If page map is empty or file is not present in previous backup + * backup all pages of the relation. + * + * In PTRACK 1.x there was a problem + * of data files with missing _ptrack map. + * Such files should be fully copied. + */ + + if (file->pagemap.bitmapsize == PageBitmapIsEmpty || + file->pagemap_isabsent || !file->exists_in_prev || + !file->pagemap.bitmap) + use_pagemap = false; + else + use_pagemap = true; + + if (use_pagemap) + elog(VERBOSE, "Using pagemap for file \"%s\"", file->rel_path); + + /* Remote mode */ + if (fio_is_remote(FIO_DB_HOST)) + { + rc = fio_copy_pages(to_fullpath, from_fullpath, file, + /* send prev backup START_LSN */ + (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && + file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr, + calg, clevel, checksum_version, + /* send pagemap if any */ + use_pagemap, + /* variables for error reporting */ + &err_blknum, &errmsg, &headers); + } + else + { + /* TODO: stop handling errors internally */ + rc = copy_pages(to_fullpath, from_fullpath, file, + /* send prev backup START_LSN */ + (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && + file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr, + checksum_version, use_pagemap, + backup_mode, ptrack_version_num, ptrack_schema); + } + + /* check for errors */ + if (rc == FILE_MISSING) + { + elog(is_merge ? ERROR : LOG, "File not found: \"%s\"", from_fullpath); + file->write_size = FILE_NOT_FOUND; + goto cleanup; + } + + else if (rc == WRITE_FAILED) + elog(ERROR, "Cannot write block %u of \"%s\": %s", + err_blknum, to_fullpath, strerror(errno)); + + else if (rc == PAGE_CORRUPTION) + { + if (errmsg) + elog(ERROR, "Corruption detected in file \"%s\", block %u: %s", + from_fullpath, err_blknum, errmsg); + else + elog(ERROR, "Corruption detected in file \"%s\", block %u", + from_fullpath, err_blknum); + } + /* OPEN_FAILED and READ_FAILED */ + else if (rc == OPEN_FAILED) + { + if (errmsg) + elog(ERROR, "%s", errmsg); + else + elog(ERROR, "Cannot open file \"%s\"", from_fullpath); + } + else if (rc == READ_FAILED) + { + if (errmsg) + elog(ERROR, "%s", errmsg); + else + elog(ERROR, "Cannot read file \"%s\"", from_fullpath); + } + + file->read_size = rc * BLCKSZ; + + /* refresh n_blocks for FULL and DELTA */ + if (backup_mode == BACKUP_MODE_FULL || + backup_mode == BACKUP_MODE_DIFF_DELTA) + file->n_blocks = file->read_size / BLCKSZ; + + /* Determine that file didn`t changed in case of incremental catchup */ + if (backup_mode != BACKUP_MODE_FULL && + file->exists_in_prev && + file->write_size == 0 && + file->n_blocks > 0) + { + file->write_size = BYTES_INVALID; + } + +cleanup: + + /* finish CRC calculation */ + FIN_FILE_CRC32(true, file->crc); + + pg_free(errmsg); + pg_free(file->pagemap.bitmap); + pg_free(headers); +} + /* * Backup non data file * We do not apply compression to this file. 
@@ -1992,6 +2177,7 @@ send_pages(const char *to_fullpath, const char *from_fullpath,
 								 true, checksum_version,
 								 ptrack_version_num, ptrack_schema,
 								 from_fullpath, &page_st);
+
 		if (rc == PageIsTruncated)
 			break;
 
@@ -2068,6 +2254,130 @@ send_pages(const char *to_fullpath, const char *from_fullpath,
 	return n_blocks_read;
 }
 
+/* copy local file (taken from send_pages, but pages are copied as is, without adding headers or compression) */
+int
+copy_pages(const char *to_fullpath, const char *from_fullpath,
+		   pgFile *file, XLogRecPtr sync_lsn,
+		   uint32 checksum_version, bool use_pagemap,
+		   BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema)
+{
+	FILE		*in = NULL;
+	FILE		*out = NULL;
+	char		curr_page[BLCKSZ];
+	int			n_blocks_read = 0;
+	BlockNumber	blknum = 0;
+	datapagemap_iterator_t *iter = NULL;
+
+	/* stdio buffers */
+	char	*in_buf = NULL;
+	char	*out_buf = NULL;
+
+	/* open source file for read */
+	in = fopen(from_fullpath, PG_BINARY_R);
+	if (in == NULL)
+	{
+		/*
+		 * If the file is not found, this is not an error.
+		 * It could have been deleted by a concurrent postgres transaction.
+		 */
+		if (errno == ENOENT)
+			return FILE_MISSING;
+
+		elog(ERROR, "Cannot open file \"%s\": %s", from_fullpath, strerror(errno));
+	}
+
+	/*
+	 * Enable stdio buffering for the local input file,
+	 * unless the pagemap is involved, which
+	 * implies a lot of random access.
+	 */
+
+	if (use_pagemap)
+	{
+		iter = datapagemap_iterate(&file->pagemap);
+		datapagemap_next(iter, &blknum); /* set first block */
+
+		setvbuf(in, NULL, _IONBF, BUFSIZ);
+	}
+	else
+	{
+		in_buf = pgut_malloc(STDIO_BUFSIZE);
+		setvbuf(in, in_buf, _IOFBF, STDIO_BUFSIZE);
+	}
+
+	out = fio_fopen(to_fullpath, PG_BINARY_R "+", FIO_BACKUP_HOST);
+	if (out == NULL)
+		elog(ERROR, "Cannot open destination file \"%s\": %s",
+			 to_fullpath, strerror(errno));
+
+	/* update file permission */
+	if (fio_chmod(to_fullpath, file->mode, FIO_BACKUP_HOST) == -1)
+		elog(ERROR, "Cannot change mode of \"%s\": %s", to_fullpath,
+			 strerror(errno));
+
+	elog(VERBOSE, "ftruncate file \"%s\" to size %lu",
+		 to_fullpath, file->size);
+	if (fio_ftruncate(out, file->size) == -1)
+		elog(ERROR, "Cannot ftruncate file \"%s\" to size %lu: %s",
+			 to_fullpath, file->size, strerror(errno));
+
+	if (!fio_is_remote_file(out))
+	{
+		out_buf = pgut_malloc(STDIO_BUFSIZE);
+		setvbuf(out, out_buf, _IOFBF, STDIO_BUFSIZE);
+	}
+
+	while (blknum < file->n_blocks)
+	{
+		PageState	page_st;
+		int rc = prepare_page(file, sync_lsn,
+							  blknum, in, backup_mode, curr_page,
+							  true, checksum_version,
+							  ptrack_version_num, ptrack_schema,
+							  from_fullpath, &page_st);
+		if (rc == PageIsTruncated)
+			break;
+
+		else if (rc == PageIsOk)
+		{
+			if (fio_fseek(out, blknum * BLCKSZ) < 0)
+			{
+				elog(ERROR, "Cannot seek block %u of \"%s\": %s",
+					 blknum, to_fullpath, strerror(errno));
+			}
+			copy_page(file, blknum, in, out, curr_page, to_fullpath);
+		}
+
+		n_blocks_read++;
+
+		/* next block */
+		if (use_pagemap)
+		{
+			/* exit if pagemap is exhausted */
+			if (!datapagemap_next(iter, &blknum))
+				break;
+		}
+		else
+			blknum++;
+	}
+
+	/* cleanup */
+	if (in && fclose(in))
+		elog(ERROR, "Cannot close the source file \"%s\": %s",
+			 to_fullpath, strerror(errno));
+
+	/* close local output file */
+	if (out && fio_fclose(out))
+		elog(ERROR, "Cannot close the destination file \"%s\": %s",
+			 to_fullpath, strerror(errno));
+
+	pg_free(iter);
+	pg_free(in_buf);
+	pg_free(out_buf);
+
+	return n_blocks_read;
+}
+
 /*
  * Attempt to open header file, read content and return as
  * array of headers.
diff --git a/src/dir.c b/src/dir.c index ce255d0ad..473534c8b 100644 --- a/src/dir.c +++ b/src/dir.c @@ -485,6 +485,13 @@ pgFileCompareSize(const void *f1, const void *f2) return 0; } +/* Compare two pgFile with their size in descending order */ +int +pgFileCompareSizeDesc(const void *f1, const void *f2) +{ + return -1 * pgFileCompareSize(f1, f2); +} + static int pgCompareString(const void *str1, const void *str2) { @@ -887,7 +894,7 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, * * Copy of function get_tablespace_mapping() from pg_basebackup.c. */ -static const char * +const char * get_tablespace_mapping(const char *dir) { TablespaceListCell *cell; diff --git a/src/help.c b/src/help.c index e1c8d6833..921feaec0 100644 --- a/src/help.c +++ b/src/help.c @@ -2,7 +2,7 @@ * * help.c * - * Copyright (c) 2017-2019, Postgres Professional + * Copyright (c) 2017-2021, Postgres Professional * *------------------------------------------------------------------------- */ @@ -29,6 +29,7 @@ static void help_archive_get(void); static void help_checkdb(void); static void help_help(void); static void help_version(void); +static void help_catchup(void); void help_print_version(void) @@ -70,6 +71,7 @@ help_command(ProbackupSubcmd const subcmd) &help_internal, // AGENT_CMD &help_help, &help_version, + &help_catchup, }; Assert((int)subcmd < sizeof(help_functions) / sizeof(help_functions[0])); @@ -246,6 +248,19 @@ help_pg_probackup(void) printf(_(" [--ssh-options]\n")); printf(_(" [--help]\n")); + printf(_("\n %s catchup -b catchup-mode\n"), PROGRAM_NAME); + printf(_(" --source-pgdata=path_to_pgdata_on_remote_server\n")); + printf(_(" --destination-pgdata=path_to_local_dir\n")); + printf(_(" [--stream [-S slot-name]] [--temp-slot]\n")); + printf(_(" [-j num-threads]\n")); + printf(_(" [-T OLDDIR=NEWDIR]\n")); + printf(_(" [-d dbname] [-h host] [-p port] [-U username]\n")); + printf(_(" [-w --no-password] [-W --password]\n")); + printf(_(" [--remote-proto] [--remote-host]\n")); + printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); + printf(_(" [--ssh-options]\n")); + printf(_(" [--help]\n")); + if ((PROGRAM_URL || PROGRAM_EMAIL)) { printf("\n"); @@ -1009,3 +1024,49 @@ help_version(void) printf(_("\n%s version\n"), PROGRAM_NAME); printf(_("%s --version\n\n"), PROGRAM_NAME); } + +static void +help_catchup(void) +{ + printf(_("\n%s catchup -b catchup-mode\n"), PROGRAM_NAME); + printf(_(" --source-pgdata=path_to_pgdata_on_remote_server\n")); + printf(_(" --destination-pgdata=path_to_local_dir\n")); + printf(_(" [--stream [-S slot-name]] [--temp-slot]\n")); + printf(_(" [-j num-threads]\n")); + printf(_(" [-T OLDDIR=NEWDIR]\n")); + printf(_(" [-d dbname] [-h host] [-p port] [-U username]\n")); + printf(_(" [-w --no-password] [-W --password]\n")); + printf(_(" [--remote-proto] [--remote-host]\n")); + printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); + printf(_(" [--ssh-options]\n")); + printf(_(" [--help]\n\n")); + + printf(_(" -b, --backup-mode=catchup-mode catchup mode=FULL|DELTA|PTRACK\n")); + printf(_(" --stream stream the transaction log (only supported mode)\n")); + printf(_(" -S, --slot=SLOTNAME replication slot to use\n")); + printf(_(" --temp-slot use temporary replication slot\n")); + + printf(_(" -j, --threads=NUM number of parallel threads\n")); + + printf(_(" -T, --tablespace-mapping=OLDDIR=NEWDIR\n")); + printf(_(" relocate the tablespace from directory OLDDIR to NEWDIR\n")); + + printf(_("\n Connection options:\n")); + printf(_(" -U, 
--pguser=USERNAME user name to connect as (default: current local user)\n")); + printf(_(" -d, --pgdatabase=DBNAME database to connect (default: username)\n")); + printf(_(" -h, --pghost=HOSTNAME database server host or socket directory(default: 'local socket')\n")); + printf(_(" -p, --pgport=PORT database server port (default: 5432)\n")); + printf(_(" -w, --no-password never prompt for password\n")); + printf(_(" -W, --password force password prompt\n\n")); + + printf(_("\n Remote options:\n")); + printf(_(" --remote-proto=protocol remote protocol to use\n")); + printf(_(" available options: 'ssh', 'none' (default: ssh)\n")); + printf(_(" --remote-host=hostname remote host address or hostname\n")); + printf(_(" --remote-port=port remote host port (default: 22)\n")); + printf(_(" --remote-path=path path to directory with pg_probackup binary on remote host\n")); + printf(_(" (default: current binary path)\n")); + printf(_(" --remote-user=username user name for ssh connection (default: current user)\n")); + printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); + printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n\n")); +} diff --git a/src/init.c b/src/init.c index dc821325a..a4911cb5c 100644 --- a/src/init.c +++ b/src/init.c @@ -57,7 +57,7 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) "(-D, --pgdata)"); /* Read system_identifier from PGDATA */ - instance->system_identifier = get_system_identifier(instance->pgdata); + instance->system_identifier = get_system_identifier(instance->pgdata, FIO_DB_HOST); /* Starting from PostgreSQL 11 read WAL segment size from PGDATA */ instance->xlog_seg_size = get_xlog_seg_size(instance->pgdata); diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 3150900b6..00796be04 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -88,6 +88,9 @@ bool backup_logs = false; bool smooth_checkpoint; char *remote_agent; static char *backup_note = NULL; +/* catchup options */ +static char *catchup_source_pgdata = NULL; +static char *catchup_destination_pgdata = NULL; /* restore options */ static char *target_time = NULL; static char *target_xid = NULL; @@ -201,6 +204,9 @@ static ConfigOption cmd_options[] = { 'b', 184, "merge-expired", &merge_expired, SOURCE_CMD_STRICT }, { 'b', 185, "dry-run", &dry_run, SOURCE_CMD_STRICT }, { 's', 238, "note", &backup_note, SOURCE_CMD_STRICT }, + /* catchup options */ + { 's', 239, "source-pgdata", &catchup_source_pgdata, SOURCE_CMD_STRICT }, + { 's', 240, "destination-pgdata", &catchup_destination_pgdata, SOURCE_CMD_STRICT }, /* restore options */ { 's', 136, "recovery-target-time", &target_time, SOURCE_CMD_STRICT }, { 's', 137, "recovery-target-xid", &target_xid, SOURCE_CMD_STRICT }, @@ -445,11 +451,12 @@ main(int argc, char *argv[]) catalogState->catalog_path, WAL_SUBDIR); } - /* backup_path is required for all pg_probackup commands except help, version and checkdb */ + /* backup_path is required for all pg_probackup commands except help, version, checkdb and catchup */ if (backup_path == NULL && backup_subcmd != CHECKDB_CMD && backup_subcmd != HELP_CMD && - backup_subcmd != VERSION_CMD) + backup_subcmd != VERSION_CMD && + backup_subcmd != CATCHUP_CMD) elog(ERROR, "required parameter not specified: BACKUP_PATH (-B, --backup-path)"); /* ===== catalogState (END) ======*/ @@ -458,12 +465,12 @@ main(int argc, char *argv[]) /* * Option --instance is required for all commands except - * init, show, checkdb and validate + * init, show, checkdb, validate and 
catchup
 	 */
 	if (instance_name == NULL)
 	{
 		if (backup_subcmd != INIT_CMD && backup_subcmd != SHOW_CMD &&
-			backup_subcmd != VALIDATE_CMD && backup_subcmd != CHECKDB_CMD)
+			backup_subcmd != VALIDATE_CMD && backup_subcmd != CHECKDB_CMD && backup_subcmd != CATCHUP_CMD)
 			elog(ERROR, "required parameter not specified: --instance");
 	}
 	else
@@ -545,6 +552,10 @@ main(int argc, char *argv[])
 			setMyLocation(backup_subcmd);
 		}
 	}
+	else if (backup_subcmd == CATCHUP_CMD)
+	{
+		config_get_opt_env(instance_options);
+	}
 
 	/*
 	 * Disable logging into file for archive-push and archive-get.
@@ -587,6 +598,13 @@ main(int argc, char *argv[])
 			 "You must specify --log-directory option when running checkdb with "
 			 "--log-level-file option enabled.");
 
+	if (backup_subcmd == CATCHUP_CMD &&
+		instance_config.logger.log_level_file != LOG_OFF &&
+		instance_config.logger.log_directory == NULL)
+		elog(ERROR, "Cannot save catchup logs to a file. "
+			 "You must specify --log-directory option when running catchup with "
+			 "--log-level-file option enabled.");
+
 	/* Initialize logger */
 	init_logger(backup_path, &instance_config.logger);
 
@@ -745,6 +763,25 @@ main(int argc, char *argv[])
 		}
 	}
 
+	/* checking required options */
+	if (backup_subcmd == CATCHUP_CMD)
+	{
+		if (catchup_source_pgdata == NULL)
+			elog(ERROR, "You must specify \"--source-pgdata\" option with the \"%s\" command", get_subcmd_name(backup_subcmd));
+		if (catchup_destination_pgdata == NULL)
+			elog(ERROR, "You must specify \"--destination-pgdata\" option with the \"%s\" command", get_subcmd_name(backup_subcmd));
+		if (current.backup_mode == BACKUP_MODE_INVALID)
+			elog(ERROR, "Required parameter not specified: BACKUP_MODE (-b, --backup-mode)");
+		if (current.backup_mode != BACKUP_MODE_FULL && current.backup_mode != BACKUP_MODE_DIFF_PTRACK && current.backup_mode != BACKUP_MODE_DIFF_DELTA)
+			elog(ERROR, "Only \"FULL\", \"PTRACK\" and \"DELTA\" modes are supported with the \"%s\" command", get_subcmd_name(backup_subcmd));
+		if (!stream_wal)
+			elog(INFO, "--stream is required, forcing stream mode");
+		current.stream = stream_wal = true;
+		if (instance_config.external_dir_str)
+			elog(ERROR, "external directories are not supported for the \"%s\" command", get_subcmd_name(backup_subcmd));
+		// TODO check instance_config.conn_opt
+	}
+
 	/* sanity */
 	if (backup_subcmd == VALIDATE_CMD && restore_params->no_validate)
 		elog(ERROR, "You cannot specify \"--no-validate\" option with the \"%s\" command",
@@ -787,6 +824,8 @@ main(int argc, char *argv[])
 			return do_backup(instanceState, set_backup_params,
 							 no_validate, no_sync, backup_logs);
 		}
+		case CATCHUP_CMD:
+			return do_catchup(catchup_source_pgdata, catchup_destination_pgdata, num_threads, !no_sync);
 		case RESTORE_CMD:
 			return do_restore_or_validate(instanceState, current.backup_id,
 						  recovery_target_options,
diff --git a/src/pg_probackup.h b/src/pg_probackup.h
index ccbf803fd..1cad526dd 100644
--- a/src/pg_probackup.h
+++ b/src/pg_probackup.h
@@ -17,6 +17,7 @@
 
 #include "access/xlog_internal.h"
 #include "utils/pg_crc.h"
+#include "catalog/pg_control.h"
 
 #if PG_VERSION_NUM >= 120000
 #include "common/logging.h"
@@ -420,7 +421,7 @@ typedef struct PGNodeInfo
 	char			server_version_str[100];
 
 	int				ptrack_version_num;
-	bool			is_ptrack_enable;
+	bool			is_ptrack_enabled;
 	const char		*ptrack_schema; /* used only for ptrack 2.x */
 } PGNodeInfo;
 
@@ -840,13 +841,16 @@ extern const char *deparse_backup_mode(BackupMode mode);
 extern void process_block_change(ForkNumber forknum, RelFileNode rnode,
 								 BlockNumber blkno);
 
+/* in catchup.c */
+extern int do_catchup(const char 
*source_pgdata, const char *dest_pgdata, int num_threads, bool sync_dest_files); + /* in restore.c */ extern int do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pgRecoveryTarget *rt, pgRestoreParams *params, bool no_sync); -extern bool satisfy_timeline(const parray *timelines, const pgBackup *backup); +extern bool satisfy_timeline(const parray *timelines, TimeLineID tli, XLogRecPtr lsn); extern bool satisfy_recovery_target(const pgBackup *backup, const pgRecoveryTarget *rt); extern pgRecoveryTarget *parseRecoveryTargetOptions( @@ -861,6 +865,8 @@ extern parray *get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, extern parray *get_backup_filelist(pgBackup *backup, bool strict); extern parray *read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict); extern bool tliIsPartOfHistory(const parray *timelines, TimeLineID tli); +extern DestDirIncrCompatibility check_incremental_compatibility(const char *pgdata, uint64 system_identifier, + IncrRestoreMode incremental_mode); /* in merge.c */ extern void do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool no_sync); @@ -1002,6 +1008,7 @@ extern void dir_list_file(parray *files, const char *root, bool exclude, bool follow_symlink, bool add_root, bool backup_logs, bool skip_hidden, int external_dir_num, fio_location location); +extern const char *get_tablespace_mapping(const char *dir); extern void create_data_directories(parray *dest_files, const char *data_dir, const char *backup_dir, @@ -1054,6 +1061,7 @@ extern int pgFileCompareRelPathWithExternal(const void *f1, const void *f2); extern int pgFileCompareRelPathWithExternalDesc(const void *f1, const void *f2); extern int pgFileCompareLinked(const void *f1, const void *f2); extern int pgFileCompareSize(const void *f1, const void *f2); +extern int pgFileCompareSizeDesc(const void *f1, const void *f2); extern int pgCompareOid(const void *f1, const void *f2); extern void pfilearray_clear_locks(parray *file_list); @@ -1061,6 +1069,12 @@ extern void pfilearray_clear_locks(parray *file_list); extern bool check_data_file(ConnectionArgs *arguments, pgFile *file, const char *from_fullpath, uint32 checksum_version); + +extern void catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath, + XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode, + CompressAlg calg, int clevel, uint32 checksum_version, + int ptrack_version_num, const char *ptrack_schema, + bool is_merge, size_t prev_size); extern void backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath, XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode, CompressAlg calg, int clevel, uint32 checksum_version, @@ -1129,14 +1143,15 @@ extern XLogRecPtr get_next_record_lsn(const char *archivedir, XLogSegNo segno, T /* in util.c */ extern TimeLineID get_current_timeline(PGconn *conn); -extern TimeLineID get_current_timeline_from_control(bool safe); +extern TimeLineID get_current_timeline_from_control(const char *pgdata_path, fio_location location, bool safe); extern XLogRecPtr get_checkpoint_location(PGconn *conn); -extern uint64 get_system_identifier(const char *pgdata_path); +extern uint64 get_system_identifier(const char *pgdata_path, fio_location location); extern uint64 get_remote_system_identifier(PGconn *conn); extern uint32 get_data_checksum_version(bool safe); extern pg_crc32c get_pgcontrol_checksum(const char *pgdata_path); -extern uint32 get_xlog_seg_size(char *pgdata_path); -extern void 
get_redo(const char *pgdata_path, RedoParams *redo); +extern DBState get_system_dbstate(const char *pgdata_path, fio_location location); +extern uint32 get_xlog_seg_size(const char *pgdata_path); +extern void get_redo(const char *pgdata_path, fio_location pgdata_location, RedoParams *redo); extern void set_min_recovery_point(pgFile *file, const char *backup_path, XLogRecPtr stop_backup_lsn); extern void copy_pgcontrol_file(const char *from_fullpath, fio_location from_location, @@ -1161,7 +1176,7 @@ extern void pretty_size(int64 size, char *buf, size_t len); extern void pretty_time_interval(double time, char *buf, size_t len); extern PGconn *pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo); -extern void check_system_identifiers(PGconn *conn, char *pgdata); +extern void check_system_identifiers(PGconn *conn, const char *pgdata); extern void parse_filelist_filenames(parray *files, const char *root); /* in ptrack.c */ @@ -1170,7 +1185,8 @@ extern void make_pagemap_from_ptrack_2(parray* files, PGconn* backup_conn, int ptrack_version_num, XLogRecPtr lsn); extern void get_ptrack_version(PGconn *backup_conn, PGNodeInfo *nodeInfo); -extern bool pg_ptrack_enable(PGconn *backup_conn, int ptrack_version_num); +extern bool pg_is_ptrack_enabled(PGconn *backup_conn, int ptrack_version_num); + extern XLogRecPtr get_last_ptrack_lsn(PGconn *backup_conn, PGNodeInfo *nodeInfo); extern parray * pg_ptrack_get_pagemapset(PGconn *backup_conn, const char *ptrack_schema, int ptrack_version_num, XLogRecPtr lsn); @@ -1182,6 +1198,10 @@ extern int send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel, uint32 checksum_version, bool use_pagemap, BackupPageHeader2 **headers, BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema); +extern int copy_pages(const char *to_fullpath, const char *from_fullpath, + pgFile *file, XLogRecPtr prev_backup_start_lsn, + uint32 checksum_version, bool use_pagemap, + BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema); /* FIO */ extern void setMyLocation(ProbackupSubcmd const subcmd); @@ -1190,6 +1210,10 @@ extern int fio_send_pages(const char *to_fullpath, const char *from_fullpath, pg XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version, bool use_pagemap, BlockNumber *err_blknum, char **errormsg, BackupPageHeader2 **headers); +extern int fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, + XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version, + bool use_pagemap, BlockNumber *err_blknum, char **errormsg, + BackupPageHeader2 **headers); /* return codes for fio_send_pages */ extern int fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, char **errormsg); extern int fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, @@ -1243,6 +1267,7 @@ extern void start_WAL_streaming(PGconn *backup_conn, char *stream_dst_path, ConnectionOptions *conn_opt, XLogRecPtr startpos, TimeLineID starttli); extern int wait_WAL_streaming_end(parray *backup_files_list); +extern parray* parse_tli_history_buffer(char *history, TimeLineID tli); /* external variables and functions, implemented in backup.c */ typedef struct PGStopBackupResult @@ -1280,5 +1305,6 @@ extern XLogRecPtr wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr lsn, bool bool in_prev_segment, bool segment_only, int timeout_elevel, bool in_stream_dir); extern void 
wait_wal_and_calculate_stop_lsn(const char *xlog_path, XLogRecPtr stop_lsn, pgBackup *backup); +extern int64 calculate_datasize_of_filelist(parray *filelist); #endif /* PG_PROBACKUP_H */ diff --git a/src/ptrack.c b/src/ptrack.c index 6825686c6..c631d7386 100644 --- a/src/ptrack.c +++ b/src/ptrack.c @@ -118,7 +118,7 @@ get_ptrack_version(PGconn *backup_conn, PGNodeInfo *nodeInfo) * Check if ptrack is enabled in target instance */ bool -pg_ptrack_enable(PGconn *backup_conn, int ptrack_version_num) +pg_is_ptrack_enabled(PGconn *backup_conn, int ptrack_version_num) { PGresult *res_db; bool result = false; diff --git a/src/restore.c b/src/restore.c index 7f5df1a00..005984aed 100644 --- a/src/restore.c +++ b/src/restore.c @@ -67,8 +67,6 @@ static void restore_chain(pgBackup *dest_backup, parray *parent_chain, parray *dbOid_exclude_list, pgRestoreParams *params, const char *pgdata_path, bool no_sync, bool cleanup_pgdata, bool backup_has_tblspc); -static DestDirIncrCompatibility check_incremental_compatibility(const char *pgdata, uint64 system_identifier, - IncrRestoreMode incremental_mode); /* * Iterate over backup list to find all ancestors of the broken parent_backup @@ -293,7 +291,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (!timelines) elog(ERROR, "Failed to get history file for target timeline %i", rt->target_tli); - if (!satisfy_timeline(timelines, current_backup)) + if (!satisfy_timeline(timelines, current_backup->tli, current_backup->stop_lsn)) { if (target_backup_id != INVALID_BACKUP_ID) elog(ERROR, "target backup %s does not satisfy target timeline", @@ -487,7 +485,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg { RedoParams redo; parray *timelines = NULL; - get_redo(instance_config.pgdata, &redo); + get_redo(instance_config.pgdata, FIO_DB_HOST, &redo); if (redo.checksum_version == 0) elog(ERROR, "Incremental restore in 'lsn' mode require " @@ -1819,7 +1817,7 @@ satisfy_recovery_target(const pgBackup *backup, const pgRecoveryTarget *rt) /* TODO description */ bool -satisfy_timeline(const parray *timelines, const pgBackup *backup) +satisfy_timeline(const parray *timelines, TimeLineID tli, XLogRecPtr lsn) { int i; @@ -1828,9 +1826,9 @@ satisfy_timeline(const parray *timelines, const pgBackup *backup) TimeLineHistoryEntry *timeline; timeline = (TimeLineHistoryEntry *) parray_get(timelines, i); - if (backup->tli == timeline->tli && + if (tli == timeline->tli && (XLogRecPtrIsInvalid(timeline->end) || - backup->stop_lsn <= timeline->end)) + lsn <= timeline->end)) return true; } return false; @@ -2186,9 +2184,9 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, * data files content, because based on pg_control information we will * choose a backup suitable for lsn based incremental restore. 
*/ - elog(INFO, "Trying to read pg_control file in destination direstory"); + elog(INFO, "Trying to read pg_control file in destination directory"); - system_id_pgdata = get_system_identifier(pgdata); + system_id_pgdata = get_system_identifier(pgdata, FIO_DB_HOST); if (system_id_pgdata == instance_config.system_identifier) system_id_match = true; diff --git a/src/stream.c b/src/stream.c index 01161f720..615d25281 100644 --- a/src/stream.c +++ b/src/stream.c @@ -70,7 +70,6 @@ static void add_walsegment_to_filelist(parray *filelist, uint32 timeline, uint32 xlog_seg_size); static void add_history_file_to_filelist(parray *filelist, uint32 timeline, char *basedir); -static parray* parse_tli_history_buffer(char *history, TimeLineID tli); /* * Run IDENTIFY_SYSTEM through a given connection and @@ -173,7 +172,7 @@ StreamLog(void *arg) */ stream_arg->startpos -= stream_arg->startpos % instance_config.xlog_seg_size; - xlog_files_list = parray_new(); + xlog_files_list = parray_new(); /* Initialize timeout */ stream_stop_begin = 0; @@ -308,14 +307,14 @@ stop_streaming(XLogRecPtr xlogpos, uint32 timeline, bool segment_finished) /* we assume that we get called once at the end of each segment */ if (segment_finished) - { - elog(VERBOSE, _("finished segment at %X/%X (timeline %u)"), - (uint32) (xlogpos >> 32), (uint32) xlogpos, timeline); + { + elog(VERBOSE, _("finished segment at %X/%X (timeline %u)"), + (uint32) (xlogpos >> 32), (uint32) xlogpos, timeline); - add_walsegment_to_filelist(xlog_files_list, timeline, xlogpos, - (char*) stream_thread_arg.basedir, - instance_config.xlog_seg_size); - } + add_walsegment_to_filelist(xlog_files_list, timeline, xlogpos, + (char*) stream_thread_arg.basedir, + instance_config.xlog_seg_size); + } /* * Note that we report the previous, not current, position here. 
After a @@ -588,20 +587,25 @@ start_WAL_streaming(PGconn *backup_conn, char *stream_dst_path, ConnectionOption /* Set error exit code as default */ stream_thread_arg.ret = 1; /* we must use startpos as start_lsn from start_backup */ - stream_thread_arg.startpos = current.start_lsn; - stream_thread_arg.starttli = current.tli; + stream_thread_arg.startpos = startpos; + stream_thread_arg.starttli = starttli; thread_interrupted = false; pthread_create(&stream_thread, NULL, StreamLog, &stream_thread_arg); } -/* Wait for the completion of stream */ +/* + * Wait for the completion of stream + * append list of streamed xlog files + * into backup_files_list (if it is not NULL) + */ int wait_WAL_streaming_end(parray *backup_files_list) { pthread_join(stream_thread, NULL); - parray_concat(backup_files_list, xlog_files_list); + if(backup_files_list != NULL) + parray_concat(backup_files_list, xlog_files_list); parray_free(xlog_files_list); return stream_thread_arg.ret; } diff --git a/src/util.c b/src/util.c index c0a1dc9e4..4e32e0639 100644 --- a/src/util.c +++ b/src/util.c @@ -10,8 +10,6 @@ #include "pg_probackup.h" -#include "catalog/pg_control.h" - #include #include @@ -174,7 +172,7 @@ get_current_timeline(PGconn *conn) if (PQresultStatus(res) == PGRES_TUPLES_OK) val = PQgetvalue(res, 0, 0); else - return get_current_timeline_from_control(false); + return get_current_timeline_from_control(instance_config.pgdata, FIO_DB_HOST, false); if (!parse_uint32(val, &tli, 0)) { @@ -182,7 +180,7 @@ get_current_timeline(PGconn *conn) elog(WARNING, "Invalid value of timeline_id %s", val); /* TODO 3.0 remove it and just error out */ - return get_current_timeline_from_control(false); + return get_current_timeline_from_control(instance_config.pgdata, FIO_DB_HOST, false); } return tli; @@ -190,15 +188,15 @@ get_current_timeline(PGconn *conn) /* Get timeline from pg_control file */ TimeLineID -get_current_timeline_from_control(bool safe) +get_current_timeline_from_control(const char *pgdata_path, fio_location location, bool safe) { ControlFileData ControlFile; char *buffer; size_t size; /* First fetch file... */ - buffer = slurpFile(instance_config.pgdata, XLOG_CONTROL_FILE, &size, - safe, FIO_DB_HOST); + buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, + safe, location); if (safe && buffer == NULL) return 0; @@ -249,14 +247,14 @@ get_checkpoint_location(PGconn *conn) } uint64 -get_system_identifier(const char *pgdata_path) +get_system_identifier(const char *pgdata_path, fio_location location) { ControlFileData ControlFile; char *buffer; size_t size; /* First fetch file... 
*/ - buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, FIO_DB_HOST); + buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, location); if (buffer == NULL) return 0; digestControlFile(&ControlFile, buffer, size); @@ -299,7 +297,7 @@ get_remote_system_identifier(PGconn *conn) } uint32 -get_xlog_seg_size(char *pgdata_path) +get_xlog_seg_size(const char *pgdata_path) { #if PG_VERSION_NUM >= 110000 ControlFileData ControlFile; @@ -351,15 +349,31 @@ get_pgcontrol_checksum(const char *pgdata_path) return ControlFile.crc; } +DBState +get_system_dbstate(const char *pgdata_path, fio_location location) +{ + ControlFileData ControlFile; + char *buffer; + size_t size; + + buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, location); + if (buffer == NULL) + return 0; + digestControlFile(&ControlFile, buffer, size); + pg_free(buffer); + + return ControlFile.state; +} + void -get_redo(const char *pgdata_path, RedoParams *redo) +get_redo(const char *pgdata_path, fio_location pgdata_location, RedoParams *redo) { ControlFileData ControlFile; char *buffer; size_t size; /* First fetch file... */ - buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, FIO_DB_HOST); + buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, pgdata_location); digestControlFile(&ControlFile, buffer, size); pg_free(buffer); diff --git a/src/utils/configuration.c b/src/utils/configuration.c index afc1bc056..04bfbbe3b 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -110,6 +110,7 @@ static char const * const subcmd_names[] = "agent", "help", "version", + "catchup", }; ProbackupSubcmd diff --git a/src/utils/configuration.h b/src/utils/configuration.h index 4ed4e0e61..3a5de4b83 100644 --- a/src/utils/configuration.h +++ b/src/utils/configuration.h @@ -38,7 +38,8 @@ typedef enum ProbackupSubcmd SSH_CMD, AGENT_CMD, HELP_CMD, - VERSION_CMD + VERSION_CMD, + CATCHUP_CMD, } ProbackupSubcmd; typedef enum OptionSource diff --git a/src/utils/file.c b/src/utils/file.c index e9792dd9c..b808d6293 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -94,7 +94,7 @@ setMyLocation(ProbackupSubcmd const subcmd) MyLocation = IsSshProtocol() ? (subcmd == ARCHIVE_PUSH_CMD || subcmd == ARCHIVE_GET_CMD) ? FIO_DB_HOST - : (subcmd == BACKUP_CMD || subcmd == RESTORE_CMD || subcmd == ADD_INSTANCE_CMD) + : (subcmd == BACKUP_CMD || subcmd == RESTORE_CMD || subcmd == ADD_INSTANCE_CMD || subcmd == CATCHUP_CMD) ? FIO_BACKUP_HOST : FIO_LOCAL_HOST : FIO_LOCAL_HOST; @@ -1139,6 +1139,46 @@ fio_stat(char const* path, struct stat* st, bool follow_symlink, fio_location lo } } +/* + * Read value of a symbolic link + * this is a wrapper about readlink() syscall + * side effects: string truncation occur (and it + * can be checked by caller by comparing + * returned value >= valsiz) + */ +ssize_t +fio_readlink(const char *path, char *value, size_t valsiz, fio_location location) +{ + if (!fio_is_remote(location)) + { + /* readlink don't place trailing \0 */ + ssize_t len = readlink(path, value, valsiz); + value[len < valsiz ? 
len : valsiz] = '\0';
+		return len;
+	}
+	else
+	{
+		fio_header hdr;
+		size_t path_len = strlen(path) + 1;
+
+		hdr.cop = FIO_READLINK;
+		hdr.handle = -1;
+		Assert(valsiz <= UINT_MAX); /* max value of fio_header.arg */
+		hdr.arg = valsiz;
+		hdr.size = path_len;
+
+		IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr));
+		IO_CHECK(fio_write_all(fio_stdout, path, path_len), path_len);
+
+		IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr));
+		Assert(hdr.cop == FIO_READLINK);
+		Assert(hdr.size <= valsiz);
+		IO_CHECK(fio_read_all(fio_stdin, value, hdr.size), hdr.size);
+		value[hdr.size < valsiz ? hdr.size : valsiz] = '\0';
+		return hdr.size;
+	}
+}
+
 /* Check presence of the file */
 int
 fio_access(char const* path, int mode, fio_location location)
@@ -1769,7 +1809,7 @@ fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file,
 
 	/* send message with header
 
-	8bytes       24bytes             var        var
+	16bytes      24bytes             var        var
 	--------------------------------------------------------------
 	| fio_header | fio_send_request | FILE PATH | BITMAP(if any) |
 	--------------------------------------------------------------
@@ -1903,6 +1943,198 @@ fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file,
 	return n_blocks_read;
 }
 
+/*
+ * Return the number of actually(!) read blocks; attempts and
+ * half-read blocks are not counted.
+ * Return values in case of error:
+ *  FILE_MISSING
+ *  OPEN_FAILED
+ *  READ_ERROR
+ *  PAGE_CORRUPTION
+ *  WRITE_FAILED
+ *
+ * If none of the above, this function returns the number of blocks
+ * read by the remote agent.
+ *
+ * In case of DELTA mode horizonLsn must be a valid lsn,
+ * otherwise it should be set to InvalidXLogRecPtr.
+ * Taken from fio_send_pages.
+ */
+int
+fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file,
+			   XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version,
+			   bool use_pagemap, BlockNumber* err_blknum, char **errormsg,
+			   BackupPageHeader2 **headers)
+{
+	FILE	*out = NULL;
+	char	*out_buf = NULL;
+	struct {
+		fio_header hdr;
+		fio_send_request arg;
+	} req;
+	BlockNumber	n_blocks_read = 0;
+	BlockNumber	blknum = 0;
+
+	/* send message with header
+
+	16bytes      24bytes             var        var
+	--------------------------------------------------------------
+	| fio_header | fio_send_request | FILE PATH | BITMAP(if any) |
+	--------------------------------------------------------------
+	*/
+
+	req.hdr.cop = FIO_SEND_PAGES;
+
+	if (use_pagemap)
+	{
+		req.hdr.size = sizeof(fio_send_request) + (*file).pagemap.bitmapsize + strlen(from_fullpath) + 1;
+		req.arg.bitmapsize = (*file).pagemap.bitmapsize;
+
+		/* TODO: add optimization for the case of pagemap
+		 * containing small number of blocks with big serial numbers:
+		 * https://github.com/postgrespro/pg_probackup/blob/remote_page_backup/src/utils/file.c#L1211
+		 */
+	}
+	else
+	{
+		req.hdr.size = sizeof(fio_send_request) + strlen(from_fullpath) + 1;
+		req.arg.bitmapsize = 0;
+	}
+
+	req.arg.nblocks = file->size/BLCKSZ;
+	req.arg.segmentno = file->segno * RELSEG_SIZE;
+	req.arg.horizonLsn = horizonLsn;
+	req.arg.checksumVersion = checksum_version;
+	req.arg.calg = calg;
+	req.arg.clevel = clevel;
+	req.arg.path_len = strlen(from_fullpath) + 1;
+
+	file->compress_alg = calg; /* TODO: wtf? why here? 
 */
+
+//<-----
+//	datapagemap_iterator_t *iter;
+//	BlockNumber blkno;
+//	iter = datapagemap_iterate(pagemap);
+//	while (datapagemap_next(iter, &blkno))
+//		elog(INFO, "block %u", blkno);
+//	pg_free(iter);
+//<-----
+
+	/* send header */
+	IO_CHECK(fio_write_all(fio_stdout, &req, sizeof(req)), sizeof(req));
+
+	/* send file path */
+	IO_CHECK(fio_write_all(fio_stdout, from_fullpath, req.arg.path_len), req.arg.path_len);
+
+	/* send pagemap if any */
+	if (use_pagemap)
+		IO_CHECK(fio_write_all(fio_stdout, (*file).pagemap.bitmap, (*file).pagemap.bitmapsize), (*file).pagemap.bitmapsize);
+
+	out = fio_fopen(to_fullpath, PG_BINARY_R "+", FIO_BACKUP_HOST);
+	if (out == NULL)
+		elog(ERROR, "Cannot open restore target file \"%s\": %s", to_fullpath, strerror(errno));
+
+	/* update file permission */
+	if (fio_chmod(to_fullpath, file->mode, FIO_BACKUP_HOST) == -1)
+		elog(ERROR, "Cannot change mode of \"%s\": %s", to_fullpath,
+			 strerror(errno));
+
+	elog(VERBOSE, "ftruncate file \"%s\" to size %lu",
+		 to_fullpath, file->size);
+	if (fio_ftruncate(out, file->size) == -1)
+		elog(ERROR, "Cannot ftruncate file \"%s\" to size %lu: %s",
+			 to_fullpath, file->size, strerror(errno));
+
+	if (!fio_is_remote_file(out))
+	{
+		out_buf = pgut_malloc(STDIO_BUFSIZE);
+		setvbuf(out, out_buf, _IOFBF, STDIO_BUFSIZE);
+	}
+
+	while (true)
+	{
+		fio_header hdr;
+		char buf[BLCKSZ + sizeof(BackupPageHeader)];
+		IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr));
+
+		if (interrupted)
+			elog(ERROR, "Interrupted during page reading");
+
+		if (hdr.cop == FIO_ERROR)
+		{
+			/* FILE_MISSING, OPEN_FAILED and READ_FAILED */
+			if (hdr.size > 0)
+			{
+				IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size);
+				*errormsg = pgut_malloc(hdr.size);
+				snprintf(*errormsg, hdr.size, "%s", buf);
+			}
+
+			return hdr.arg;
+		}
+		else if (hdr.cop == FIO_SEND_FILE_CORRUPTION)
+		{
+			*err_blknum = hdr.arg;
+
+			if (hdr.size > 0)
+			{
+				IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size);
+				*errormsg = pgut_malloc(hdr.size);
+				snprintf(*errormsg, hdr.size, "%s", buf);
+			}
+			return PAGE_CORRUPTION;
+		}
+		else if (hdr.cop == FIO_SEND_FILE_EOF)
+		{
+			/* n_blocks_read reported by EOF */
+			n_blocks_read = hdr.arg;
+
+			/* receive headers if any */
+			if (hdr.size > 0)
+			{
+				*headers = pgut_malloc(hdr.size);
+				IO_CHECK(fio_read_all(fio_stdin, *headers, hdr.size), hdr.size);
+				file->n_headers = (hdr.size / sizeof(BackupPageHeader2)) -1;
+			}
+
+			break;
+		}
+		else if (hdr.cop == FIO_PAGE)
+		{
+			blknum = hdr.arg;
+
+			Assert(hdr.size <= sizeof(buf));
+			IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size);
+
+			COMP_FILE_CRC32(true, file->crc, buf, hdr.size);
+
+			if (fio_fseek(out, blknum * BLCKSZ) < 0)
+			{
+				elog(ERROR, "Cannot seek block %u of \"%s\": %s",
+					 blknum, to_fullpath, strerror(errno));
+			}
+			// an uncompressed block with a BackupPageHeader is expected to arrive here
+			// insert an assert? 
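+			// Note: for an uncompressed page hdr.size should equal
+			// sizeof(BackupPageHeader) + BLCKSZ; any other payload size makes
+			// the fio_fwrite() below return something other than BLCKSZ and
+			// is reported as WRITE_FAILED.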
+ if (fio_fwrite(out, buf + sizeof(BackupPageHeader), hdr.size - sizeof(BackupPageHeader)) != BLCKSZ) + { + fio_fclose(out); + *err_blknum = blknum; + return WRITE_FAILED; + } + file->write_size += BLCKSZ; + file->uncompressed_size += BLCKSZ; + } + else + elog(ERROR, "Remote agent returned message of unexpected type: %i", hdr.cop); + } + + if (out) + fclose(out); + pg_free(out_buf); + + return n_blocks_read; +} + /* TODO: read file using large buffer * Return codes: * FIO_ERROR: @@ -3147,6 +3379,26 @@ fio_communicate(int in, int out) case FIO_GET_ASYNC_ERROR: fio_get_async_error_impl(out); break; + case FIO_READLINK: /* Read content of a symbolic link */ + { + /* + * We need a buf for a arguments and for a result at the same time + * hdr.size = strlen(symlink_name) + 1 + * hdr.arg = bufsize for a answer (symlink content) + */ + size_t filename_size = (size_t)hdr.size; + if (filename_size + hdr.arg > buf_size) { + buf_size = hdr.arg; + buf = (char*)realloc(buf, buf_size); + } + rc = readlink(buf, buf + filename_size, hdr.arg); + hdr.cop = FIO_READLINK; + hdr.size = rc > 0 ? rc : 0; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + if (hdr.size != 0) + IO_CHECK(fio_write_all(out, buf + filename_size, hdr.size), hdr.size); + } + break; default: Assert(false); } diff --git a/src/utils/file.h b/src/utils/file.h index ad65b9901..edb5ea0f9 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -55,7 +55,8 @@ typedef enum FIO_LIST_DIR, FIO_CHECK_POSTMASTER, FIO_GET_ASYNC_ERROR, - FIO_WRITE_ASYNC + FIO_WRITE_ASYNC, + FIO_READLINK } fio_operations; typedef enum @@ -128,6 +129,7 @@ extern int fio_mkdir(char const* path, int mode, fio_location location); extern int fio_chmod(char const* path, int mode, fio_location location); extern int fio_access(char const* path, int mode, fio_location location); extern int fio_stat(char const* path, struct stat* st, bool follow_symlinks, fio_location location); +extern ssize_t fio_readlink(const char *path, char *value, size_t valsiz, fio_location location); extern DIR* fio_opendir(char const* path, fio_location location); extern struct dirent * fio_readdir(DIR *dirp); extern int fio_closedir(DIR *dirp); diff --git a/src/utils/parray.c b/src/utils/parray.c index 95b83365d..792e26907 100644 --- a/src/utils/parray.c +++ b/src/utils/parray.c @@ -198,6 +198,13 @@ parray_bsearch(parray *array, const void *key, int(*compare)(const void *, const return bsearch(&key, array->data, array->used, sizeof(void *), compare); } +int +parray_bsearch_index(parray *array, const void *key, int(*compare)(const void *, const void *)) +{ + void **elem = parray_bsearch(array, key, compare); + return elem != NULL ? 
elem - array->data : -1; +} + /* checks that parray contains element */ bool parray_contains(parray *array, void *elem) { diff --git a/src/utils/parray.h b/src/utils/parray.h index 85d7383f3..e92ad728c 100644 --- a/src/utils/parray.h +++ b/src/utils/parray.h @@ -29,6 +29,7 @@ extern bool parray_rm(parray *array, const void *key, int(*compare)(const void * extern size_t parray_num(const parray *array); extern void parray_qsort(parray *array, int(*compare)(const void *, const void *)); extern void *parray_bsearch(parray *array, const void *key, int(*compare)(const void *, const void *)); +extern int parray_bsearch_index(parray *array, const void *key, int(*compare)(const void *, const void *)); extern void parray_walk(parray *array, void (*action)(void *)); extern bool parray_contains(parray *array, void *elem); diff --git a/tests/__init__.py b/tests/__init__.py index dbf84feea..080512760 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -6,7 +6,8 @@ retention, pgpro560, pgpro589, pgpro2068, false_positive, replica, \ compression, page, ptrack, archive, exclude, cfs_backup, cfs_restore, \ cfs_validate_backup, auth_test, time_stamp, snapfs, logging, \ - locking, remote, external, config, checkdb, set_backup, incr_restore + locking, remote, external, config, checkdb, set_backup, incr_restore, \ + catchup def load_tests(loader, tests, pattern): @@ -23,6 +24,7 @@ def load_tests(loader, tests, pattern): # suite.addTests(loader.loadTestsFromModule(auth_test)) suite.addTests(loader.loadTestsFromModule(archive)) suite.addTests(loader.loadTestsFromModule(backup)) + suite.addTests(loader.loadTestsFromModule(catchup)) suite.addTests(loader.loadTestsFromModule(compatibility)) suite.addTests(loader.loadTestsFromModule(checkdb)) suite.addTests(loader.loadTestsFromModule(config)) diff --git a/tests/catchup.py b/tests/catchup.py new file mode 100644 index 000000000..5df538e42 --- /dev/null +++ b/tests/catchup.py @@ -0,0 +1,977 @@ +import os +import signal +import unittest +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException + +module_name = 'catchup' + +class CatchupTest(ProbackupTest, unittest.TestCase): + def setUp(self): + self.fname = self.id().split('.')[3] + +######################################### +# Basic tests +######################################### + def test_basic_full_catchup(self): + """ + Test 'multithreaded basebackup' mode (aka FULL catchup) + """ + # preparation + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True + ) + src_pg.slow_start() + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question AS SELECT 42 AS answer") + src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + + # do full catchup + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + + # 1st check: compare data directories + self.compare_pgdata( + self.pgdata_content(src_pg.data_dir), + self.pgdata_content(dst_pg.data_dir) + ) + + # run&recover catchup'ed instance + src_pg.stop() + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + + # 2nd check: run verification query + dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + self.assertEqual(src_query_result, dst_query_result, 'Different 
answer from copy') + + # Cleanup + dst_pg.stop() + self.del_test_dir(module_name, self.fname) + + def test_full_catchup_with_tablespace(self): + """ + Test tablespace transfers + """ + # preparation + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True + ) + src_pg.slow_start() + tblspace1_old_path = self.get_tblspace_path(src_pg, 'tblspace1_old') + self.create_tblspace_in_node(src_pg, 'tblspace1', tblspc_path = tblspace1_old_path) + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question TABLESPACE tblspace1 AS SELECT 42 AS answer") + src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + + # do full catchup with tablespace mapping + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + tblspace1_new_path = self.get_tblspace_path(dst_pg, 'tblspace1_new') + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', + '-p', str(src_pg.port), + '--stream', + '-T', '{0}={1}'.format(tblspace1_old_path, tblspace1_new_path) + ] + ) + + # 1st check: compare data directories + self.compare_pgdata( + self.pgdata_content(src_pg.data_dir), + self.pgdata_content(dst_pg.data_dir) + ) + + # make changes in master tablespace + src_pg.safe_psql( + "postgres", + "UPDATE ultimate_question SET answer = -1") + src_pg.stop() + + # run&recover catchup'ed instance + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + + # 2nd check: run verification query + dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') + + # Cleanup + dst_pg.stop() + self.del_test_dir(module_name, self.fname) + + def test_basic_delta_catchup(self): + """ + Test delta catchup + """ + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + pg_options = { 'wal_log_hints': 'on' } + ) + src_pg.slow_start() + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question(answer int)") + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + self.set_replica(src_pg, dst_pg) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start(replica = True) + dst_pg.stop() + + # preparation 3: make changes on master (source) + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) + pgbench.wait() + src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") + src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + + # do delta catchup + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + + # 1st check: compare data directories + self.compare_pgdata( + self.pgdata_content(src_pg.data_dir), + self.pgdata_content(dst_pg.data_dir) + ) + + # run&recover catchup'ed instance + src_pg.stop() + self.set_replica(master = src_pg, replica = dst_pg) + 
dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start(replica = True) + + # 2nd check: run verification query + dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') + + # Cleanup + dst_pg.stop() + self.del_test_dir(module_name, self.fname) + + def test_basic_ptrack_catchup(self): + """ + Test ptrack catchup + """ + if not self.ptrack: + return unittest.skip('Skipped because ptrack support is disabled') + + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + ptrack_enable = True, + initdb_params = ['--data-checksums'] + ) + src_pg.slow_start() + src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack") + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question(answer int)") + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + self.set_replica(src_pg, dst_pg) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start(replica = True) + dst_pg.stop() + + # preparation 3: make changes on master (source) + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) + pgbench.wait() + src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") + src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + + # do ptrack catchup + self.catchup_node( + backup_mode = 'PTRACK', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + + # 1st check: compare data directories + self.compare_pgdata( + self.pgdata_content(src_pg.data_dir), + self.pgdata_content(dst_pg.data_dir) + ) + + # run&recover catchup'ed instance + src_pg.stop() + self.set_replica(master = src_pg, replica = dst_pg) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start(replica = True) + + # 2nd check: run verification query + dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') + + # Cleanup + dst_pg.stop() + self.del_test_dir(module_name, self.fname) + + def test_tli_delta_catchup(self): + """ + Test that we correctly follow timeline change with delta catchup + """ + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + pg_options = { 'wal_log_hints': 'on' } + ) + src_pg.slow_start() + + # preparation 2: destination + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + dst_pg.stop() + + # preparation 3: promote source + src_pg.stop() + self.set_replica(dst_pg, src_pg) # fake replication 
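+        # restarting the stopped source as a (fake) standby of dst and then
+        # promoting it switches the source to a new timeline, which the
+        # delta catchup below must follow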
+ src_pg.slow_start(replica = True) + src_pg.promote() + src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer") + src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + + # do catchup + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + + # 1st check: compare data directories + self.compare_pgdata( + self.pgdata_content(src_pg.data_dir), + self.pgdata_content(dst_pg.data_dir) + ) + + # run&recover catchup'ed instance + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + + # 2nd check: run verification query + dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') + + # Cleanup + src_pg.stop() + dst_pg.stop() + self.del_test_dir(module_name, self.fname) + + def test_tli_ptrack_catchup(self): + """ + Test that we correctly follow timeline change with ptrack catchup + """ + if not self.ptrack: + return unittest.skip('Skipped because ptrack support is disabled') + + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + ptrack_enable = True, + initdb_params = ['--data-checksums'] + ) + src_pg.slow_start() + src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack") + + # preparation 2: destination + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + dst_pg.stop() + + # preparation 3: promote source + src_pg.stop() + self.set_replica(dst_pg, src_pg) # fake replication + src_pg.slow_start(replica = True) + src_pg.promote() + src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer") + src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + + # do catchup + self.catchup_node( + backup_mode = 'PTRACK', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + + # 1st check: compare data directories + self.compare_pgdata( + self.pgdata_content(src_pg.data_dir), + self.pgdata_content(dst_pg.data_dir) + ) + + # run&recover catchup'ed instance + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + + # 2nd check: run verification query + dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') + + # Cleanup + src_pg.stop() + dst_pg.stop() + self.del_test_dir(module_name, self.fname) + +######################################### +# Test various corner conditions +######################################### + def test_table_drop_with_delta(self): + """ + Test that dropped table in source will be dropped in delta catchup'ed instance too + """ + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + pg_options = { 'wal_log_hints': 
'on' } + ) + src_pg.slow_start() + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question AS SELECT 42 AS answer") + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + dst_pg.stop() + + # preparation 3: make changes on master (source) + # perform checkpoint twice to ensure, that datafile is actually deleted on filesystem + src_pg.safe_psql("postgres", "DROP TABLE ultimate_question") + src_pg.safe_psql("postgres", "CHECKPOINT") + src_pg.safe_psql("postgres", "CHECKPOINT") + + # do delta catchup + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + + # Check: compare data directories + self.compare_pgdata( + self.pgdata_content(src_pg.data_dir), + self.pgdata_content(dst_pg.data_dir) + ) + + # Cleanup + src_pg.stop() + self.del_test_dir(module_name, self.fname) + + def test_table_drop_with_ptrack(self): + """ + Test that dropped table in source will be dropped in ptrack catchup'ed instance too + """ + if not self.ptrack: + return unittest.skip('Skipped because ptrack support is disabled') + + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + ptrack_enable = True, + initdb_params = ['--data-checksums'] + ) + src_pg.slow_start() + src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack") + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question AS SELECT 42 AS answer") + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + dst_pg.stop() + + # preparation 3: make changes on master (source) + # perform checkpoint twice to ensure, that datafile is actually deleted on filesystem + src_pg.safe_psql("postgres", "DROP TABLE ultimate_question") + src_pg.safe_psql("postgres", "CHECKPOINT") + src_pg.safe_psql("postgres", "CHECKPOINT") + + # do ptrack catchup + self.catchup_node( + backup_mode = 'PTRACK', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + + # Check: compare data directories + self.compare_pgdata( + self.pgdata_content(src_pg.data_dir), + self.pgdata_content(dst_pg.data_dir) + ) + + # Cleanup + src_pg.stop() + self.del_test_dir(module_name, self.fname) + + def test_tablefile_truncation_with_delta(self): + """ + Test that truncated table in source will be truncated in delta catchup'ed instance too + """ + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + pg_options = { 'wal_log_hints': 'on' } + ) + src_pg.slow_start() + src_pg.safe_psql( + "postgres", + "CREATE SEQUENCE t_seq; " + "CREATE TABLE 
t_heap AS SELECT i AS id, " + "md5(i::text) AS text, " + "md5(repeat(i::text, 10))::tsvector AS tsvector " + "FROM generate_series(0, 1024) i") + src_pg.safe_psql("postgres", "VACUUM t_heap") + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + dest_options = {} + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + dst_pg.stop() + + # preparation 3: make changes on master (source) + src_pg.safe_psql("postgres", "DELETE FROM t_heap WHERE ctid >= '(11,0)'") + src_pg.safe_psql("postgres", "VACUUM t_heap") + + # do delta catchup + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + + # Check: compare data directories + self.compare_pgdata( + self.pgdata_content(src_pg.data_dir), + self.pgdata_content(dst_pg.data_dir) + ) + + # Cleanup + src_pg.stop() + self.del_test_dir(module_name, self.fname) + + def test_tablefile_truncation_with_ptrack(self): + """ + Test that truncated table in source will be truncated in ptrack catchup'ed instance too + """ + if not self.ptrack: + return unittest.skip('Skipped because ptrack support is disabled') + + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + ptrack_enable = True, + initdb_params = ['--data-checksums'] + ) + src_pg.slow_start() + src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack") + src_pg.safe_psql( + "postgres", + "CREATE SEQUENCE t_seq; " + "CREATE TABLE t_heap AS SELECT i AS id, " + "md5(i::text) AS text, " + "md5(repeat(i::text, 10))::tsvector AS tsvector " + "FROM generate_series(0, 1024) i") + src_pg.safe_psql("postgres", "VACUUM t_heap") + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + dest_options = {} + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + dst_pg.stop() + + # preparation 3: make changes on master (source) + src_pg.safe_psql("postgres", "DELETE FROM t_heap WHERE ctid >= '(11,0)'") + src_pg.safe_psql("postgres", "VACUUM t_heap") + + # do ptrack catchup + self.catchup_node( + backup_mode = 'PTRACK', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + + # Check: compare data directories + self.compare_pgdata( + self.pgdata_content(src_pg.data_dir), + self.pgdata_content(dst_pg.data_dir) + ) + + # Cleanup + src_pg.stop() + self.del_test_dir(module_name, self.fname) + +######################################### +# Test reaction on user errors +######################################### + def test_local_tablespace_without_mapping(self): + """ + Test that we detect absence of needed --tablespace-mapping option + """ + if self.remote: + return unittest.skip('Skipped because this test tests local catchup error handling') + + src_pg = 
self.make_simple_node(base_dir = os.path.join(module_name, self.fname, 'src')) + src_pg.slow_start() + + tblspace_path = self.get_tblspace_path(src_pg, 'tblspace') + self.create_tblspace_in_node( + src_pg, 'tblspace', + tblspc_path = tblspace_path) + + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question TABLESPACE tblspace AS SELECT 42 AS answer") + + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + try: + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', + '-p', str(src_pg.port), + '--stream', + ] + ) + self.assertEqual(1, 0, "Expecting Error because '-T' parameter is not specified.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Local catchup executed, but source database contains tablespace', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + # Cleanup + src_pg.stop() + self.del_test_dir(module_name, self.fname) + + def test_running_dest_postmaster(self): + """ + Test that we detect running postmaster in destination + """ + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + pg_options = { 'wal_log_hints': 'on' } + ) + src_pg.slow_start() + + # preparation 2: destination + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + # leave running destination postmaster + # so don't call dst_pg.stop() + + # try delta catchup + try: + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + self.assertEqual(1, 0, "Expecting Error because postmaster in destination is running.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Postmaster with pid ', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + # Cleanup + src_pg.stop() + self.del_test_dir(module_name, self.fname) + + def test_same_db_id(self): + """ + Test that we detect different id's of source and destination + """ + # preparation: + # source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True + ) + src_pg.slow_start() + # destination + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + dst_pg.stop() + # fake destination + fake_dst_pg = self.make_simple_node(base_dir = os.path.join(module_name, self.fname, 'fake_dst')) + # fake source + fake_src_pg = self.make_simple_node(base_dir = os.path.join(module_name, self.fname, 'fake_src')) + + # try delta catchup (src (with correct src conn), fake_dst) + try: + self.catchup_node( + 
backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = fake_dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + self.assertEqual(1, 0, "Expecting Error because database identifiers mismatch.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Database identifiers mismatch: ', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + # try delta catchup (fake_src (with wrong src conn), dst) + try: + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = fake_src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + self.assertEqual(1, 0, "Expecting Error because database identifiers mismatch.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Database identifiers mismatch: ', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + # Cleanup + src_pg.stop() + self.del_test_dir(module_name, self.fname) + + def test_destination_dbstate(self): + """ + Test that we detect that destination pg is not cleanly shutdowned + """ + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + pg_options = { 'wal_log_hints': 'on' } + ) + src_pg.slow_start() + + # preparation 2: destination + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + + # try #1 + try: + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + self.assertEqual(1, 0, "Expecting Error because destination pg is not cleanly shutdowned.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Destination directory contains "backup_label" file', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + # try #2 + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + self.assertNotEqual(dst_pg.pid, 0, "Cannot detect pid of running postgres") + os.kill(dst_pg.pid, signal.SIGKILL) + try: + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + self.assertEqual(1, 0, "Expecting Error because destination pg is not cleanly shutdowned.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'must be stopped cleanly', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + # Cleanup + src_pg.stop() + self.del_test_dir(module_name, self.fname) + + def test_tli_destination_mismatch(self): + """ + Test that we detect TLI mismatch in destination + """ + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + pg_options = { 'wal_log_hints': 'on' } + ) + src_pg.slow_start() + + # preparation 2: 
destination + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + self.set_replica(src_pg, dst_pg) + dst_pg.slow_start(replica = True) + dst_pg.promote() + dst_pg.stop() + + # preparation 3: "useful" changes + src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer") + src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + + # try catchup + try: + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_pg.stop() + self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') + except ProbackupException as e: + self.assertIn( + 'ERROR: Source is behind destination in timeline history', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + # Cleanup + src_pg.stop() + self.del_test_dir(module_name, self.fname) + + def test_tli_source_mismatch(self): + """ + Test that we detect TLI mismatch in source history + """ + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + pg_options = { 'wal_log_hints': 'on' } + ) + src_pg.slow_start() + + # preparation 2: fake source (promouted copy) + fake_src_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'fake_src')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = fake_src_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + fake_src_options = {} + fake_src_options['port'] = str(fake_src_pg.port) + self.set_auto_conf(fake_src_pg, fake_src_options) + self.set_replica(src_pg, fake_src_pg) + fake_src_pg.slow_start(replica = True) + fake_src_pg.promote() + self.switch_wal_segment(fake_src_pg) + fake_src_pg.safe_psql( + "postgres", + "CREATE TABLE t_heap AS SELECT i AS id, " + "md5(i::text) AS text, " + "md5(repeat(i::text, 10))::tsvector AS tsvector " + "FROM generate_series(0, 256) i") + self.switch_wal_segment(fake_src_pg) + fake_src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 'trash' AS garbage") + + # preparation 3: destination + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + dst_pg.stop() + + # preparation 4: "useful" changes + src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer") + src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + + # try catchup + try: + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = fake_src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', 
str(fake_src_pg.port), '--stream'] + ) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_pg.stop() + self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') + except ProbackupException as e: + self.assertIn( + 'ERROR: Destination is not in source timeline history', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + # Cleanup + src_pg.stop() + fake_src_pg.stop() + self.del_test_dir(module_name, self.fname) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index af27669b1..1de004250 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -345,14 +345,9 @@ def pg_config_version(self): # print('PGPROBACKUP_SSH_USER is not set') # exit(1) - def make_simple_node( + def make_empty_node( self, - base_dir=None, - set_replication=False, - ptrack_enable=False, - initdb_params=[], - pg_options={}): - + base_dir=None): real_base_dir = os.path.join(self.tmp_path, base_dir) shutil.rmtree(real_base_dir, ignore_errors=True) os.makedirs(real_base_dir) @@ -361,6 +356,17 @@ def make_simple_node( # bound method slow_start() to 'node' class instance node.slow_start = slow_start.__get__(node) node.should_rm_dirs = True + return node + + def make_simple_node( + self, + base_dir=None, + set_replication=False, + ptrack_enable=False, + initdb_params=[], + pg_options={}): + + node = self.make_empty_node(base_dir) node.init( initdb_params=initdb_params, allow_streaming=set_replication) @@ -1036,6 +1042,28 @@ def restore_node( return self.run_pb(cmd_list + options, gdb=gdb, old_binary=old_binary) + def catchup_node( + self, + backup_mode, source_pgdata, destination_node, + options = [] + ): + + cmd_list = [ + 'catchup', + '--backup-mode={0}'.format(backup_mode), + '--source-pgdata={0}'.format(source_pgdata), + '--destination-pgdata={0}'.format(destination_node.data_dir) + ] + if self.remote: + cmd_list += ['--remote-proto=ssh', '--remote-host=localhost'] + if self.verbose: + cmd_list += [ + '--log-level-file=VERBOSE', + '--log-directory={0}'.format(destination_node.logs_dir) + ] + + return self.run_pb(cmd_list + options) + def show_pb( self, backup_dir, instance=None, backup_id=None, options=[], as_text=False, as_json=True, old_binary=False, @@ -1736,10 +1764,10 @@ def compare_pgdata(self, original_pgdata, restored_pgdata): ): fail = True error_message += '\nFile permissions mismatch:\n' - error_message += ' File_old: {0} Permissions: {1}\n'.format( + error_message += ' File_old: {0} Permissions: {1:o}\n'.format( os.path.join(original_pgdata['pgdata'], file), original_pgdata['files'][file]['mode']) - error_message += ' File_new: {0} Permissions: {1}\n'.format( + error_message += ' File_new: {0} Permissions: {1:o}\n'.format( os.path.join(restored_pgdata['pgdata'], file), restored_pgdata['files'][file]['mode']) From 07127b8eb5dad28b9dd48ff2a8c51321006a1958 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 21 Jun 2021 23:55:29 +0300 Subject: [PATCH 180/525] [Issue #400] some leftovers --- src/catchup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/catchup.c b/src/catchup.c index f80a0f0f9..58ce13c10 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -52,7 +52,7 @@ catchup_collect_info(PGNodeInfo *source_node_info, const char *source_pgdata, co instance_config.system_identifier = 
get_system_identifier(source_pgdata, FIO_DB_HOST); current.start_time = time(NULL); - StrNCpy(current.program_version, PROGRAM_VERSION, sizeof(current.program_version)); + strlcpy(current.program_version, PROGRAM_VERSION, sizeof(current.program_version)); /* Do some compatibility checks and fill basic info about PG instance */ source_conn = pgdata_basic_setup(instance_config.conn_opt, source_node_info); From 57f871accce26046430e7ec3c54bdd7d13563907 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 15 Jul 2021 00:48:21 +0300 Subject: [PATCH 181/525] Version 2.4.17 --- src/pg_probackup.h | 2 +- tests/expected/option_version.out | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index fca08bdac..c9792ba1f 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -308,7 +308,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.4.16" +#define PROGRAM_VERSION "2.4.17" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20409 diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index 1330acb5a..5c86262a8 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.4.16 \ No newline at end of file +pg_probackup 2.4.17 \ No newline at end of file From f63faad5e77a3293f6510045da40ea75f69957e1 Mon Sep 17 00:00:00 2001 From: AndrewBille <83072690+AndrewBille@users.noreply.github.com> Date: Thu, 15 Jul 2021 14:44:00 +0700 Subject: [PATCH 182/525] tests: Run compatibility tests only if PGPROBACKUPBIN_OLD set (#408) --- tests/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/__init__.py b/tests/__init__.py index dbf84feea..5c8231ffb 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -23,7 +23,8 @@ def load_tests(loader, tests, pattern): # suite.addTests(loader.loadTestsFromModule(auth_test)) suite.addTests(loader.loadTestsFromModule(archive)) suite.addTests(loader.loadTestsFromModule(backup)) - suite.addTests(loader.loadTestsFromModule(compatibility)) + if 'PGPROBACKUPBIN_OLD' in os.environ and os.environ['PGPROBACKUPBIN_OLD']: + suite.addTests(loader.loadTestsFromModule(compatibility)) suite.addTests(loader.loadTestsFromModule(checkdb)) suite.addTests(loader.loadTestsFromModule(config)) # suite.addTests(loader.loadTestsFromModule(cfs_backup)) From 817b79b00f5dfa45ab676c8acaf6da6e6e4c5762 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Thu, 15 Jul 2021 12:25:32 +0300 Subject: [PATCH 183/525] Add compatibility with postgres master (upcoming PG-15). 
(#410) Upstream commit cda03cfed6b changed CreateReplicationSlot() signature --- src/stream.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/stream.c b/src/stream.c index 01161f720..3b84236c3 100644 --- a/src/stream.c +++ b/src/stream.c @@ -185,7 +185,12 @@ StreamLog(void *arg) #endif -#if PG_VERSION_NUM >= 110000 +#if PG_VERSION_NUM >= 150000 + /* Create temp repslot */ + if (temp_slot) + CreateReplicationSlot(stream_arg->conn, replication_slot, + NULL, temp_slot, true, true, false, false); +#elif PG_VERSION_NUM >= 110000 /* Create temp repslot */ if (temp_slot) CreateReplicationSlot(stream_arg->conn, replication_slot, From 0f3ae09bfd3b648dc6617a5fea58882f9b1a2ca0 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Mon, 2 Aug 2021 23:39:08 +0300 Subject: [PATCH 184/525] add REL_14_STABLE and master PG branches to travis --- .travis.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index c66cf6439..6d98d8ca3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,6 +26,7 @@ notifications: # Default MODE is basic, i.e. all tests with PG_PROBACKUP_TEST_BASIC=ON env: + - PG_VERSION=14 PG_BRANCH=REL_14_STABLE - PG_VERSION=13 PG_BRANCH=REL_13_STABLE - PG_VERSION=12 PG_BRANCH=REL_12_STABLE - PG_VERSION=11 PG_BRANCH=REL_11_STABLE @@ -42,9 +43,11 @@ env: # - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=replica # - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=retention # - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=restore + - PG_VERSION=15 PG_BRANCH=master -#jobs: -# allow_failures: +jobs: + allow_failures: + - env: PG_BRANCH=master # - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) # Only run CI for master branch commits to limit our travis usage From 4de1607e0805eef162d9164b3ecc128fd897c2ff Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Tue, 3 Aug 2021 13:20:07 +0300 Subject: [PATCH 185/525] "fix" unstable backup.BackupTest.test_backup_with_less_privileges_role (disable tests in archive mode from replica) (#414) --- tests/backup.py | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/tests/backup.py b/tests/backup.py index d713263c3..60e70cc28 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -2518,45 +2518,47 @@ def test_backup_with_less_privileges_role(self): replica.slow_start(replica=True) + # Archive backups from replica in this test are disabled, + # because WAL archiving on replica in idle DB in PostgreSQL is broken: + # replica will not archive the previous WAL until it receives new records in the next WAL file, + # this "lazy" archiving can be seen in src/backend/replication/walreceiver.c:XLogWalRcvWrite() + # (see !XLByteInSeg checking and XLogArchiveNotify() calling). 
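+        # Hence only stream backups are taken from the replica below.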
+ # # self.switch_wal_segment(node) - # self.switch_wal_segment(node) - - self.backup_node( - backup_dir, 'replica', replica, - datname='backupdb', options=['-U', 'backup']) + #self.backup_node( + # backup_dir, 'replica', replica, + # datname='backupdb', options=['-U', 'backup']) # stream full backup from replica self.backup_node( backup_dir, 'replica', replica, datname='backupdb', options=['--stream', '-U', 'backup']) -# self.switch_wal_segment(node) - # PAGE backup from replica - self.switch_wal_segment(node) - self.backup_node( - backup_dir, 'replica', replica, backup_type='page', - datname='backupdb', options=['-U', 'backup', '--archive-timeout=30s']) + #self.switch_wal_segment(node) + #self.backup_node( + # backup_dir, 'replica', replica, backup_type='page', + # datname='backupdb', options=['-U', 'backup', '--archive-timeout=30s']) self.backup_node( backup_dir, 'replica', replica, backup_type='page', datname='backupdb', options=['--stream', '-U', 'backup']) # DELTA backup from replica - self.switch_wal_segment(node) - self.backup_node( - backup_dir, 'replica', replica, backup_type='delta', - datname='backupdb', options=['-U', 'backup']) + #self.switch_wal_segment(node) + #self.backup_node( + # backup_dir, 'replica', replica, backup_type='delta', + # datname='backupdb', options=['-U', 'backup']) self.backup_node( backup_dir, 'replica', replica, backup_type='delta', datname='backupdb', options=['--stream', '-U', 'backup']) # PTRACK backup from replica if self.ptrack: - self.switch_wal_segment(node) - self.backup_node( - backup_dir, 'replica', replica, backup_type='ptrack', - datname='backupdb', options=['-U', 'backup']) + #self.switch_wal_segment(node) + #self.backup_node( + # backup_dir, 'replica', replica, backup_type='ptrack', + # datname='backupdb', options=['-U', 'backup']) self.backup_node( backup_dir, 'replica', replica, backup_type='ptrack', datname='backupdb', options=['--stream', '-U', 'backup']) From 475513996eee38bc3747b1e120b4a0a4a464ae43 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Tue, 3 Aug 2021 18:54:21 +0300 Subject: [PATCH 186/525] travis: refine allow_failures condition (allow fail with postgresql master branch) --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 6d98d8ca3..b6b8fd217 100644 --- a/.travis.yml +++ b/.travis.yml @@ -47,7 +47,7 @@ env: jobs: allow_failures: - - env: PG_BRANCH=master + - if: env(PG_BRANCH) = master # - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) # Only run CI for master branch commits to limit our travis usage From 8846e1997a7299aa9e11e7ea74805a3801258cf1 Mon Sep 17 00:00:00 2001 From: AndrewBille <83072690+AndrewBille@users.noreply.github.com> Date: Wed, 4 Aug 2021 01:59:49 +0700 Subject: [PATCH 187/525] Stabilizy tests. 
(#411) * tests: Introduced a new flag for tests -- PGPROBACKUP_GDB * tests: Do travis builds with CFLAGS="-O0" (stabilization of gdb tests) * tests: Run compatibility tests only if PGPROBACKUPBIN_OLD set * tests: Running some tests now depends on PGPROBACKUP_SSH_REMOTE --- tests/Readme.md | 5 +++ tests/archive.py | 11 +++--- tests/checkdb.py | 5 +++ tests/delta.py | 7 +++- tests/external.py | 12 +++++++ tests/helpers/ptrack_helpers.py | 18 +++++----- tests/pgpro2068.py | 5 +++ tests/replica.py | 20 +++++++++++ tests/restore.py | 16 +++++++-- tests/show.py | 59 +++++++++++---------------------- tests/snapfs.py | 3 +- tests/validate.py | 5 +++ travis/run_tests.sh | 2 +- 13 files changed, 109 insertions(+), 59 deletions(-) diff --git a/tests/Readme.md b/tests/Readme.md index f8dd91db0..668552c94 100644 --- a/tests/Readme.md +++ b/tests/Readme.md @@ -30,6 +30,11 @@ Specify path to pg_probackup binary file. By default tests use = 12 for this test') diff --git a/tests/show.py b/tests/show.py index 2a13a768b..b1ebebf18 100644 --- a/tests/show.py +++ b/tests/show.py @@ -212,6 +212,9 @@ def test_corrupt_control_file(self): # @unittest.expectedFailure def test_corrupt_correctness(self): """backup.control contains invalid option""" + if not self.remote: + self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" + " for run this test") fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -232,12 +235,7 @@ def test_corrupt_correctness(self): output_local = self.show_pb( backup_dir, 'node', as_json=False, backup_id=backup_local_id) - if self.remote: - backup_remote_id = self.backup_node(backup_dir, 'node', node) - else: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, - options=['--remote-proto=ssh', '--remote-host=localhost']) + backup_remote_id = self.backup_node(backup_dir, 'node', node) output_remote = self.show_pb( backup_dir, 'node', as_json=False, backup_id=backup_remote_id) @@ -260,13 +258,8 @@ def test_corrupt_correctness(self): backup_dir, 'node', as_json=False, backup_id=backup_local_id) self.delete_pb(backup_dir, 'node', backup_local_id) - if self.remote: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') - else: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--remote-proto=ssh', '--remote-host=localhost']) + backup_remote_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta') output_remote = self.show_pb( backup_dir, 'node', as_json=False, backup_id=backup_remote_id) @@ -290,13 +283,8 @@ def test_corrupt_correctness(self): backup_dir, 'node', as_json=False, backup_id=backup_local_id) self.delete_pb(backup_dir, 'node', backup_local_id) - if self.remote: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - else: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', - options=['--remote-proto=ssh', '--remote-host=localhost']) + backup_remote_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') output_remote = self.show_pb( backup_dir, 'node', as_json=False, backup_id=backup_remote_id) @@ -318,6 +306,9 @@ def test_corrupt_correctness(self): # @unittest.expectedFailure def test_corrupt_correctness_1(self): """backup.control contains invalid option""" + if not self.remote: + self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" + " for run this test") fname = self.id().split('.')[3] backup_dir = 
os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -338,12 +329,7 @@ def test_corrupt_correctness_1(self): output_local = self.show_pb( backup_dir, 'node', as_json=False, backup_id=backup_local_id) - if self.remote: - backup_remote_id = self.backup_node(backup_dir, 'node', node) - else: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, - options=['--remote-proto=ssh', '--remote-host=localhost']) + backup_remote_id = self.backup_node(backup_dir, 'node', node) output_remote = self.show_pb( backup_dir, 'node', as_json=False, backup_id=backup_remote_id) @@ -370,13 +356,8 @@ def test_corrupt_correctness_1(self): backup_dir, 'node', as_json=False, backup_id=backup_local_id) self.delete_pb(backup_dir, 'node', backup_local_id) - if self.remote: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') - else: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--remote-proto=ssh', '--remote-host=localhost']) + backup_remote_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta') output_remote = self.show_pb( backup_dir, 'node', as_json=False, backup_id=backup_remote_id) @@ -400,13 +381,8 @@ def test_corrupt_correctness_1(self): backup_dir, 'node', as_json=False, backup_id=backup_local_id) self.delete_pb(backup_dir, 'node', backup_local_id) - if self.remote: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - else: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', - options=['--remote-proto=ssh', '--remote-host=localhost']) + backup_remote_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') output_remote = self.show_pb( backup_dir, 'node', as_json=False, backup_id=backup_remote_id) @@ -428,6 +404,9 @@ def test_corrupt_correctness_1(self): # @unittest.expectedFailure def test_corrupt_correctness_2(self): """backup.control contains invalid option""" + if not self.remote: + self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" + " for run this test") fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( diff --git a/tests/snapfs.py b/tests/snapfs.py index a7f926c4c..991741952 100644 --- a/tests/snapfs.py +++ b/tests/snapfs.py @@ -10,9 +10,10 @@ class SnapFSTest(ProbackupTest, unittest.TestCase): # @unittest.expectedFailure - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_snapfs_simple(self): """standart backup modes with ARCHIVE WAL method""" + if not self.enterprise: + self.skipTest('This test must be run on enterprise') fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), diff --git a/tests/validate.py b/tests/validate.py index c5cc80733..0b04d92fe 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -1088,6 +1088,11 @@ def test_validate_instance_with_several_corrupt_backups_interrupt(self): """ check that interrupt during validation is handled correctly """ + if not self.gdb: + self.skipTest( + "Specify PGPROBACKUP_GDB and build without " + "optimizations for run this test" + ) fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 635b9f422..325b89060 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -35,7 +35,7 @@ git clone 
https://github.com/postgres/postgres.git -b $PG_BRANCH --depth=1 # Compile and install Postgres echo "############### Compiling Postgres:" cd postgres # Go to postgres dir -./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests +CFLAGS="-O0" ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests make -s -j$(nproc) install #make -s -j$(nproc) -C 'src/common' install #make -s -j$(nproc) -C 'src/port' install From 384cf6dcfd87060a0a705fe5ac647dc2d223555a Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Thu, 12 Aug 2021 14:50:07 +0300 Subject: [PATCH 188/525] CVE-2018-1058 fix (#415) * CVE-2018-1058 fix --- src/backup.c | 6 +- src/checkdb.c | 10 +-- src/ptrack.c | 8 +-- src/util.c | 2 +- src/utils/pgut.c | 30 ++++++++- tests/CVE_2018_1058.py | 143 +++++++++++++++++++++++++++++++++++++++++ tests/__init__.py | 4 +- 7 files changed, 188 insertions(+), 15 deletions(-) create mode 100644 tests/CVE_2018_1058.py diff --git a/src/backup.c b/src/backup.c index 83785c1cb..71fd9670e 100644 --- a/src/backup.c +++ b/src/backup.c @@ -928,7 +928,7 @@ check_server_version(PGconn *conn, PGNodeInfo *nodeInfo) nodeInfo->server_version_str, "9.6"); if (nodeInfo->pgpro_support) - res = pgut_execute(conn, "SELECT pgpro_edition()", 0, NULL); + res = pgut_execute(conn, "SELECT pg_catalog.pgpro_edition()", 0, NULL); /* * Check major version of connected PostgreSQL and major version of @@ -1120,7 +1120,7 @@ pgpro_support(PGconn *conn) PGresult *res; res = pgut_execute(conn, - "SELECT proname FROM pg_proc WHERE proname='pgpro_edition'", + "SELECT proname FROM pg_catalog.pg_proc WHERE proname='pgpro_edition'::name AND pronamespace='pg_catalog'::regnamespace::oid", 0, NULL); if (PQresultStatus(res) == PGRES_TUPLES_OK && @@ -1159,7 +1159,7 @@ get_database_map(PGconn *conn) */ res = pgut_execute_extended(conn, "SELECT oid, datname FROM pg_catalog.pg_database " - "WHERE datname NOT IN ('template1', 'template0')", + "WHERE datname NOT IN ('template1'::name, 'template0'::name)", 0, NULL, true, true); /* Don't error out, simply return NULL. See comment above. 
*/ diff --git a/src/checkdb.c b/src/checkdb.c index 4ea1d0800..e3f2df538 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -357,10 +357,10 @@ get_index_list(const char *dbname, bool first_db_with_amcheck, res = pgut_execute(db_conn, "SELECT " "extname, nspname, extversion " - "FROM pg_namespace n " - "JOIN pg_extension e " + "FROM pg_catalog.pg_namespace n " + "JOIN pg_catalog.pg_extension e " "ON n.oid=e.extnamespace " - "WHERE e.extname IN ('amcheck', 'amcheck_next') " + "WHERE e.extname IN ('amcheck'::name, 'amcheck_next'::name) " "ORDER BY extversion DESC " "LIMIT 1", 0, NULL); @@ -556,8 +556,8 @@ do_amcheck(ConnectionOptions conn_opt, PGconn *conn) res_db = pgut_execute(conn, "SELECT datname, oid, dattablespace " - "FROM pg_database " - "WHERE datname NOT IN ('template0', 'template1')", + "FROM pg_catalog.pg_database " + "WHERE datname NOT IN ('template0'::name, 'template1'::name)", 0, NULL); /* we don't need this connection anymore */ diff --git a/src/ptrack.c b/src/ptrack.c index 5a2b9f046..b5f3a88a6 100644 --- a/src/ptrack.c +++ b/src/ptrack.c @@ -169,7 +169,7 @@ get_ptrack_version(PGconn *backup_conn, PGNodeInfo *nodeInfo) res_db = pgut_execute(backup_conn, "SELECT extnamespace::regnamespace, extversion " - "FROM pg_catalog.pg_extension WHERE extname = 'ptrack'", + "FROM pg_catalog.pg_extension WHERE extname = 'ptrack'::name", 0, NULL); if (PQntuples(res_db) > 0) @@ -187,7 +187,7 @@ get_ptrack_version(PGconn *backup_conn, PGNodeInfo *nodeInfo) /* ptrack 1.x is supported, save version */ PQclear(res_db); res_db = pgut_execute(backup_conn, - "SELECT proname FROM pg_proc WHERE proname='ptrack_version'", + "SELECT proname FROM pg_catalog.pg_proc WHERE proname='ptrack_version'::name", 0, NULL); if (PQntuples(res_db) == 0) @@ -285,7 +285,7 @@ pg_ptrack_clear(PGconn *backup_conn, int ptrack_version_num) params[0] = palloc(64); params[1] = palloc(64); - res_db = pgut_execute(backup_conn, "SELECT datname, oid, dattablespace FROM pg_database", + res_db = pgut_execute(backup_conn, "SELECT datname, oid, dattablespace FROM pg_catalog.pg_database", 0, NULL); for(i = 0; i < PQntuples(res_db); i++) @@ -335,7 +335,7 @@ pg_ptrack_get_and_clear_db(Oid dbOid, Oid tblspcOid, PGconn *backup_conn) sprintf(params[0], "%i", dbOid); res_db = pgut_execute(backup_conn, - "SELECT datname FROM pg_database WHERE oid=$1", + "SELECT datname FROM pg_catalog.pg_database WHERE oid=$1", 1, (const char **) params); /* * If database is not found, it's not an error. 
diff --git a/src/util.c b/src/util.c index 9fd0114bb..1e540a974 100644 --- a/src/util.c +++ b/src/util.c @@ -169,7 +169,7 @@ get_current_timeline(PGconn *conn) char *val; res = pgut_execute_extended(conn, - "SELECT timeline_id FROM pg_control_checkpoint()", 0, NULL, true, true); + "SELECT timeline_id FROM pg_catalog.pg_control_checkpoint()", 0, NULL, true, true); if (PQresultStatus(res) == PGRES_TUPLES_OK) val = PQgetvalue(res, 0, 0); diff --git a/src/utils/pgut.c b/src/utils/pgut.c index 1d8845c23..e9f902c0e 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -20,6 +20,12 @@ #include "common/string.h" #endif +#if PG_VERSION_NUM >= 100000 +#include "common/connect.h" +#else +#include "fe_utils/connect.h" +#endif + #include #include "pgut.h" @@ -257,7 +263,7 @@ pgut_connect(const char *host, const char *port, pthread_lock(&atexit_callback_disconnect_mutex); pgut_atexit_push(pgut_disconnect_callback, conn); pthread_mutex_unlock(&atexit_callback_disconnect_mutex); - return conn; + break; } if (conn && PQconnectionNeedsPassword(conn) && prompt_password) @@ -279,6 +285,28 @@ pgut_connect(const char *host, const char *port, PQfinish(conn); return NULL; } + + /* + * Fix for CVE-2018-1058. This code was taken with small modification from + * src/bin/pg_basebackup/streamutil.c:GetConnection() + */ + if (dbname != NULL) + { + PGresult *res; + + res = PQexec(conn, ALWAYS_SECURE_SEARCH_PATH_SQL); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + { + elog(ERROR, "could not clear search_path: %s", + PQerrorMessage(conn)); + PQclear(res); + PQfinish(conn); + return NULL; + } + PQclear(res); + } + + return conn; } PGconn * diff --git a/tests/CVE_2018_1058.py b/tests/CVE_2018_1058.py new file mode 100644 index 000000000..3da41f116 --- /dev/null +++ b/tests/CVE_2018_1058.py @@ -0,0 +1,143 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException + +module_name = 'CVE-2018-1058' + +class CVE_2018_1058(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + def test_basic_default_search_path(self): + """""" + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + 'postgres', + "CREATE FUNCTION public.pgpro_edition() " + "RETURNS text " + "AS $$ " + "BEGIN " + " RAISE 'pg_probackup vulnerable!'; " + "END " + "$$ LANGUAGE plpgsql") + + self.backup_node(backup_dir, 'node', node, backup_type='full', options=['--stream']) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_basic_backup_modified_search_path(self): + """""" + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True) + self.set_auto_conf(node, options={'search_path': 'public,pg_catalog'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + 'postgres', + "CREATE FUNCTION public.pg_control_checkpoint(OUT timeline_id integer, OUT dummy integer) " + "RETURNS record " + "AS $$ " + "BEGIN " + " RAISE '% vulnerable!', 'pg_probackup'; " + "END " + "$$ LANGUAGE plpgsql") + + node.safe_psql( + 'postgres', + "CREATE FUNCTION public.pg_proc(OUT proname name, OUT 
dummy integer) " + "RETURNS record " + "AS $$ " + "BEGIN " + " RAISE '% vulnerable!', 'pg_probackup'; " + "END " + "$$ LANGUAGE plpgsql; " + "CREATE VIEW public.pg_proc AS SELECT proname FROM public.pg_proc()") + + self.backup_node(backup_dir, 'node', node, backup_type='full', options=['--stream']) + + log_file = os.path.join(node.logs_dir, 'postgresql.log') + with open(log_file, 'r') as f: + log_content = f.read() + self.assertFalse( + 'pg_probackup vulnerable!' in log_content) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_basic_checkdb_modified_search_path(self): + """""" + fname = self.id().split('.')[3] + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums']) + self.set_auto_conf(node, options={'search_path': 'public,pg_catalog'}) + node.slow_start() + + node.safe_psql( + 'postgres', + "CREATE FUNCTION public.pg_database(OUT datname name, OUT oid oid, OUT dattablespace oid) " + "RETURNS record " + "AS $$ " + "BEGIN " + " RAISE 'pg_probackup vulnerable!'; " + "END " + "$$ LANGUAGE plpgsql; " + "CREATE VIEW public.pg_database AS SELECT * FROM public.pg_database()") + + node.safe_psql( + 'postgres', + "CREATE FUNCTION public.pg_extension(OUT extname name, OUT extnamespace oid, OUT extversion text) " + "RETURNS record " + "AS $$ " + "BEGIN " + " RAISE 'pg_probackup vulnerable!'; " + "END " + "$$ LANGUAGE plpgsql; " + "CREATE FUNCTION public.pg_namespace(OUT oid oid, OUT nspname name) " + "RETURNS record " + "AS $$ " + "BEGIN " + " RAISE 'pg_probackup vulnerable!'; " + "END " + "$$ LANGUAGE plpgsql; " + "CREATE VIEW public.pg_extension AS SELECT * FROM public.pg_extension();" + "CREATE VIEW public.pg_namespace AS SELECT * FROM public.pg_namespace();" + ) + + try: + self.checkdb_node( + options=[ + '--amcheck', + '--skip-block-validation', + '-d', 'postgres', '-p', str(node.port)]) + self.assertEqual( + 1, 0, + "Expecting Error because amcheck{,_next} not installed\n" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "WARNING: Extension 'amcheck' or 'amcheck_next' are not installed in database postgres", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + # Clean after yourself + self.del_test_dir(module_name, fname) diff --git a/tests/__init__.py b/tests/__init__.py index 5c8231ffb..3a297c45e 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -6,7 +6,8 @@ retention, pgpro560, pgpro589, pgpro2068, false_positive, replica, \ compression, page, ptrack, archive, exclude, cfs_backup, cfs_restore, \ cfs_validate_backup, auth_test, time_stamp, snapfs, logging, \ - locking, remote, external, config, checkdb, set_backup, incr_restore + locking, remote, external, config, checkdb, set_backup, incr_restore, \ + CVE_2018_1058 def load_tests(loader, tests, pattern): @@ -55,6 +56,7 @@ def load_tests(loader, tests, pattern): suite.addTests(loader.loadTestsFromModule(snapfs)) suite.addTests(loader.loadTestsFromModule(time_stamp)) suite.addTests(loader.loadTestsFromModule(validate)) + suite.addTests(loader.loadTestsFromModule(CVE_2018_1058)) return suite From 002d7b53b982bf06567272c0a89f5cbeda1025d7 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Fri, 13 Aug 2021 02:59:20 +0300 Subject: [PATCH 189/525] [PGPRO-5378] fix various tests (#420) * [PGPRO-5378] tests.replica.ReplicaTest.test_replica_archive_page_backup stabilization * Skip some tests on PG-9.5 (test_replica_switchover, test_replica_promote_archive_delta, test_replica_promote_archive_page, test_parent_choosing) * travis: Fix compatibility issues with GDB --- tests/replica.py | 43 ++++++++++++++++++++++++++++++++++++--- travis/Dockerfile.in | 1 + travis/docker-compose.yml | 19 +++++++++++++++-- travis/make_dockerfile.sh | 6 ++++++ travis/run_tests.sh | 6 ++++++ 5 files changed, 70 insertions(+), 5 deletions(-) diff --git a/tests/replica.py b/tests/replica.py index 828305da7..383a4979a 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -21,7 +21,6 @@ def test_replica_switchover(self): over the course of several switchovers https://www.postgresql.org/message-id/54b059d4-2b48-13a4-6f43-95a087c92367%40postgrespro.ru """ - fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node1 = self.make_simple_node( @@ -29,6 +28,11 @@ def test_replica_switchover(self): set_replication=True, initdb_params=['--data-checksums']) + if self.get_version(node1) < self.version_to_num('9.6.0'): + self.del_test_dir(module_name, fname) + return unittest.skip( + 'Skipped because backup from replica is not supported in PG 9.5') + self.init_pb(backup_dir) self.add_instance(backup_dir, 'node1', node1) @@ -287,6 +291,16 @@ def test_replica_archive_page_backup(self): self.wait_until_replica_catch_with_master(master, replica) + master.pgbench_init(scale=5) + # Continuous making some changes on master, + # because WAL archiving on replica in idle DB in PostgreSQL is broken: + # replica will not archive the previous WAL until it receives new records in the next WAL file, + # this "lazy" archiving can be seen in src/backend/replication/walreceiver.c:XLogWalRcvWrite() + # (see !XLByteInSeg checking and XLogArchiveNotify() calling). 
+ pgbench = master.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '3', '-c', '1', '--no-vacuum']) + backup_id = self.backup_node( backup_dir, 'replica', replica, options=[ @@ -295,6 +309,9 @@ def test_replica_archive_page_backup(self): '--master-db=postgres', '--master-port={0}'.format(master.port)]) + pgbench.wait() + pgbench.stdout.close() + self.validate_pb(backup_dir, 'replica') self.assertEqual( 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) @@ -317,8 +334,6 @@ def test_replica_archive_page_backup(self): # Change data on master, make PAGE backup from replica, # restore taken backup and check that restored data equal # to original data - master.pgbench_init(scale=5) - pgbench = master.pgbench( options=['-T', '30', '-c', '2', '--no-vacuum']) @@ -535,6 +550,11 @@ def test_replica_promote(self): start backup from replica, during backup promote replica check that backup is failed """ + if not self.gdb: + self.skipTest( + "Specify PGPROBACKUP_GDB and build without " + "optimizations for run this test" + ) fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( @@ -706,6 +726,7 @@ def test_replica_stop_lsn_null_offset(self): output) # Clean after yourself + gdb_checkpointer.kill() self.del_test_dir(module_name, fname) # @unittest.skip("skip") @@ -1085,6 +1106,7 @@ def test_replica_toast(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself + gdb_checkpointer.kill() self.del_test_dir(module_name, fname) # @unittest.skip("skip") @@ -1313,6 +1335,11 @@ def test_replica_promote_archive_delta(self): 'checkpoint_timeout': '30s', 'archive_timeout': '30s'}) + if self.get_version(node1) < self.version_to_num('9.6.0'): + self.del_test_dir(module_name, fname) + return unittest.skip( + 'Skipped because backup from replica is not supported in PG 9.5') + self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node1) self.set_config( @@ -1433,6 +1460,11 @@ def test_replica_promote_archive_page(self): 'checkpoint_timeout': '30s', 'archive_timeout': '30s'}) + if self.get_version(node1) < self.version_to_num('9.6.0'): + self.del_test_dir(module_name, fname) + return unittest.skip( + 'Skipped because backup from replica is not supported in PG 9.5') + self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node1) self.set_archiving(backup_dir, 'node', node1) @@ -1550,6 +1582,11 @@ def test_parent_choosing(self): set_replication=True, initdb_params=['--data-checksums']) + if self.get_version(master) < self.version_to_num('9.6.0'): + self.del_test_dir(module_name, fname) + return unittest.skip( + 'Skipped because backup from replica is not supported in PG 9.5') + self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) diff --git a/travis/Dockerfile.in b/travis/Dockerfile.in index a1f30d7f6..3e451e24f 100644 --- a/travis/Dockerfile.in +++ b/travis/Dockerfile.in @@ -10,6 +10,7 @@ RUN python3 -m pip install virtualenv # Environment ENV PG_MAJOR=${PG_VERSION} PG_BRANCH=${PG_BRANCH} +ENV PGPROBACKUP_GDB=${PGPROBACKUP_GDB} ENV LANG=C.UTF-8 PGHOME=/pg/testdir/pgbin # Make directories diff --git a/travis/docker-compose.yml b/travis/docker-compose.yml index 471ab779f..fc6545567 100644 --- a/travis/docker-compose.yml +++ b/travis/docker-compose.yml @@ -1,2 +1,17 @@ -tests: - build: . +version: "3.7" +services: + tests: + build: + context: . 
+ + cap_add: + - SYS_PTRACE + + security_opt: + - seccomp=unconfined + + # don't work + #sysctls: + # kernel.yama.ptrace_scope: 0 + privileged: true + diff --git a/travis/make_dockerfile.sh b/travis/make_dockerfile.sh index 3e6938bd9..2e8ccd5a3 100755 --- a/travis/make_dockerfile.sh +++ b/travis/make_dockerfile.sh @@ -14,12 +14,18 @@ if [ -z ${MODE+x} ]; then MODE=basic fi +if [ -z ${PGPROBACKUP_GDB+x} ]; then + PGPROBACKUP_GDB=ON +fi + echo PG_VERSION=${PG_VERSION} echo PG_BRANCH=${PG_BRANCH} echo MODE=${MODE} +echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB} sed \ -e 's/${PG_VERSION}/'${PG_VERSION}/g \ -e 's/${PG_BRANCH}/'${PG_BRANCH}/g \ -e 's/${MODE}/'${MODE}/g \ + -e 's/${PGPROBACKUP_GDB}/'${PGPROBACKUP_GDB}/g \ Dockerfile.in > Dockerfile diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 325b89060..488d8ee45 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -65,6 +65,12 @@ which pg_config echo "############### pg_config:" pg_config +# Show kernel parameters +echo "############### kernel params:" +cat /proc/sys/kernel/yama/ptrace_scope +sudo sysctl kernel.yama.ptrace_scope=0 +cat /proc/sys/kernel/yama/ptrace_scope + # Build and install pg_probackup (using PG_CPPFLAGS and SHLIB_LINK for gcov) echo "############### Compiling and installing pg_probackup:" # make USE_PGXS=1 PG_CPPFLAGS="-coverage" SHLIB_LINK="-coverage" top_srcdir=$CUSTOM_PG_SRC install From c00ffe27906824b1110c86625ff4ef6a6fef35d0 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Fri, 13 Aug 2021 15:05:42 +0300 Subject: [PATCH 190/525] catchup functionality expansion (#419) [PR #419] New command 'catchup' for synchronizing stale standby with master by transfering only changes blocks, or creating standby from scratch. Co-authored-by: Elena Indrupskaya Co-authored-by: Grigory Smolkin --- .travis.yml | 33 ++- doc/pgprobackup.xml | 179 +++++++++--- src/backup.c | 6 +- src/catchup.c | 301 ++++++++++++-------- src/data.c | 332 ++++++++++------------ src/dir.c | 39 ++- src/help.c | 14 +- src/merge.c | 2 +- src/pg_probackup.c | 73 +++-- src/pg_probackup.h | 34 ++- src/ptrack.c | 15 +- src/stream.c | 166 +++++++---- src/utils/configuration.h | 14 +- src/utils/file.c | 9 +- tests/backup.py | 13 +- tests/catchup.py | 581 +++++++++++++++++++++++++++++++++----- travis/Dockerfile.in | 1 + travis/make_dockerfile.sh | 6 + travis/run_tests.sh | 17 ++ 19 files changed, 1311 insertions(+), 524 deletions(-) diff --git a/.travis.yml b/.travis.yml index b6b8fd217..873dd8f20 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,24 +26,26 @@ notifications: # Default MODE is basic, i.e. 
all tests with PG_PROBACKUP_TEST_BASIC=ON env: - - PG_VERSION=14 PG_BRANCH=REL_14_STABLE - - PG_VERSION=13 PG_BRANCH=REL_13_STABLE - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE - - PG_VERSION=11 PG_BRANCH=REL_11_STABLE + - PG_VERSION=15 PG_BRANCH=master PTRACK_PATCH_PG_VERSION=13 + - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_VERSION=13 + - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 + - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_VERSION=12 + - PG_VERSION=11 PG_BRANCH=REL_11_STABLE PTRACK_PATCH_PG_VERSION=11 - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE -# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=archive -# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=backup -# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=compression -# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=delta -# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=locking -# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=merge -# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=page -# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=replica -# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=retention -# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=restore - - PG_VERSION=15 PG_BRANCH=master +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=off MODE=archive +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 MODE=backup +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 MODE=catchup +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=off MODE=compression +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=off MODE=delta +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=off MODE=locking +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 MODE=merge +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=off MODE=page +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 MODE=ptrack +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 MODE=replica +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=off MODE=retention +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 MODE=restore jobs: allow_failures: @@ -54,3 +56,4 @@ jobs: #branches: # only: # - master + diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index f7814c2d2..7178cb14c 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -3409,16 +3409,29 @@ pg_probackup delete -B backup_dir --instance - Cloning <productname>PostgreSQL</productname> Instance + Cloning and Synchronizing <productname>PostgreSQL</productname> Instance pg_probackup can create a copy of a PostgreSQL - instance directly, without using the backup catalog. This allows you - to add a new standby server in a parallel mode or to have a standby - server that has fallen behind catch up with master. + instance directly, without using the backup catalog. To do this, you can run the command. + It can be useful in the following cases: + + + To add a new standby server. + Usually, pg_basebackup + is used to create a copy of a PostgreSQL instance. If the data directory of the destination instance + is empty, the catchup command works similarly, but it can be faster if run in parallel mode. + + + To have a fallen-behind standby server catch up with master. + Under high write load, replicas may fail to replay WAL fast enough to keep up with master and hence may lag behind. 
+ A usual solution to create a new replica and switch to it requires a lot of extra space and data transfer. The catchup + command allows you to update an existing replica much faster by bringing differences from master. + + - Cloning a PostgreSQL instance is different from other pg_probackup + catchup is different from other pg_probackup operations: @@ -3439,12 +3452,12 @@ pg_probackup delete -B backup_dir --instance - No SQL commands involving tablespaces, such as + DDL commands CREATE TABLESPACE/DROP TABLESPACE, - can be run simultaneously with catchup. + >DROP TABLESPACE + cannot be run simultaneously with catchup. @@ -3452,14 +3465,16 @@ pg_probackup delete -B backup_dir --instance catchup takes configuration files, such as postgresql.conf, postgresql.auto.conf, or pg_hba.conf, from the source server and overwrites them - on the target server. + on the target server. The option allows you to keep + the configuration files intact. - Before cloning a PostgreSQL instance, set up the source database server as follows: + To prepare for cloning/synchronizing a PostgreSQL instance, + set up the source instance server as follows: @@ -3481,9 +3496,10 @@ pg_probackup delete -B backup_dir --instance - To clone a PostgreSQL instance, ensure that the source - database server is running and accepting connections and - on the server with the destination database, run the following command: + Before cloning/synchronizing a PostgreSQL instance, ensure that the source + instance server is running and accepting connections. To clone/sync a PostgreSQL instance, + on the server with the destination instance, you can run + the command as follows: pg_probackup catchup -b catchup-mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream [connection_options] [remote_options] @@ -3496,33 +3512,43 @@ pg_probackup catchup -b catchup-mode --source-pgdata= FULL — creates a full copy of the PostgreSQL instance. - The destination directory must be empty for this mode. + The data directory of the destination instance must be empty for this mode. DELTA — reads all data files in the data directory and creates an incremental copy for pages that have changed - since the destination database was shut down cleanly. - For this mode, the destination directory must contain a previous - copy of the database that was shut down cleanly. + since the destination instance was shut down. PTRACK — tracking page changes on the fly, - only copies pages that have changed since the point of divergence - of the source and destination databases. - For this mode, the destination directory must contain a previous - copy of the database that was shut down cleanly. + only reads and copies pages that have changed since the point of divergence + of the source and destination instances. + + + PTRACK catchup mode requires PTRACK + not earlier than 2.0 and hence, PostgreSQL not earlier than 11. + + + + By specifying the option, you can set + STREAM WAL delivery mode + of copying, which will include all the necessary WAL files by streaming them from + the instance server via replication protocol. + You can use connection_options to specify the connection to the source database cluster. If it is located on a different server, also specify remote_options. 
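       As an illustration (the paths, port, and user names below are placeholders,
       not values taken from this patch), a DELTA-mode run that refreshes an
       existing destination data directory over SSH could look like this:
      
pg_probackup catchup -b DELTA --source-pgdata=/master-pgdata --destination-pgdata=/replica-pgdata -p 5432 -d postgres -U remote-postgres-user --stream --remote-host=remote-hostname --remote-user=remote-unix-username -j 4
      
       The connection_options and remote_options here are the same ones used in the
       PTRACK example further below; only the catchup mode differs.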
- If the source database contains tablespaces that must be located in + + + If the source database cluster contains tablespaces that must be located in a different directory, additionally specify the option: @@ -3538,8 +3564,9 @@ pg_probackup catchup -b catchup-mode --source-pgdata= For example, assume that a remote standby server with the PostgreSQL instance having /replica-pgdata data directory has fallen behind. To sync this instance with the one in /master-pgdata data directory, you can run the catchup command in the PTRACK mode on four parallel threads as follows: -pg_probackup catchup --source-pgdata=/master-pgdata --destination-pgdata=/replica-pgdata -p 5432 -d postgres -U remote-postgres-user --stream --backup-mode=PTRACK --remote-host=remote-hostname --remote-user=remote-unix-username -j 4 +pg_probackup catchup --source-pgdata=/master-pgdata --destination-pgdata=/replica-pgdata -p 5432 -d postgres -U remote-postgres-user --stream --backup-mode=PTRACK --remote-host=remote-hostname --remote-user=remote-unix-username -j 4 --exclude-path=postgresql.conf --exclude-path=postgresql.auto.conf --exclude-path=pg_hba.conf --exclude-path=pg_ident.conf + Note that in this example, the configuration files will not be overwritten during synchronization. Another example shows how you can add a new remote standby server with the PostgreSQL data directory /replica-pgdata by running the catchup command in the FULL mode @@ -4428,7 +4455,9 @@ pg_probackup archive-get -B backup_dir --instance catchup_mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir -[--help] [--stream] [-j num_threads] +[--help] [-j | --threads=num_threads] [--stream] +[--temp-slot] [-P | --perm-slot] [-S | --slot=slot_name] +[--exclude-path=PATHNAME] [-T OLDDIR=NEWDIR] [connection_options] [remote_options] @@ -4454,14 +4483,20 @@ pg_probackup catchup -b catchup_mode DELTA — reads all data files in the data directory and creates an incremental copy for pages that have changed - since the destination database was shut down cleanly. + since the destination instance was shut down. PTRACK — tracking page changes on the fly, - only copies pages that have changed since the point of divergence - of the source and destination databases. + only reads and copies pages that have changed since the point of divergence + of the source and destination instances. + + + PTRACK catchup mode requires PTRACK + not earlier than 2.0 and hence, PostgreSQL not earlier than 11. + + @@ -4487,24 +4522,98 @@ pg_probackup catchup -b catchup_mode + + + + + + Sets the number of parallel threads for + catchup process. + + + + - Makes a STREAM backup, which - includes all the necessary WAL files by streaming them from - the database server via replication protocol. + Copies the instance in STREAM WAL delivery mode, + including all the necessary WAL files by streaming them from + the instance server via replication protocol. - - +=path_prefix +=path_prefix - Sets the number of parallel threads for - catchup process. + Specifies a prefix for files to exclude from the synchronization of PostgreSQL + instances during copying. The prefix must contain a path relative to the data directory of an instance. + If the prefix specifies a directory, + all files in this directory will not be synchronized. + + + This option is dangerous since excluding files from synchronization can result in + incomplete synchronization; use with care. 
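        To illustrate the prefix semantics described above (the file and directory
        names are only examples), --exclude-path=postgresql.auto.conf skips that
        single file, while --exclude-path=log skips every file under the log
        directory because the prefix names a directory:
       
pg_probackup catchup -b FULL --source-pgdata=/master-pgdata --destination-pgdata=/replica-pgdata --stream --exclude-path=postgresql.auto.conf --exclude-path=log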
+ + + + + + + + + + + Copies the instance in STREAM WAL delivery mode, + including all the necessary WAL files by streaming them from + the instance server via replication protocol. + + + + + + + + + Creates a temporary physical replication slot for streaming + WAL from the PostgreSQL instance being copied. It ensures that + all the required WAL segments remain available if WAL is + rotated while the backup is in progress. This flag can only be + used together with the flag and + cannot be used together with the flag. + The default slot name is pg_probackup_slot, + which can be changed using the / option. + + + + + + + + + + Creates a permanent physical replication slot for streaming + WAL from the PostgreSQL instance being copied. This flag can only be + used together with the flag and + cannot be used together with the flag. + The default slot name is pg_probackup_perm_slot, + which can be changed using the / option. + + + + + + + + + + Specifies the replication slot for WAL streaming. This option + can only be used together with the + flag. @@ -4533,7 +4642,7 @@ pg_probackup catchup -b catchup_mode For details on usage, see the section - Cloning PostgreSQL Instance. + Cloning and Synchronizing PostgreSQL Instance. diff --git a/src/backup.c b/src/backup.c index e9c8a22d1..1d08c3828 100644 --- a/src/backup.c +++ b/src/backup.c @@ -263,7 +263,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, fio_mkdir(stream_xlog_path, DIR_PERMISSION, FIO_BACKUP_HOST); start_WAL_streaming(backup_conn, stream_xlog_path, &instance_config.conn_opt, - current.start_lsn, current.tli); + current.start_lsn, current.tli, true); /* Make sure that WAL streaming is working * PAGE backup in stream mode is waited twice, first for @@ -2051,8 +2051,6 @@ backup_files(void *arg) instance_config.compress_alg, instance_config.compress_level, arguments->nodeInfo->checksum_version, - arguments->nodeInfo->ptrack_version_num, - arguments->nodeInfo->ptrack_schema, arguments->hdr_map, false); } else @@ -2350,7 +2348,7 @@ calculate_datasize_of_filelist(parray *filelist) { pgFile *file = (pgFile *) parray_get(filelist, i); - if (file->external_dir_num != 0) + if (file->external_dir_num != 0 || file->excluded) continue; if (S_ISDIR(file->mode)) diff --git a/src/catchup.c b/src/catchup.c index 58ce13c10..5a0c8e45a 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -27,20 +27,19 @@ /* * Catchup routines */ -static PGconn *catchup_collect_info(PGNodeInfo *source_node_info, const char *source_pgdata, const char *dest_pgdata); +static PGconn *catchup_init_state(PGNodeInfo *source_node_info, const char *source_pgdata, const char *dest_pgdata); static void catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, const char *source_pgdata, const char *dest_pgdata); static void catchup_check_tablespaces_existance_in_tbsmapping(PGconn *conn); static parray* catchup_get_tli_history(ConnectionOptions *conn_opt, TimeLineID tli); -//REVIEW The name of this function looks strange to me. -//Maybe catchup_init_state() or catchup_setup() will do better? -//I'd also suggest to wrap all these fields into some CatchupState, but it isn't urgent. +//REVIEW I'd also suggest to wrap all these fields into some CatchupState, but it isn't urgent. +//REVIEW_ANSWER what for? 
/* * Prepare for work: fill some globals, open connection to source database */ static PGconn * -catchup_collect_info(PGNodeInfo *source_node_info, const char *source_pgdata, const char *dest_pgdata) +catchup_init_state(PGNodeInfo *source_node_info, const char *source_pgdata, const char *dest_pgdata) { PGconn *source_conn; @@ -159,17 +158,6 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, elog(ERROR, "Destination directory contains \"" PG_BACKUP_LABEL_FILE "\" file"); } - /* check that destination database is shutdowned cleanly */ - if (current.backup_mode != BACKUP_MODE_FULL) - { - DBState state; - state = get_system_dbstate(dest_pgdata, FIO_LOCAL_HOST); - /* see states in postgres sources (src/include/catalog/pg_control.h) */ - if (state != DB_SHUTDOWNED && state != DB_SHUTDOWNED_IN_RECOVERY) - elog(ERROR, "Postmaster in destination directory \"%s\" must be stopped cleanly", - dest_pgdata); - } - /* Check that connected PG instance, source and destination PGDATA are the same */ { uint64 source_conn_id, source_id, dest_id; @@ -366,6 +354,7 @@ typedef struct XLogRecPtr sync_lsn; BackupMode backup_mode; int thread_num; + size_t transfered_bytes; bool completed; } catchup_thread_runner_arg; @@ -390,6 +379,9 @@ catchup_thread_runner(void *arg) if (S_ISDIR(file->mode)) continue; + if (file->excluded) + continue; + if (!pg_atomic_test_set_flag(&file->lock)) continue; @@ -431,12 +423,7 @@ catchup_thread_runner(void *arg) catchup_data_file(file, from_fullpath, to_fullpath, arguments->sync_lsn, arguments->backup_mode, - NONE_COMPRESS, - 0, arguments->nodeInfo->checksum_version, - arguments->nodeInfo->ptrack_version_num, - arguments->nodeInfo->ptrack_schema, - false, dest_file != NULL ? dest_file->size : 0); } else @@ -445,6 +432,7 @@ catchup_thread_runner(void *arg) arguments->backup_mode, current.parent_backup, true); } + /* file went missing during catchup */ if (file->write_size == FILE_NOT_FOUND) continue; @@ -454,6 +442,7 @@ catchup_thread_runner(void *arg) continue; } + arguments->transfered_bytes += file->write_size; elog(VERBOSE, "File \"%s\". Copied "INT64_FORMAT " bytes", from_fullpath, file->write_size); } @@ -469,8 +458,10 @@ catchup_thread_runner(void *arg) /* * main multithreaded copier + * returns size of transfered data file + * or -1 in case of error */ -static bool +static ssize_t catchup_multithreaded_copy(int num_threads, PGNodeInfo *source_node_info, const char *source_pgdata_path, @@ -485,6 +476,7 @@ catchup_multithreaded_copy(int num_threads, pthread_t *threads; bool all_threads_successful = true; + ssize_t transfered_bytes_result = 0; int i; /* init thread args */ @@ -499,6 +491,7 @@ catchup_multithreaded_copy(int num_threads, .sync_lsn = sync_lsn, .backup_mode = backup_mode, .thread_num = i + 1, + .transfered_bytes = 0, .completed = false, }; @@ -516,15 +509,16 @@ catchup_multithreaded_copy(int num_threads, { pthread_join(threads[i], NULL); all_threads_successful &= threads_args[i].completed; + transfered_bytes_result += threads_args[i].transfered_bytes; } free(threads); free(threads_args); - return all_threads_successful; + return all_threads_successful ? 
transfered_bytes_result : -1; } /* - * + * Sync every file in destination directory to disk */ static void catchup_sync_destination_files(const char* pgdata_path, fio_location location, parray *filelist, pgFile *pg_control_file) @@ -541,8 +535,13 @@ catchup_sync_destination_files(const char* pgdata_path, fio_location location, p { pgFile *file = (pgFile *) parray_get(filelist, i); - /* TODO: sync directory ? */ - if (S_ISDIR(file->mode)) + /* TODO: sync directory ? + * - at first glance we can rely on fs journaling, + * which is enabled by default on most platforms + * - but PG itself is not relying on fs, its durable_sync + * includes directory sync + */ + if (S_ISDIR(file->mode) || file->excluded) continue; Assert(file->external_dir_num == 0); @@ -564,11 +563,50 @@ catchup_sync_destination_files(const char* pgdata_path, fio_location location, p elog(INFO, "Files are synced, time elapsed: %s", pretty_time); } +/* + * Filter filelist helper function (used to process --exclude-path's) + * filelist -- parray of pgFile *, can't be NULL + * exclude_absolute_paths_list -- sorted parray of char * (absolute paths, starting with '/'), can be NULL + * exclude_relative_paths_list -- sorted parray of char * (relative paths), can be NULL + * logging_string -- helper parameter, used for generating verbose log messages ("Source" or "Destination") + */ +static void +filter_filelist(parray *filelist, const char *pgdata, + parray *exclude_absolute_paths_list, parray *exclude_relative_paths_list, + const char *logging_string) +{ + int i; + + if (exclude_absolute_paths_list == NULL && exclude_relative_paths_list == NULL) + return; + + for (i = 0; i < parray_num(filelist); ++i) + { + char full_path[MAXPGPATH]; + pgFile *file = (pgFile *) parray_get(filelist, i); + join_path_components(full_path, pgdata, file->rel_path); + + if ( + (exclude_absolute_paths_list != NULL + && parray_bsearch(exclude_absolute_paths_list, full_path, pgPrefixCompareString)!= NULL + ) || ( + exclude_relative_paths_list != NULL + && parray_bsearch(exclude_relative_paths_list, file->rel_path, pgPrefixCompareString)!= NULL) + ) + { + elog(LOG, "%s file \"%s\" excluded with --exclude-path option", logging_string, full_path); + file->excluded = true; + } + } +} + /* * Entry point of pg_probackup CATCHUP subcommand. 
+ * exclude_*_paths_list are parray's of char * */ int -do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, bool sync_dest_files) +do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, bool sync_dest_files, + parray *exclude_absolute_paths_list, parray *exclude_relative_paths_list) { PGconn *source_conn = NULL; PGNodeInfo source_node_info; @@ -586,33 +624,27 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* for fancy reporting */ time_t start_time, end_time; - char pretty_time[20]; - char pretty_bytes[20]; + ssize_t transfered_datafiles_bytes = 0; + ssize_t transfered_walfiles_bytes = 0; + char pretty_source_bytes[20]; - source_conn = catchup_collect_info(&source_node_info, source_pgdata, dest_pgdata); + source_conn = catchup_init_state(&source_node_info, source_pgdata, dest_pgdata); catchup_preflight_checks(&source_node_info, source_conn, source_pgdata, dest_pgdata); - elog(LOG, "Database catchup start"); + /* we need to sort --exclude_path's for future searching */ + if (exclude_absolute_paths_list != NULL) + parray_qsort(exclude_absolute_paths_list, pgCompareString); + if (exclude_relative_paths_list != NULL) + parray_qsort(exclude_relative_paths_list, pgCompareString); - { - char label[1024]; - /* notify start of backup to PostgreSQL server */ - time2iso(label, lengthof(label), current.start_time, false); - strncat(label, " with pg_probackup", lengthof(label) - - strlen(" with pg_probackup")); - - /* Call pg_start_backup function in PostgreSQL connect */ - pg_start_backup(label, smooth_checkpoint, ¤t, &source_node_info, source_conn); - elog(LOG, "pg_start_backup START LSN %X/%X", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn)); - } + elog(LOG, "Database catchup start"); - //REVIEW I wonder, if we can move this piece above and call before pg_start backup()? - //It seems to be a part of setup phase. if (current.backup_mode != BACKUP_MODE_FULL) { dest_filelist = parray_new(); dir_list_file(dest_filelist, dest_pgdata, true, true, false, backup_logs, true, 0, FIO_LOCAL_HOST); + filter_filelist(dest_filelist, dest_pgdata, exclude_absolute_paths_list, exclude_relative_paths_list, "Destination"); // fill dest_redo.lsn and dest_redo.tli get_redo(dest_pgdata, FIO_LOCAL_HOST, &dest_redo); @@ -627,16 +659,14 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, */ } - //REVIEW I wonder, if we can move this piece above and call before pg_start backup()? - //It seems to be a part of setup phase. 
/* + * Make sure that sync point is withing ptrack tracking range * TODO: move to separate function to use in both backup.c and catchup.c */ if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK) { XLogRecPtr ptrack_lsn = get_last_ptrack_lsn(source_conn, &source_node_info); - // new ptrack is more robust and checks Start LSN if (ptrack_lsn > dest_redo.lsn || ptrack_lsn == InvalidXLogRecPtr) elog(ERROR, "LSN from ptrack_control in source %X/%X is greater than checkpoint LSN in destination %X/%X.\n" "You can perform only FULL catchup.", @@ -645,7 +675,19 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, (uint32) (dest_redo.lsn)); } - /* Check that dest_redo.lsn is less than current.start_lsn */ + { + char label[1024]; + /* notify start of backup to PostgreSQL server */ + time2iso(label, lengthof(label), current.start_time, false); + strncat(label, " with pg_probackup", lengthof(label) - + strlen(" with pg_probackup")); + + /* Call pg_start_backup function in PostgreSQL connect */ + pg_start_backup(label, smooth_checkpoint, ¤t, &source_node_info, source_conn); + elog(LOG, "pg_start_backup START LSN %X/%X", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn)); + } + + /* Sanity: source cluster must be "in future" relatively to dest cluster */ if (current.backup_mode != BACKUP_MODE_FULL && dest_redo.lsn > current.start_lsn) elog(ERROR, "Current START LSN %X/%X is lower than SYNC LSN %X/%X, " @@ -657,7 +699,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, join_path_components(dest_xlog_path, dest_pgdata, PG_XLOG_DIR); fio_mkdir(dest_xlog_path, DIR_PERMISSION, FIO_LOCAL_HOST); start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt, - current.start_lsn, current.tli); + current.start_lsn, current.tli, false); source_filelist = parray_new(); @@ -670,17 +712,16 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, true, true, false, backup_logs, true, 0, FIO_LOCAL_HOST); //REVIEW FIXME. Let's fix that before release. - // TODO filter pg_xlog/wal? // TODO what if wal is not a dir (symlink to a dir)? + // - Currently backup/restore transform pg_wal symlink to directory + // so the problem is not only with catchup. + // if we want to make it right - we must provide the way + // for symlink remapping during restore and catchup. + // By default everything must be left as it is. /* close ssh session in main thread */ fio_disconnect(); - //REVIEW Do we want to do similar calculation for dest? - current.pgdata_bytes += calculate_datasize_of_filelist(source_filelist); - pretty_size(current.pgdata_bytes, pretty_bytes, lengthof(pretty_bytes)); - elog(INFO, "Source PGDATA size: %s", pretty_bytes); - /* * Sort pathname ascending. It is necessary to create intermediate * directories sequentially. @@ -694,8 +735,24 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, */ parray_qsort(source_filelist, pgFileCompareRelPathWithExternal); - /* Extract information about files in source_filelist parsing their names:*/ - parse_filelist_filenames(source_filelist, source_pgdata); + //REVIEW Do we want to do similar calculation for dest? + //REVIEW_ANSWER what for? 
+ { + ssize_t source_bytes = 0; + char pretty_bytes[20]; + + source_bytes += calculate_datasize_of_filelist(source_filelist); + + /* Extract information about files in source_filelist parsing their names:*/ + parse_filelist_filenames(source_filelist, source_pgdata); + filter_filelist(source_filelist, source_pgdata, exclude_absolute_paths_list, exclude_relative_paths_list, "Source"); + + current.pgdata_bytes += calculate_datasize_of_filelist(source_filelist); + + pretty_size(current.pgdata_bytes, pretty_source_bytes, lengthof(pretty_source_bytes)); + pretty_size(source_bytes - current.pgdata_bytes, pretty_bytes, lengthof(pretty_bytes)); + elog(INFO, "Source PGDATA size: %s (excluded %s)", pretty_source_bytes, pretty_bytes); + } elog(LOG, "Start LSN (source): %X/%X, TLI: %X", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn), @@ -728,7 +785,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, * We iterate over source_filelist and for every directory with parent 'pg_tblspc' * we must lookup this directory name in tablespace map. * If we got a match, we treat this directory as tablespace. - * It means that we create directory specified in tablespace_map and + * It means that we create directory specified in tablespace map and * original directory created as symlink to it. */ for (i = 0; i < parray_num(source_filelist); i++) @@ -736,7 +793,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, pgFile *file = (pgFile *) parray_get(source_filelist, i); char parent_dir[MAXPGPATH]; - if (!S_ISDIR(file->mode)) + if (!S_ISDIR(file->mode) || file->excluded) continue; /* @@ -816,9 +873,22 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, source_pg_control_file = parray_remove(source_filelist, control_file_elem_index); } + /* TODO before public release: must be more careful with pg_control. + * when running catchup or incremental restore + * cluster is actually in two states + * simultaneously - old and new, so + * it must contain both pg_control files + * describing those states: global/pg_control_old, global/pg_control_new + * 1. This approach will provide us with means of + * robust detection of previos failures and thus correct operation retrying (or forbidding). + * 2. We will have the ability of preventing instance from starting + * in the middle of our operations. + */ + /* * remove absent source files in dest (dropped tables, etc...) * note: global/pg_control will also be deleted here + * mark dest files (that excluded with source --exclude-path) also for exclusion */ if (current.backup_mode != BACKUP_MODE_FULL) { @@ -828,33 +898,33 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, { bool redundant = true; pgFile *file = (pgFile *) parray_get(dest_filelist, i); + pgFile **src_file = NULL; //TODO optimize it and use some merge-like algorithm //instead of bsearch for each file. 
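/*
 * Editor's note: a minimal, self-contained sketch of the merge-like pass
 * suggested in the TODO above. It does not use the project's parray/pgFile
 * API: plain sorted string arrays stand in for the source and destination
 * file lists, and the comparator is strcmp(), mirroring the ordering the
 * per-file bsearch relies on. One linear pass over both lists finds every
 * destination-only ("redundant") entry instead of doing a search per file.
 */
#include <stdio.h>
#include <string.h>

static void
report_redundant(const char **src, int n_src, const char **dst, int n_dst)
{
	int		is = 0,
			id = 0;

	while (id < n_dst)
	{
		/* once the source list is exhausted, every remaining dst entry is redundant */
		int		cmp = (is < n_src) ? strcmp(src[is], dst[id]) : 1;

		if (cmp < 0)
			is++;				/* source-only entry, nothing to do on the dst side */
		else if (cmp == 0)
		{
			is++;				/* present on both sides, keep it */
			id++;
		}
		else
		{
			printf("redundant: %s\n", dst[id]);	/* candidate for deletion */
			id++;
		}
	}
}

int
main(void)
{
	const char *source[] = {"base/1/112", "base/1/113", "global/pg_control"};
	const char *dest[]   = {"base/1/112", "base/1/999", "global/pg_control"};

	report_redundant(source, 3, dest, 3);	/* prints: redundant: base/1/999 */
	return 0;
}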
- if (parray_bsearch(source_filelist, file, pgFileCompareRelPathWithExternal)) + src_file = (pgFile **) parray_bsearch(source_filelist, file, pgFileCompareRelPathWithExternal); + + if (src_file!= NULL && !(*src_file)->excluded && file->excluded) + (*src_file)->excluded = true; + + if (src_file!= NULL || file->excluded) redundant = false; - /* pg_filenode.map are always restored, because it's crc cannot be trusted */ + /* pg_filenode.map are always copied, because it's crc cannot be trusted */ Assert(file->external_dir_num == 0); if (pg_strcasecmp(file->name, RELMAPPER_FILENAME) == 0) redundant = true; - //REVIEW This check seems unneded. Anyway we delete only redundant stuff below. - /* do not delete the useful internal directories */ - if (S_ISDIR(file->mode) && !redundant) - continue; - /* if file does not exists in destination list, then we can safely unlink it */ if (redundant) { char fullpath[MAXPGPATH]; join_path_components(fullpath, dest_pgdata, file->rel_path); - fio_delete(file->mode, fullpath, FIO_DB_HOST); elog(VERBOSE, "Deleted file \"%s\"", fullpath); - /* shrink pgdata list */ + /* shrink dest pgdata list */ pgFileFree(file); parray_remove(dest_filelist, i); i--; @@ -875,10 +945,11 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* run copy threads */ elog(INFO, "Start transferring data files"); time(&start_time); - catchup_isok = catchup_multithreaded_copy(num_threads, &source_node_info, + transfered_datafiles_bytes = catchup_multithreaded_copy(num_threads, &source_node_info, source_pgdata, dest_pgdata, source_filelist, dest_filelist, dest_redo.lsn, current.backup_mode); + catchup_isok = transfered_datafiles_bytes != -1; /* at last copy control file */ if (catchup_isok) @@ -889,17 +960,22 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, join_path_components(to_fullpath, dest_pgdata, source_pg_control_file->rel_path); copy_pgcontrol_file(from_fullpath, FIO_DB_HOST, to_fullpath, FIO_LOCAL_HOST, source_pg_control_file); + transfered_datafiles_bytes += source_pg_control_file->size; } - time(&end_time); - pretty_time_interval(difftime(end_time, start_time), + if (!catchup_isok) + { + char pretty_time[20]; + char pretty_transfered_data_bytes[20]; + + time(&end_time); + pretty_time_interval(difftime(end_time, start_time), pretty_time, lengthof(pretty_time)); - if (catchup_isok) - elog(INFO, "Data files are transferred, time elapsed: %s", - pretty_time); - else - elog(ERROR, "Data files transferring failed, time elapsed: %s", - pretty_time); + pretty_size(transfered_datafiles_bytes, pretty_transfered_data_bytes, lengthof(pretty_transfered_data_bytes)); + + elog(ERROR, "Catchup failed. Transfered: %s, time elapsed: %s", + pretty_transfered_data_bytes, pretty_time); + } /* Notify end of backup */ { @@ -912,17 +988,6 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, pg_silent_client_messages(source_conn); - //REVIEW. Do we want to support pg 9.5? I suppose we never test it... - //Maybe check it and error out early? - /* Create restore point - * Only if backup is from master. - * For PG 9.5 create restore point only if pguser is superuser. 
- */ - if (!current.from_replica && - !(source_node_info.server_version < 90600 && - !source_node_info.is_superuser)) //TODO: check correctness - pg_create_restore_point(source_conn, current.start_time); - /* Execute pg_stop_backup using PostgreSQL connection */ pg_stop_backup_send(source_conn, source_node_info.server_version, current.from_replica, exclusive_backup, &stop_backup_query_text); @@ -965,22 +1030,23 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, } #endif - if(wait_WAL_streaming_end(NULL)) - elog(ERROR, "WAL streaming failed"); + /* wait for end of wal streaming and calculate wal size transfered */ + { + parray *wal_files_list = NULL; + wal_files_list = parray_new(); - //REVIEW Please add a comment about these lsns. It is a crutial part of the algorithm. - current.recovery_xid = stop_backup_result.snapshot_xid; + if (wait_WAL_streaming_end(wal_files_list)) + elog(ERROR, "WAL streaming failed"); - elog(LOG, "Getting the Recovery Time from WAL"); + for (i = 0; i < parray_num(wal_files_list); i++) + { + pgFile *file = (pgFile *) parray_get(wal_files_list, i); + transfered_walfiles_bytes += file->size; + } - /* iterate over WAL from stop_backup lsn to start_backup lsn */ - if (!read_recovery_info(dest_xlog_path, current.tli, - instance_config.xlog_seg_size, - current.start_lsn, current.stop_lsn, - ¤t.recovery_time)) - { - elog(LOG, "Failed to find Recovery Time in WAL, forced to trust current_timestamp"); - current.recovery_time = stop_backup_result.invocation_time; + parray_walk(wal_files_list, pgFileFree); + parray_free(wal_files_list); + wal_files_list = NULL; } /* @@ -994,15 +1060,33 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* close ssh session in main thread */ fio_disconnect(); - /* Sync all copied files unless '--no-sync' flag is used */ - if (catchup_isok) + /* fancy reporting */ { - if (sync_dest_files) - catchup_sync_destination_files(dest_pgdata, FIO_LOCAL_HOST, source_filelist, source_pg_control_file); - else - elog(WARNING, "Files are not synced to disk"); + char pretty_transfered_data_bytes[20]; + char pretty_transfered_wal_bytes[20]; + char pretty_time[20]; + + time(&end_time); + pretty_time_interval(difftime(end_time, start_time), + pretty_time, lengthof(pretty_time)); + pretty_size(transfered_datafiles_bytes, pretty_transfered_data_bytes, lengthof(pretty_transfered_data_bytes)); + pretty_size(transfered_walfiles_bytes, pretty_transfered_wal_bytes, lengthof(pretty_transfered_wal_bytes)); + + elog(INFO, "Databases synchronized. Transfered datafiles size: %s, transfered wal size: %s, time elapsed: %s", + pretty_transfered_data_bytes, pretty_transfered_wal_bytes, pretty_time); + + if (current.backup_mode != BACKUP_MODE_FULL) + elog(INFO, "Catchup incremental ratio (less is better): %.f%% (%s/%s)", + ((float) transfered_datafiles_bytes / current.pgdata_bytes) * 100, + pretty_transfered_data_bytes, pretty_source_bytes); } + /* Sync all copied files unless '--no-sync' flag is used */ + if (sync_dest_files) + catchup_sync_destination_files(dest_pgdata, FIO_LOCAL_HOST, source_filelist, source_pg_control_file); + else + elog(WARNING, "Files are not synced to disk"); + /* Cleanup */ if (dest_filelist) { @@ -1013,8 +1097,5 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, parray_free(source_filelist); pgFileFree(source_pg_control_file); - //REVIEW: Are we going to do that before release? 
- /* TODO: show the amount of transfered data in bytes and calculate incremental ratio */ - return 0; } diff --git a/src/data.c b/src/data.c index 49b696059..f02e3fd14 100644 --- a/src/data.c +++ b/src/data.c @@ -28,10 +28,10 @@ typedef struct DataPage { BackupPageHeader bph; - char data[BLCKSZ]; + char data[BLCKSZ]; } DataPage; -static bool get_page_header(FILE *in, const char *fullpath, BackupPageHeader* bph, +static bool get_page_header(FILE *in, const char *fullpath, BackupPageHeader *bph, pg_crc32 *crc, bool use_crc32c); #ifdef HAVE_LIBZ @@ -40,9 +40,9 @@ static int32 zlib_compress(void *dst, size_t dst_size, void const *src, size_t src_size, int level) { - uLongf compressed_size = dst_size; - int rc = compress2(dst, &compressed_size, src, src_size, - level); + uLongf compressed_size = dst_size; + int rc = compress2(dst, &compressed_size, src, src_size, + level); return rc == Z_OK ? compressed_size : rc; } @@ -51,8 +51,8 @@ zlib_compress(void *dst, size_t dst_size, void const *src, size_t src_size, static int32 zlib_decompress(void *dst, size_t dst_size, void const *src, size_t src_size) { - uLongf dest_len = dst_size; - int rc = uncompress(dst, &dest_len, src, src_size); + uLongf dest_len = dst_size; + int rc = uncompress(dst, &dest_len, src, src_size); return rc == Z_OK ? dest_len : rc; } @@ -63,7 +63,7 @@ zlib_decompress(void *dst, size_t dst_size, void const *src, size_t src_size) * written in the destination buffer, or -1 if compression fails. */ int32 -do_compress(void* dst, size_t dst_size, void const* src, size_t src_size, +do_compress(void *dst, size_t dst_size, void const *src, size_t src_size, CompressAlg alg, int level, const char **errormsg) { switch (alg) @@ -73,13 +73,13 @@ do_compress(void* dst, size_t dst_size, void const* src, size_t src_size, return -1; #ifdef HAVE_LIBZ case ZLIB_COMPRESS: - { - int32 ret; - ret = zlib_compress(dst, dst_size, src, src_size, level); - if (ret < Z_OK && errormsg) - *errormsg = zError(ret); - return ret; - } + { + int32 ret; + ret = zlib_compress(dst, dst_size, src, src_size, level); + if (ret < Z_OK && errormsg) + *errormsg = zError(ret); + return ret; + } #endif case PGLZ_COMPRESS: return pglz_compress(src, src_size, dst, PGLZ_strategy_always); @@ -93,25 +93,25 @@ do_compress(void* dst, size_t dst_size, void const* src, size_t src_size, * decompressed in the destination buffer, or -1 if decompression fails. 
*/ int32 -do_decompress(void* dst, size_t dst_size, void const* src, size_t src_size, +do_decompress(void *dst, size_t dst_size, void const *src, size_t src_size, CompressAlg alg, const char **errormsg) { switch (alg) { case NONE_COMPRESS: case NOT_DEFINED_COMPRESS: - if (errormsg) + if (errormsg) *errormsg = "Invalid compression algorithm"; return -1; #ifdef HAVE_LIBZ case ZLIB_COMPRESS: - { - int32 ret; - ret = zlib_decompress(dst, dst_size, src, src_size); - if (ret < Z_OK && errormsg) - *errormsg = zError(ret); - return ret; - } + { + int32 ret; + ret = zlib_decompress(dst, dst_size, src, src_size); + if (ret < Z_OK && errormsg) + *errormsg = zError(ret); + return ret; + } #endif case PGLZ_COMPRESS: @@ -125,7 +125,6 @@ do_decompress(void* dst, size_t dst_size, void const* src, size_t src_size, return -1; } - #define ZLIB_MAGIC 0x78 /* @@ -162,7 +161,7 @@ page_may_be_compressed(Page page, CompressAlg alg, uint32 backup_version) /* For zlib we can check page magic: * https://stackoverflow.com/questions/9050260/what-does-a-zlib-header-look-like */ - if (alg == ZLIB_COMPRESS && *(char*)page != ZLIB_MAGIC) + if (alg == ZLIB_COMPRESS && *(char *)page != ZLIB_MAGIC) { return false; } @@ -281,8 +280,6 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode, Page page, bool strict, uint32 checksum_version, - int ptrack_version_num, - const char *ptrack_schema, const char *from_fullpath, PageState *page_st) { @@ -404,8 +401,7 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, blknum, from_fullpath, file->exists_in_prev ? "true" : "false", (uint32) (page_st->lsn >> 32), (uint32) page_st->lsn, - (uint32) (prev_backup_start_lsn >> 32), (uint32) prev_backup_start_lsn - ); + (uint32) (prev_backup_start_lsn >> 32), (uint32) prev_backup_start_lsn); return SkipCurrentPage; } @@ -422,7 +418,7 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum, { int compressed_size = 0; size_t write_buffer_size = 0; - char write_buffer[BLCKSZ*2]; /* compressed page may require more space than uncompressed */ + char write_buffer[BLCKSZ*2]; /* compressed page may require more space than uncompressed */ BackupPageHeader* bph = (BackupPageHeader*)write_buffer; const char *errormsg = NULL; @@ -463,16 +459,13 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum, return compressed_size; } -/* взята из compress_and_backup_page, но выпилена вся магия заголовков и компрессии, просто копирование 1-в-1 */ +/* Write page as-is. 
TODO: make it fastpath option in compress_and_backup_page() */ static int -copy_page(pgFile *file, BlockNumber blknum, - FILE *in, FILE *out, Page page, - const char *to_fullpath) +write_page(pgFile *file, FILE *out, Page page) { /* write data page */ if (fio_fwrite(out, page, BLCKSZ) != BLCKSZ) - elog(ERROR, "File: \"%s\", cannot write at block %u: %s", - to_fullpath, blknum, strerror(errno)); + return -1; file->write_size += BLCKSZ; file->uncompressed_size += BLCKSZ; @@ -492,13 +485,12 @@ void backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath, XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode, CompressAlg calg, int clevel, uint32 checksum_version, - int ptrack_version_num, const char *ptrack_schema, HeaderMap *hdr_map, bool is_merge) { int rc; bool use_pagemap; - char *errmsg = NULL; - BlockNumber err_blknum = 0; + char *errmsg = NULL; + BlockNumber err_blknum = 0; /* page headers */ BackupPageHeader2 *headers = NULL; @@ -547,7 +539,7 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat * Such files should be fully copied. */ - if (file->pagemap.bitmapsize == PageBitmapIsEmpty || + if (file->pagemap.bitmapsize == PageBitmapIsEmpty || file->pagemap_isabsent || !file->exists_in_prev || !file->pagemap.bitmap) use_pagemap = false; @@ -557,7 +549,6 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat /* Remote mode */ if (fio_is_remote(FIO_DB_HOST)) { - rc = fio_send_pages(to_fullpath, from_fullpath, file, /* send prev backup START_LSN */ (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && @@ -576,7 +567,7 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr, calg, clevel, checksum_version, use_pagemap, - &headers, backup_mode, ptrack_version_num, ptrack_schema); + &headers, backup_mode); } /* check for errors */ @@ -646,30 +637,21 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat } /* - * Backup data file in the from_root directory to the to_root directory with - * same relative path. If prev_backup_start_lsn is not NULL, only pages with + * Catchup data file in the from_root directory to the to_root directory with + * same relative path. If sync_lsn is not NULL, only pages with equal or * higher lsn will be copied. * Not just copy file, but read it block by block (use bitmap in case of - * incremental backup), validate checksum, optionally compress and write to - * backup with special header. + * incremental catchup), validate page checksum. */ void catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath, - XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode, - CompressAlg calg, int clevel, uint32 checksum_version, - int ptrack_version_num, const char *ptrack_schema, - bool is_merge, size_t prev_size) + XLogRecPtr sync_lsn, BackupMode backup_mode, + uint32 checksum_version, size_t prev_size) { int rc; bool use_pagemap; char *errmsg = NULL; BlockNumber err_blknum = 0; - /* page headers */ - BackupPageHeader2 *headers = NULL; - - /* sanity */ - if (file->size % BLCKSZ != 0) - elog(WARNING, "File: \"%s\", invalid file size %zu", from_fullpath, file->size); /* * Compute expected number of blocks in the file. 
@@ -679,7 +661,7 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa
 	file->n_blocks = file->size/BLCKSZ;
 
 	/*
-	 * Skip unchanged file only if it exists in previous backup.
+	 * Skip unchanged file only if it exists in destination directory.
 	 * This way we can correctly handle null-sized files which are
 	 * not tracked by pagemap and thus always marked as unchanged.
 	 */
@@ -688,8 +670,7 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa
 		file->exists_in_prev && file->size == prev_size && !file->pagemap_isabsent)
 	{
 		/*
-		 * There are no changed blocks since last backup. We want to make
-		 * incremental backup, so we should exit.
+		 * There are no changed pages, so there is nothing to copy.
 		 */
 		file->write_size = BYTES_INVALID;
 		return;
@@ -699,16 +680,10 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa
 	file->read_size = 0;
 	file->write_size = 0;
 	file->uncompressed_size = 0;
-	INIT_FILE_CRC32(true, file->crc);
 
 	/*
-	 * Read each page, verify checksum and write it to backup.
-	 * If page map is empty or file is not present in previous backup
-	 * backup all pages of the relation.
-	 *
-	 * In PTRACK 1.x there was a problem
-	 * of data files with missing _ptrack map.
-	 * Such files should be fully copied.
+	 * If page map is empty or file is not present in destination directory,
+	 * then copy all pages of the relation.
 	 */
 
 	if (file->pagemap.bitmapsize == PageBitmapIsEmpty ||
@@ -726,29 +701,28 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa
 	{
 		rc = fio_copy_pages(to_fullpath, from_fullpath, file,
 							/* send prev backup START_LSN */
-							(backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) &&
-							file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr,
-							calg, clevel, checksum_version,
+							((backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) &&
+							file->exists_in_prev) ? sync_lsn : InvalidXLogRecPtr,
+							NONE_COMPRESS, 1, checksum_version,
 							/* send pagemap if any */
 							use_pagemap,
 							/* variables for error reporting */
-							&err_blknum, &errmsg, &headers);
+							&err_blknum, &errmsg);
 	}
 	else
 	{
 		/* TODO: stop handling errors internally */
 		rc = copy_pages(to_fullpath, from_fullpath, file,
 						/* send prev backup START_LSN */
-						(backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) &&
-						file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr,
-						checksum_version, use_pagemap,
-						backup_mode, ptrack_version_num, ptrack_schema);
+						((backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) &&
+						file->exists_in_prev) ? sync_lsn : InvalidXLogRecPtr,
+						checksum_version, use_pagemap, backup_mode);
 	}
 
 	/* check for errors */
 	if (rc == FILE_MISSING)
 	{
-		elog(is_merge ? 
ERROR : LOG, "File not found: \"%s\"", from_fullpath); + elog(LOG, "File not found: \"%s\"", from_fullpath); file->write_size = FILE_NOT_FOUND; goto cleanup; } @@ -784,11 +758,6 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa file->read_size = rc * BLCKSZ; - /* refresh n_blocks for FULL and DELTA */ - if (backup_mode == BACKUP_MODE_FULL || - backup_mode == BACKUP_MODE_DIFF_DELTA) - file->n_blocks = file->read_size / BLCKSZ; - /* Determine that file didn`t changed in case of incremental catchup */ if (backup_mode != BACKUP_MODE_FULL && file->exists_in_prev && @@ -799,13 +768,8 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa } cleanup: - - /* finish CRC calculation */ - FIN_FILE_CRC32(true, file->crc); - pg_free(errmsg); pg_free(file->pagemap.bitmap); - pg_free(headers); } /* @@ -816,9 +780,9 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa */ void backup_non_data_file(pgFile *file, pgFile *prev_file, - const char *from_fullpath, const char *to_fullpath, - BackupMode backup_mode, time_t parent_backup_time, - bool missing_ok) + const char *from_fullpath, const char *to_fullpath, + BackupMode backup_mode, time_t parent_backup_time, + bool missing_ok) { /* special treatment for global/pg_control */ if (file->external_dir_num == 0 && strcmp(file->rel_path, XLOG_CONTROL_FILE) == 0) @@ -891,7 +855,7 @@ restore_data_file(parray *parent_chain, pgFile *dest_file, FILE *out, /* page headers */ BackupPageHeader2 *headers = NULL; - pgBackup *backup = (pgBackup *) parray_get(parent_chain, backup_seq); + pgBackup *backup = (pgBackup *) parray_get(parent_chain, backup_seq); if (use_bitmap) backup_seq++; @@ -899,7 +863,7 @@ restore_data_file(parray *parent_chain, pgFile *dest_file, FILE *out, backup_seq--; /* lookup file in intermediate backup */ - res_file = parray_bsearch(backup->files, dest_file, pgFileCompareRelPathWithExternal); + res_file = parray_bsearch(backup->files, dest_file, pgFileCompareRelPathWithExternal); tmp_file = (res_file) ? *res_file : NULL; /* Destination file is not exists yet at this moment */ @@ -951,13 +915,13 @@ restore_data_file(parray *parent_chain, pgFile *dest_file, FILE *out, * copy the file from backup. */ total_write_len += restore_data_file_internal(in, out, tmp_file, - parse_program_version(backup->program_version), - from_fullpath, to_fullpath, dest_file->n_blocks, - use_bitmap ? &(dest_file)->pagemap : NULL, - checksum_map, backup->checksum_version, - /* shiftmap can be used only if backup state precedes the shift */ - backup->stop_lsn <= shift_lsn ? lsn_map : NULL, - headers); + parse_program_version(backup->program_version), + from_fullpath, to_fullpath, dest_file->n_blocks, + use_bitmap ? &(dest_file)->pagemap : NULL, + checksum_map, backup->checksum_version, + /* shiftmap can be used only if backup state precedes the shift */ + backup->stop_lsn <= shift_lsn ? 
lsn_map : NULL, + headers); if (fclose(in) != 0) elog(ERROR, "Cannot close file \"%s\": %s", from_fullpath, @@ -983,15 +947,15 @@ restore_data_file(parray *parent_chain, pgFile *dest_file, FILE *out, */ size_t restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_version, - const char *from_fullpath, const char *to_fullpath, int nblocks, - datapagemap_t *map, PageState *checksum_map, int checksum_version, - datapagemap_t *lsn_map, BackupPageHeader2 *headers) + const char *from_fullpath, const char *to_fullpath, int nblocks, + datapagemap_t *map, PageState *checksum_map, int checksum_version, + datapagemap_t *lsn_map, BackupPageHeader2 *headers) { BlockNumber blknum = 0; - int n_hdr = -1; - size_t write_len = 0; - off_t cur_pos_out = 0; - off_t cur_pos_in = 0; + int n_hdr = -1; + size_t write_len = 0; + off_t cur_pos_out = 0; + off_t cur_pos_in = 0; /* should not be possible */ Assert(!(backup_version >= 20400 && file->n_headers <= 0)); @@ -1007,7 +971,7 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers * but should never happen in case of blocks from FULL backup. */ if (fio_fseek(out, cur_pos_out) < 0) - elog(ERROR, "Cannot seek block %u of \"%s\": %s", + elog(ERROR, "Cannot seek block %u of \"%s\": %s", blknum, to_fullpath, strerror(errno)); for (;;) @@ -1020,7 +984,7 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers bool is_compressed = false; /* incremental restore vars */ - uint16 page_crc = 0; + uint16 page_crc = 0; XLogRecPtr page_lsn = InvalidXLogRecPtr; /* check for interrupt */ @@ -1072,7 +1036,6 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers * Now we have to deal with backward compatibility. */ read_len = MAXALIGN(compressed_size); - } else break; @@ -1183,8 +1146,7 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers * page_may_be_compressed() function. 
*/ if (compressed_size != BLCKSZ - || page_may_be_compressed(page.data, file->compress_alg, - backup_version)) + || page_may_be_compressed(page.data, file->compress_alg, backup_version)) { is_compressed = true; } @@ -1244,10 +1206,10 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers */ void restore_non_data_file_internal(FILE *in, FILE *out, pgFile *file, - const char *from_fullpath, const char *to_fullpath) + const char *from_fullpath, const char *to_fullpath) { - size_t read_len = 0; - char *buf = pgut_malloc(STDIO_BUFSIZE); /* 64kB buffer */ + size_t read_len = 0; + char *buf = pgut_malloc(STDIO_BUFSIZE); /* 64kB buffer */ /* copy content */ for (;;) @@ -1310,7 +1272,7 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, tmp_backup = dest_backup->parent_backup_link; while (tmp_backup) { - pgFile **res_file = NULL; + pgFile **res_file = NULL; /* lookup file in intermediate backup */ res_file = parray_bsearch(tmp_backup->files, dest_file, pgFileCompareRelPathWithExternal); @@ -1420,10 +1382,10 @@ backup_non_data_file_internal(const char *from_fullpath, const char *to_fullpath, pgFile *file, bool missing_ok) { - FILE *in = NULL; - FILE *out = NULL; - ssize_t read_len = 0; - char *buf = NULL; + FILE *in = NULL; + FILE *out = NULL; + ssize_t read_len = 0; + char *buf = NULL; INIT_FILE_CRC32(true, file->crc); @@ -1553,7 +1515,7 @@ backup_non_data_file_internal(const char *from_fullpath, */ bool create_empty_file(fio_location from_location, const char *to_root, - fio_location to_location, pgFile *file) + fio_location to_location, pgFile *file) { char to_path[MAXPGPATH]; FILE *out; @@ -1650,7 +1612,7 @@ check_data_file(ConnectionArgs *arguments, pgFile *file, BlockNumber nblocks = 0; int page_state; char curr_page[BLCKSZ]; - bool is_valid = true; + bool is_valid = true; in = fopen(from_fullpath, PG_BINARY_R); if (in == NULL) @@ -1686,7 +1648,7 @@ check_data_file(ConnectionArgs *arguments, pgFile *file, page_state = prepare_page(file, InvalidXLogRecPtr, blknum, in, BACKUP_MODE_FULL, curr_page, false, checksum_version, - 0, NULL, from_fullpath, &page_st); + from_fullpath, &page_st); if (page_state == PageIsTruncated) break; @@ -1744,9 +1706,9 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, while (true) { int rc = 0; - size_t len = 0; + size_t len = 0; DataPage compressed_page; /* used as read buffer */ - int compressed_size = 0; + int compressed_size = 0; DataPage page; BlockNumber blknum = 0; PageState page_st; @@ -1834,7 +1796,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, || page_may_be_compressed(compressed_page.data, file->compress_alg, backup_version)) { - int32 uncompressed_size = 0; + int32 uncompressed_size = 0; const char *errormsg = NULL; uncompressed_size = do_decompress(page.data, BLCKSZ, @@ -1862,13 +1824,13 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, } rc = validate_one_page(page.data, - file->segno * RELSEG_SIZE + blknum, - stop_lsn, &page_st, checksum_version); + file->segno * RELSEG_SIZE + blknum, + stop_lsn, &page_st, checksum_version); } else rc = validate_one_page(compressed_page.data, - file->segno * RELSEG_SIZE + blknum, - stop_lsn, &page_st, checksum_version); + file->segno * RELSEG_SIZE + blknum, + stop_lsn, &page_st, checksum_version); switch (rc) { @@ -1986,11 +1948,11 @@ datapagemap_t * get_lsn_map(const char *fullpath, uint32 checksum_version, int n_blocks, XLogRecPtr shift_lsn, BlockNumber segmentno) { - FILE *in = 
NULL; - BlockNumber blknum = 0; - char read_buffer[BLCKSZ]; - char in_buf[STDIO_BUFSIZE]; - datapagemap_t *lsn_map = NULL; + FILE *in = NULL; + BlockNumber blknum = 0; + char read_buffer[BLCKSZ]; + char in_buf[STDIO_BUFSIZE]; + datapagemap_t *lsn_map = NULL; Assert(shift_lsn > 0); @@ -2069,10 +2031,10 @@ get_page_header(FILE *in, const char *fullpath, BackupPageHeader* bph, else if (read_len != 0 && feof(in)) elog(ERROR, "Odd size page found at offset %lu of \"%s\"", - ftell(in), fullpath); + ftello(in), fullpath); else elog(ERROR, "Cannot read header at offset %lu of \"%s\": %s", - ftell(in), fullpath, strerror(errno)); + ftello(in), fullpath, strerror(errno)); } /* In older versions < 2.4.0, when crc for file was calculated, header was @@ -2117,7 +2079,7 @@ int send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel, uint32 checksum_version, bool use_pagemap, BackupPageHeader2 **headers, - BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema) + BackupMode backup_mode) { FILE *in = NULL; FILE *out = NULL; @@ -2175,7 +2137,6 @@ send_pages(const char *to_fullpath, const char *from_fullpath, int rc = prepare_page(file, prev_backup_start_lsn, blknum, in, backup_mode, curr_page, true, checksum_version, - ptrack_version_num, ptrack_schema, from_fullpath, &page_st); if (rc == PageIsTruncated) @@ -2254,17 +2215,19 @@ send_pages(const char *to_fullpath, const char *from_fullpath, return n_blocks_read; } -/* copy local file (взята из send_pages, но используется простое копирование странички, без добавления заголовков и компрессии) */ +/* + * Copy local data file just as send_pages but without attaching additional header and compression + */ int copy_pages(const char *to_fullpath, const char *from_fullpath, - pgFile *file, XLogRecPtr sync_lsn, - uint32 checksum_version, bool use_pagemap, - BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema) + pgFile *file, XLogRecPtr sync_lsn, + uint32 checksum_version, bool use_pagemap, + BackupMode backup_mode) { FILE *in = NULL; FILE *out = NULL; - char curr_page[BLCKSZ]; - int n_blocks_read = 0; + char curr_page[BLCKSZ]; + int n_blocks_read = 0; BlockNumber blknum = 0; datapagemap_iterator_t *iter = NULL; @@ -2308,44 +2271,36 @@ copy_pages(const char *to_fullpath, const char *from_fullpath, out = fio_fopen(to_fullpath, PG_BINARY_R "+", FIO_BACKUP_HOST); if (out == NULL) elog(ERROR, "Cannot open destination file \"%s\": %s", - to_fullpath, strerror(errno)); + to_fullpath, strerror(errno)); /* update file permission */ - if (fio_chmod(to_fullpath, file->mode, FIO_BACKUP_HOST) == -1) + if (chmod(to_fullpath, file->mode) == -1) elog(ERROR, "Cannot change mode of \"%s\": %s", to_fullpath, - strerror(errno)); + strerror(errno)); - elog(VERBOSE, "ftruncate file \"%s\" to size %lu", - to_fullpath, file->size); - if (fio_ftruncate(out, file->size) == -1) - elog(ERROR, "Cannot ftruncate file \"%s\" to size %lu: %s", - to_fullpath, file->size, strerror(errno)); - - if (!fio_is_remote_file(out)) - { - out_buf = pgut_malloc(STDIO_BUFSIZE); - setvbuf(out, out_buf, _IOFBF, STDIO_BUFSIZE); - } + /* Enable buffering for output file */ + out_buf = pgut_malloc(STDIO_BUFSIZE); + setvbuf(out, out_buf, _IOFBF, STDIO_BUFSIZE); while (blknum < file->n_blocks) { PageState page_st; int rc = prepare_page(file, sync_lsn, - blknum, in, backup_mode, curr_page, - true, checksum_version, - ptrack_version_num, ptrack_schema, - from_fullpath, &page_st); + blknum, in, 
backup_mode, curr_page,
+								true, checksum_version,
+								from_fullpath, &page_st);
 
 		if (rc == PageIsTruncated)
 			break;
 
 		else if (rc == PageIsOk)
 		{
-			if (fio_fseek(out, blknum * BLCKSZ) < 0)
-			{
-				elog(ERROR, "Cannot seek block %u of \"%s\": %s",
-					blknum, to_fullpath, strerror(errno));
-			}
-			copy_page(file, blknum, in, out, curr_page, to_fullpath);
+			if (fseek(out, blknum * BLCKSZ, SEEK_SET) != 0)
+				elog(ERROR, "Cannot seek to position %u in destination file \"%s\": %s",
+					 blknum * BLCKSZ, to_fullpath, strerror(errno));
+
+			if (write_page(file, out, curr_page) != BLCKSZ)
+				elog(ERROR, "File: \"%s\", cannot write at block %u: %s",
+					 to_fullpath, blknum, strerror(errno));
 		}
 
 		n_blocks_read++;
@@ -2361,13 +2316,36 @@ copy_pages(const char *to_fullpath, const char *from_fullpath,
 		blknum++;
 	}
 
+	/* truncate output file if required */
+	if (fseek(out, 0, SEEK_END) != 0)
+		elog(ERROR, "Cannot seek to end of file position in destination file \"%s\": %s",
+			 to_fullpath, strerror(errno));
+	{
+		long pos = ftell(out);
+
+		if (pos < 0)
+			elog(ERROR, "Cannot get position in destination file \"%s\": %s",
+				 to_fullpath, strerror(errno));
+
+		if (pos != file->size)
+		{
+			if (fflush(out) != 0)
+				elog(ERROR, "Cannot flush destination file \"%s\": %s",
+					 to_fullpath, strerror(errno));
+
+			if (ftruncate(fileno(out), file->size) == -1)
+				elog(ERROR, "Cannot ftruncate file \"%s\" to size %lu: %s",
+					 to_fullpath, file->size, strerror(errno));
+		}
+	}
+
 	/* cleanup */
-	if (in && fclose(in))
+	if (fclose(in))
 		elog(ERROR, "Cannot close the source file \"%s\": %s",
 			 to_fullpath, strerror(errno));
 
-	/* close local output file */
-	if (out && fio_fclose(out))
+	/* close output file */
+	if (fclose(out))
 		elog(ERROR, "Cannot close the destination file \"%s\": %s",
 			 to_fullpath, strerror(errno));
 
@@ -2503,19 +2481,19 @@ write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map,
 	/* when running merge we must write headers into temp map */
 	map_path = (is_merge) ? hdr_map->path_tmp : hdr_map->path;
-	read_len = (file->n_headers+1) * sizeof(BackupPageHeader2);
+	read_len = (file->n_headers + 1) * sizeof(BackupPageHeader2);
 
 	/* calculate checksums */
 	INIT_FILE_CRC32(true, file->hdr_crc);
 	COMP_FILE_CRC32(true, file->hdr_crc, headers, read_len);
 	FIN_FILE_CRC32(true, file->hdr_crc);
 
-	zheaders = pgut_malloc(read_len*2);
-	memset(zheaders, 0, read_len*2);
+	zheaders = pgut_malloc(read_len * 2);
+	memset(zheaders, 0, read_len * 2);
 
 	/* compress headers */
-	z_len = do_compress(zheaders, read_len*2, headers,
-						read_len, ZLIB_COMPRESS, 1, &errormsg);
+	z_len = do_compress(zheaders, read_len * 2, headers,
+						read_len, ZLIB_COMPRESS, 1, &errormsg);
 
 	/* writing to header map must be serialized */
 	pthread_lock(&(hdr_map->mutex)); /* what if we crash while trying to obtain mutex? 
*/ @@ -2559,7 +2537,7 @@ write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map, if (fwrite(zheaders, 1, z_len, hdr_map->fp) != z_len) elog(ERROR, "Cannot write to file \"%s\": %s", map_path, strerror(errno)); - file->hdr_size = z_len; /* save the length of compressed headers */ + file->hdr_size = z_len; /* save the length of compressed headers */ hdr_map->offset += z_len; /* update current offset in map */ /* End critical section */ diff --git a/src/dir.c b/src/dir.c index 473534c8b..bac583b4d 100644 --- a/src/dir.c +++ b/src/dir.c @@ -121,8 +121,6 @@ typedef struct TablespaceCreatedList TablespaceCreatedListCell *tail; } TablespaceCreatedList; -static int pgCompareString(const void *str1, const void *str2); - static char dir_check_file(pgFile *file, bool backup_logs); static void dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, @@ -224,6 +222,7 @@ pgFileInit(const char *rel_path) // May be add? // pg_atomic_clear_flag(file->lock); + file->excluded = false; return file; } @@ -426,6 +425,26 @@ pgFileCompareName(const void *f1, const void *f2) return strcmp(f1p->name, f2p->name); } +/* Compare pgFile->name with string in ascending order of ASCII code. */ +int +pgFileCompareNameWithString(const void *f1, const void *f2) +{ + pgFile *f1p = *(pgFile **)f1; + char *f2s = *(char **)f2; + + return strcmp(f1p->name, f2s); +} + +/* Compare pgFile->rel_path with string in ascending order of ASCII code. */ +int +pgFileCompareRelPathWithString(const void *f1, const void *f2) +{ + pgFile *f1p = *(pgFile **)f1; + char *f2s = *(char **)f2; + + return strcmp(f1p->rel_path, f2s); +} + /* * Compare two pgFile with their relative path and external_dir_num in ascending * order of ASСII code. @@ -492,12 +511,26 @@ pgFileCompareSizeDesc(const void *f1, const void *f2) return -1 * pgFileCompareSize(f1, f2); } -static int +int pgCompareString(const void *str1, const void *str2) { return strcmp(*(char **) str1, *(char **) str2); } +/* + * From bsearch(3): "The compar routine is expected to have two argu‐ + * ments which point to the key object and to an array member, in that order" + * But in practice this is opposite, so we took strlen from second string (search key) + * This is checked by tests.catchup.CatchupTest.test_catchup_with_exclude_path + */ +int +pgPrefixCompareString(const void *str1, const void *str2) +{ + const char *s1 = *(char **) str1; + const char *s2 = *(char **) str2; + return strncmp(s1, s2, strlen(s2)); +} + /* Compare two Oids */ int pgCompareOid(const void *f1, const void *f2) diff --git a/src/help.c b/src/help.c index 921feaec0..1515359e4 100644 --- a/src/help.c +++ b/src/help.c @@ -124,7 +124,7 @@ help_pg_probackup(void) printf(_("\n %s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [-C]\n")); - printf(_(" [--stream [-S slot-name]] [--temp-slot]\n")); + printf(_(" [--stream [-S slot-name] [--temp-slot]]\n")); printf(_(" [--backup-pg-log] [-j num-threads] [--progress]\n")); printf(_(" [--no-validate] [--skip-block-validation]\n")); printf(_(" [--external-dirs=external-directories-paths]\n")); @@ -251,9 +251,10 @@ help_pg_probackup(void) printf(_("\n %s catchup -b catchup-mode\n"), PROGRAM_NAME); printf(_(" --source-pgdata=path_to_pgdata_on_remote_server\n")); printf(_(" --destination-pgdata=path_to_local_dir\n")); - printf(_(" [--stream [-S slot-name]] [--temp-slot]\n")); + printf(_(" [--stream [-S slot-name] [--temp-slot | --perm-slot]]\n")); printf(_(" [-j 
num-threads]\n")); printf(_(" [-T OLDDIR=NEWDIR]\n")); + printf(_(" [--exclude-path=path_prefix]\n")); printf(_(" [-d dbname] [-h host] [-p port] [-U username]\n")); printf(_(" [-w --no-password] [-W --password]\n")); printf(_(" [--remote-proto] [--remote-host]\n")); @@ -295,7 +296,7 @@ help_backup(void) { printf(_("\n%s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [-C]\n")); - printf(_(" [--stream [-S slot-name] [--temp-slot]\n")); + printf(_(" [--stream [-S slot-name] [--temp-slot]]\n")); printf(_(" [--backup-pg-log] [-j num-threads] [--progress]\n")); printf(_(" [--no-validate] [--skip-block-validation]\n")); printf(_(" [-E external-directories-paths]\n")); @@ -1031,9 +1032,10 @@ help_catchup(void) printf(_("\n%s catchup -b catchup-mode\n"), PROGRAM_NAME); printf(_(" --source-pgdata=path_to_pgdata_on_remote_server\n")); printf(_(" --destination-pgdata=path_to_local_dir\n")); - printf(_(" [--stream [-S slot-name]] [--temp-slot]\n")); + printf(_(" [--stream [-S slot-name]] [--temp-slot | --perm-slot]\n")); printf(_(" [-j num-threads]\n")); printf(_(" [-T OLDDIR=NEWDIR]\n")); + printf(_(" [--exclude-path=path_prefix]\n")); printf(_(" [-d dbname] [-h host] [-p port] [-U username]\n")); printf(_(" [-w --no-password] [-W --password]\n")); printf(_(" [--remote-proto] [--remote-host]\n")); @@ -1045,11 +1047,15 @@ help_catchup(void) printf(_(" --stream stream the transaction log (only supported mode)\n")); printf(_(" -S, --slot=SLOTNAME replication slot to use\n")); printf(_(" --temp-slot use temporary replication slot\n")); + printf(_(" -P --perm-slot create permanent replication slot\n")); printf(_(" -j, --threads=NUM number of parallel threads\n")); printf(_(" -T, --tablespace-mapping=OLDDIR=NEWDIR\n")); printf(_(" relocate the tablespace from directory OLDDIR to NEWDIR\n")); + printf(_(" -x, --exclude-path=path_prefix files with path_prefix (relative to pgdata) will be\n")); + printf(_(" excluded from catchup (can be used multiple times)\n")); + printf(_(" Dangerous option! 
Use at your own risk!\n")); printf(_("\n Connection options:\n")); printf(_(" -U, --pguser=USERNAME user name to connect as (default: current local user)\n")); diff --git a/src/merge.c b/src/merge.c index cd070fce4..ff39c2510 100644 --- a/src/merge.c +++ b/src/merge.c @@ -1256,7 +1256,7 @@ merge_data_file(parray *parent_chain, pgBackup *full_backup, backup_data_file(tmp_file, to_fullpath_tmp1, to_fullpath_tmp2, InvalidXLogRecPtr, BACKUP_MODE_FULL, dest_backup->compress_alg, dest_backup->compress_level, - dest_backup->checksum_version, 0, NULL, + dest_backup->checksum_version, &(full_backup->hdr_map), true); /* drop restored temp file */ diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 00796be04..d629d838d 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -80,8 +80,9 @@ bool progress = false; bool no_sync = false; #if PG_VERSION_NUM >= 100000 char *replication_slot = NULL; -#endif bool temp_slot = false; +#endif +bool perm_slot = false; /* backup options */ bool backup_logs = false; @@ -118,6 +119,9 @@ bool skip_external_dirs = false; /* array for datnames, provided via db-include and db-exclude */ static parray *datname_exclude_list = NULL; static parray *datname_include_list = NULL; +/* arrays for --exclude-path's */ +static parray *exclude_absolute_paths_list = NULL; +static parray *exclude_relative_paths_list = NULL; /* checkdb options */ bool need_amcheck = false; @@ -176,6 +180,7 @@ static void compress_init(ProbackupSubcmd const subcmd); static void opt_datname_exclude_list(ConfigOption *opt, const char *arg); static void opt_datname_include_list(ConfigOption *opt, const char *arg); +static void opt_exclude_path(ConfigOption *opt, const char *arg); /* * Short name should be non-printable ASCII character. @@ -198,7 +203,10 @@ static ConfigOption cmd_options[] = { 'f', 'b', "backup-mode", opt_backup_mode, SOURCE_CMD_STRICT }, { 'b', 'C', "smooth-checkpoint", &smooth_checkpoint, SOURCE_CMD_STRICT }, { 's', 'S', "slot", &replication_slot, SOURCE_CMD_STRICT }, +#if PG_VERSION_NUM >= 100000 { 'b', 181, "temp-slot", &temp_slot, SOURCE_CMD_STRICT }, +#endif + { 'b', 'P', "perm-slot", &perm_slot, SOURCE_CMD_STRICT }, { 'b', 182, "delete-wal", &delete_wal, SOURCE_CMD_STRICT }, { 'b', 183, "delete-expired", &delete_expired, SOURCE_CMD_STRICT }, { 'b', 184, "merge-expired", &merge_expired, SOURCE_CMD_STRICT }, @@ -207,6 +215,7 @@ static ConfigOption cmd_options[] = /* catchup options */ { 's', 239, "source-pgdata", &catchup_source_pgdata, SOURCE_CMD_STRICT }, { 's', 240, "destination-pgdata", &catchup_destination_pgdata, SOURCE_CMD_STRICT }, + { 'f', 'x', "exclude-path", opt_exclude_path, SOURCE_CMD_STRICT }, /* restore options */ { 's', 136, "recovery-target-time", &target_time, SOURCE_CMD_STRICT }, { 's', 137, "recovery-target-xid", &target_xid, SOURCE_CMD_STRICT }, @@ -787,6 +796,17 @@ main(int argc, char *argv[]) elog(ERROR, "You cannot specify \"--no-validate\" option with the \"%s\" command", get_subcmd_name(backup_subcmd)); +#if PG_VERSION_NUM >= 100000 + if (temp_slot && perm_slot) + elog(ERROR, "You cannot specify \"--perm-slot\" option with the \"--temp-slot\" option"); + + /* if slot name was not provided for temp slot, use default slot name */ + if (!replication_slot && temp_slot) + replication_slot = DEFAULT_TEMP_SLOT_NAME; +#endif + if (!replication_slot && perm_slot) + replication_slot = DEFAULT_PERMANENT_SLOT_NAME; + if (num_threads < 1) num_threads = 1; @@ -825,7 +845,8 @@ main(int argc, char *argv[]) no_validate, no_sync, backup_logs); } case CATCHUP_CMD: - 
return do_catchup(catchup_source_pgdata, catchup_destination_pgdata, num_threads, !no_sync);
+			return do_catchup(catchup_source_pgdata, catchup_destination_pgdata, num_threads, !no_sync,
+							  exclude_absolute_paths_list, exclude_relative_paths_list);
 		case RESTORE_CMD:
 			return do_restore_or_validate(instanceState, current.backup_id,
 						  recovery_target_options,
@@ -990,39 +1011,45 @@ compress_init(ProbackupSubcmd const subcmd)
 	}
 }
 
-/* Construct array of datnames, provided by user via db-exclude option */
-void
-opt_datname_exclude_list(ConfigOption *opt, const char *arg)
+static void
+opt_parser_add_to_parray_helper(parray **list, const char *str)
 {
-	char *dbname = NULL;
+	char *elem = NULL;
 
-	if (!datname_exclude_list)
-		datname_exclude_list = parray_new();
+	if (*list == NULL)
+		*list = parray_new();
 
-	dbname = pgut_malloc(strlen(arg) + 1);
+	elem = pgut_malloc(strlen(str) + 1);
+	strcpy(elem, str);
 
-	/* TODO add sanity for database name */
-	strcpy(dbname, arg);
+	parray_append(*list, elem);
+}
 
-	parray_append(datname_exclude_list, dbname);
+/* Construct array of datnames, provided by user via db-exclude option */
+void
+opt_datname_exclude_list(ConfigOption *opt, const char *arg)
+{
+	/* TODO add sanity for database name */
+	opt_parser_add_to_parray_helper(&datname_exclude_list, arg);
 }
 
 /* Construct array of datnames, provided by user via db-include option */
 void
 opt_datname_include_list(ConfigOption *opt, const char *arg)
 {
-	char *dbname = NULL;
-
-	if (!datname_include_list)
-		datname_include_list = parray_new();
-
-	dbname = pgut_malloc(strlen(arg) + 1);
-
-	if (strcmp(dbname, "tempate0") == 0 ||
-		strcmp(dbname, "tempate1") == 0)
+	if (strcmp(arg, "template0") == 0 ||
+		strcmp(arg, "template1") == 0)
 		elog(ERROR, "Databases 'template0' and 'template1' cannot be used for partial restore or validation");
 
-	strcpy(dbname, arg);
+	opt_parser_add_to_parray_helper(&datname_include_list, arg);
+}
 
-	parray_append(datname_include_list, dbname);
+/* Parse --exclude-path option */
+void
+opt_exclude_path(ConfigOption *opt, const char *arg)
+{
+	if (is_absolute_path(arg))
+		opt_parser_add_to_parray_helper(&exclude_absolute_paths_list, arg);
+	else
+		opt_parser_add_to_parray_helper(&exclude_relative_paths_list, arg);
 }
diff --git a/src/pg_probackup.h b/src/pg_probackup.h
index 1cad526dd..19f6feff0 100644
--- a/src/pg_probackup.h
+++ b/src/pg_probackup.h
@@ -86,6 +86,10 @@ extern const char *PROGRAM_EMAIL;
 #define HEADER_MAP  "page_header_map"
 #define HEADER_MAP_TMP  "page_header_map_tmp"
 
+/* default replication slot names */
+#define DEFAULT_TEMP_SLOT_NAME	 "pg_probackup_slot"
+#define DEFAULT_PERMANENT_SLOT_NAME	 "pg_probackup_perm_slot"
+
 /* Timeout defaults */
 #define ARCHIVE_TIMEOUT_DEFAULT		300
 #define REPLICA_TIMEOUT_DEFAULT		300
@@ -278,6 +282,7 @@ typedef struct pgFile
 	pg_crc32 hdr_crc;	/* CRC value of header file: name_hdr */
 	pg_off_t hdr_off;   /* offset in header map */
 	int      hdr_size;  /* length of headers */
+	bool	excluded;			/* excluded via --exclude-path option */
 } pgFile;
 
 typedef struct page_map_entry
@@ -771,11 +776,12 @@ extern bool stream_wal;
 extern bool show_color;
 extern bool progress;
 extern bool is_archive_cmd; /* true for archive-{get,push} */
-#if PG_VERSION_NUM >= 100000
 /* In pre-10 'replication_slot' is defined in receivelog.h */
 extern char *replication_slot;
-#endif
+#if PG_VERSION_NUM >= 100000
 extern bool temp_slot;
+#endif
+extern bool perm_slot;
 
 /* backup options */
 extern bool smooth_checkpoint;
@@ -842,7 +848,8 @@ extern void process_block_change(ForkNumber forknum, 
RelFileNode rnode, BlockNumber blkno); /* in catchup.c */ -extern int do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, bool sync_dest_files); +extern int do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, bool sync_dest_files, + parray *exclude_absolute_paths_list, parray *exclude_relative_paths_list); /* in restore.c */ extern int do_restore_or_validate(InstanceState *instanceState, @@ -1057,11 +1064,15 @@ extern pg_crc32 pgFileGetCRCgz(const char *file_path, bool use_crc32c, bool miss extern int pgFileMapComparePath(const void *f1, const void *f2); extern int pgFileCompareName(const void *f1, const void *f2); +extern int pgFileCompareNameWithString(const void *f1, const void *f2); +extern int pgFileCompareRelPathWithString(const void *f1, const void *f2); extern int pgFileCompareRelPathWithExternal(const void *f1, const void *f2); extern int pgFileCompareRelPathWithExternalDesc(const void *f1, const void *f2); extern int pgFileCompareLinked(const void *f1, const void *f2); extern int pgFileCompareSize(const void *f1, const void *f2); extern int pgFileCompareSizeDesc(const void *f1, const void *f2); +extern int pgCompareString(const void *str1, const void *str2); +extern int pgPrefixCompareString(const void *str1, const void *str2); extern int pgCompareOid(const void *f1, const void *f2); extern void pfilearray_clear_locks(parray *file_list); @@ -1071,14 +1082,11 @@ extern bool check_data_file(ConnectionArgs *arguments, pgFile *file, extern void catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath, - XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode, - CompressAlg calg, int clevel, uint32 checksum_version, - int ptrack_version_num, const char *ptrack_schema, - bool is_merge, size_t prev_size); + XLogRecPtr sync_lsn, BackupMode backup_mode, + uint32 checksum_version, size_t prev_size); extern void backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath, XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode, CompressAlg calg, int clevel, uint32 checksum_version, - int ptrack_version_num, const char *ptrack_schema, HeaderMap *hdr_map, bool missing_ok); extern void backup_non_data_file(pgFile *file, pgFile *prev_file, const char *from_fullpath, const char *to_fullpath, @@ -1197,11 +1205,11 @@ extern FILE* open_local_file_rw(const char *to_fullpath, char **out_buf, uint32 extern int send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel, uint32 checksum_version, bool use_pagemap, BackupPageHeader2 **headers, - BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema); + BackupMode backup_mode); extern int copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr prev_backup_start_lsn, uint32 checksum_version, bool use_pagemap, - BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema); + BackupMode backup_mode); /* FIO */ extern void setMyLocation(ProbackupSubcmd const subcmd); @@ -1212,8 +1220,7 @@ extern int fio_send_pages(const char *to_fullpath, const char *from_fullpath, pg BackupPageHeader2 **headers); extern int fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version, - bool use_pagemap, BlockNumber *err_blknum, char **errormsg, - BackupPageHeader2 **headers); + bool use_pagemap, BlockNumber *err_blknum, char **errormsg); 
/* return codes for fio_send_pages */ extern int fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, char **errormsg); extern int fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, @@ -1265,7 +1272,8 @@ datapagemap_print_debug(datapagemap_t *map); extern XLogRecPtr stop_backup_lsn; extern void start_WAL_streaming(PGconn *backup_conn, char *stream_dst_path, ConnectionOptions *conn_opt, - XLogRecPtr startpos, TimeLineID starttli); + XLogRecPtr startpos, TimeLineID starttli, + bool is_backup); extern int wait_WAL_streaming_end(parray *backup_files_list); extern parray* parse_tli_history_buffer(char *history, TimeLineID tli); diff --git a/src/ptrack.c b/src/ptrack.c index 191f988a3..3f395b286 100644 --- a/src/ptrack.c +++ b/src/ptrack.c @@ -123,19 +123,24 @@ pg_is_ptrack_enabled(PGconn *backup_conn, int ptrack_version_num) PGresult *res_db; bool result = false; - if (ptrack_version_num == 200) + if (ptrack_version_num > 200) + { + res_db = pgut_execute(backup_conn, "SHOW ptrack.map_size", 0, NULL); + result = strcmp(PQgetvalue(res_db, 0, 0), "0") != 0 && + strcmp(PQgetvalue(res_db, 0, 0), "-1") != 0; + PQclear(res_db); + } + else if (ptrack_version_num == 200) { res_db = pgut_execute(backup_conn, "SHOW ptrack_map_size", 0, NULL); result = strcmp(PQgetvalue(res_db, 0, 0), "0") != 0; + PQclear(res_db); } else { - res_db = pgut_execute(backup_conn, "SHOW ptrack.map_size", 0, NULL); - result = strcmp(PQgetvalue(res_db, 0, 0), "0") != 0 && - strcmp(PQgetvalue(res_db, 0, 0), "-1") != 0; + result = false; } - PQclear(res_db); return result; } diff --git a/src/stream.c b/src/stream.c index 5912ff44b..570108cde 100644 --- a/src/stream.c +++ b/src/stream.c @@ -59,6 +59,7 @@ static pthread_t stream_thread; static StreamThreadArg stream_thread_arg = {"", NULL, 1}; static parray *xlog_files_list = NULL; +static bool do_crc = true; static void IdentifySystem(StreamThreadArg *stream_thread_arg); static int checkpoint_timeout(PGconn *backup_conn); @@ -159,6 +160,56 @@ checkpoint_timeout(PGconn *backup_conn) return val_int; } +/* + * CreateReplicationSlot_compat() -- wrapper for CreateReplicationSlot() used in StreamLog() + * src/bin/pg_basebackup/streamutil.c + * CreateReplicationSlot() has different signatures on different PG versions: + * PG 15 + * bool + * CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin, + * bool is_temporary, bool is_physical, bool reserve_wal, + * bool slot_exists_ok, bool two_phase) + * PG 11-14 + * bool + * CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin, + * bool is_temporary, bool is_physical, bool reserve_wal, + * bool slot_exists_ok) + * PG 9.5-10 + * CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin, + * bool is_physical, bool slot_exists_ok) + * NOTE: PG 9.6 and 10 support reserve_wal in + * pg_catalog.pg_create_physical_replication_slot(slot_name name [, immediately_reserve boolean]) + * and + * CREATE_REPLICATION_SLOT slot_name { PHYSICAL [ RESERVE_WAL ] | LOGICAL output_plugin } + * replication protocol command, but CreateReplicationSlot() C function doesn't + */ +static bool +CreateReplicationSlot_compat(PGconn *conn, const char *slot_name, const char *plugin, + bool is_temporary, bool is_physical, + bool slot_exists_ok) +{ +#if PG_VERSION_NUM >= 150000 + return CreateReplicationSlot(conn, slot_name, plugin, is_temporary, is_physical, + /* reserve_wal = */ true, slot_exists_ok, /* two_phase = */ false); +#elif PG_VERSION_NUM >= 110000 + 
return CreateReplicationSlot(conn, slot_name, plugin, is_temporary, is_physical, + /* reserve_wal = */ true, slot_exists_ok); +#elif PG_VERSION_NUM >= 100000 + /* + * PG-10 doesn't support creating temp_slot by calling CreateReplicationSlot(), but + * it will be created by setting StreamCtl.temp_slot later in StreamLog() + */ + if (!is_temporary) + return CreateReplicationSlot(conn, slot_name, plugin, /*is_temporary,*/ is_physical, /*reserve_wal,*/ slot_exists_ok); + else + return true; +#else + /* these parameters not supported in PG < 10 */ + Assert(!is_temporary); + return CreateReplicationSlot(conn, slot_name, plugin, /*is_temporary,*/ is_physical, /*reserve_wal,*/ slot_exists_ok); +#endif +} + /* * Start the log streaming */ @@ -177,31 +228,36 @@ StreamLog(void *arg) /* Initialize timeout */ stream_stop_begin = 0; + /* Create repslot */ #if PG_VERSION_NUM >= 100000 - /* if slot name was not provided for temp slot, use default slot name */ - if (!replication_slot && temp_slot) - replication_slot = "pg_probackup_slot"; -#endif - - -#if PG_VERSION_NUM >= 150000 - /* Create temp repslot */ - if (temp_slot) - CreateReplicationSlot(stream_arg->conn, replication_slot, - NULL, temp_slot, true, true, false, false); -#elif PG_VERSION_NUM >= 110000 - /* Create temp repslot */ - if (temp_slot) - CreateReplicationSlot(stream_arg->conn, replication_slot, - NULL, temp_slot, true, true, false); + if (temp_slot || perm_slot) + if (!CreateReplicationSlot_compat(stream_arg->conn, replication_slot, NULL, temp_slot, true, false)) +#else + if (perm_slot) + if (!CreateReplicationSlot_compat(stream_arg->conn, replication_slot, NULL, false, true, false)) #endif + { + interrupted = true; + elog(ERROR, "Couldn't create physical replication slot %s", replication_slot); + } /* * Start the replication */ - elog(LOG, "started streaming WAL at %X/%X (timeline %u)", - (uint32) (stream_arg->startpos >> 32), (uint32) stream_arg->startpos, - stream_arg->starttli); + if (replication_slot) + elog(LOG, "started streaming WAL at %X/%X (timeline %u) using%s slot %s", + (uint32) (stream_arg->startpos >> 32), (uint32) stream_arg->startpos, + stream_arg->starttli, +#if PG_VERSION_NUM >= 100000 + temp_slot ? 
" temporary" : "", +#else + "", +#endif + replication_slot); + else + elog(LOG, "started streaming WAL at %X/%X (timeline %u)", + (uint32) (stream_arg->startpos >> 32), (uint32) stream_arg->startpos, + stream_arg->starttli); #if PG_VERSION_NUM >= 90600 { @@ -212,6 +268,11 @@ StreamLog(void *arg) ctl.startpos = stream_arg->startpos; ctl.timeline = stream_arg->starttli; ctl.sysidentifier = NULL; + ctl.stream_stop = stop_streaming; + ctl.standby_message_timeout = standby_message_timeout; + ctl.partial_suffix = NULL; + ctl.synchronous = false; + ctl.mark_done = false; #if PG_VERSION_NUM >= 100000 ctl.walmethod = CreateWalDirectoryMethod( @@ -224,19 +285,14 @@ StreamLog(void *arg) ctl.do_sync = false; /* We sync all files at the end of backup */ // ctl.mark_done /* for future use in s3 */ #if PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 110000 + /* StreamCtl.temp_slot used only for PG-10, in PG>10, temp_slots are created by calling CreateReplicationSlot() */ ctl.temp_slot = temp_slot; -#endif -#else +#endif /* PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 110000 */ +#else /* PG_VERSION_NUM < 100000 */ ctl.basedir = (char *) stream_arg->basedir; -#endif - - ctl.stream_stop = stop_streaming; - ctl.standby_message_timeout = standby_message_timeout; - ctl.partial_suffix = NULL; - ctl.synchronous = false; - ctl.mark_done = false; +#endif /* PG_VERSION_NUM >= 100000 */ - if(ReceiveXlogStream(stream_arg->conn, &ctl) == false) + if (ReceiveXlogStream(stream_arg->conn, &ctl) == false) { interrupted = true; elog(ERROR, "Problem in receivexlog"); @@ -244,38 +300,42 @@ StreamLog(void *arg) #if PG_VERSION_NUM >= 100000 if (!ctl.walmethod->finish()) + { + interrupted = true; elog(ERROR, "Could not finish writing WAL files: %s", strerror(errno)); -#endif + } +#endif /* PG_VERSION_NUM >= 100000 */ } -#else - if(ReceiveXlogStream(stream_arg->conn, stream_arg->startpos, stream_arg->starttli, +#else /* PG_VERSION_NUM < 90600 */ + /* PG-9.5 */ + if (ReceiveXlogStream(stream_arg->conn, stream_arg->startpos, stream_arg->starttli, NULL, (char *) stream_arg->basedir, stop_streaming, standby_message_timeout, NULL, false, false) == false) { interrupted = true; elog(ERROR, "Problem in receivexlog"); } -#endif +#endif /* PG_VERSION_NUM >= 90600 */ - /* be paranoid and sort xlog_files_list, - * so if stop_lsn segno is already in the list, - * then list must be sorted to detect duplicates. - */ - parray_qsort(xlog_files_list, pgFileCompareRelPathWithExternal); + /* be paranoid and sort xlog_files_list, + * so if stop_lsn segno is already in the list, + * then list must be sorted to detect duplicates. 
+ */ + parray_qsort(xlog_files_list, pgFileCompareRelPathWithExternal); - /* Add the last segment to the list */ - add_walsegment_to_filelist(xlog_files_list, stream_arg->starttli, + /* Add the last segment to the list */ + add_walsegment_to_filelist(xlog_files_list, stream_arg->starttli, stop_stream_lsn, (char *) stream_arg->basedir, instance_config.xlog_seg_size); - /* append history file to walsegment filelist */ - add_history_file_to_filelist(xlog_files_list, stream_arg->starttli, (char *) stream_arg->basedir); + /* append history file to walsegment filelist */ + add_history_file_to_filelist(xlog_files_list, stream_arg->starttli, (char *) stream_arg->basedir); - /* - * TODO: remove redundant WAL segments - * walk pg_wal and remove files with segno greater that of stop_lsn`s segno +1 - */ + /* + * TODO: remove redundant WAL segments + * walk pg_wal and remove files with segno greater that of stop_lsn`s segno +1 + */ elog(LOG, "finished streaming WAL at %X/%X (timeline %u)", (uint32) (stop_stream_lsn >> 32), (uint32) stop_stream_lsn, stream_arg->starttli); @@ -569,8 +629,10 @@ parse_tli_history_buffer(char *history, TimeLineID tli) */ void start_WAL_streaming(PGconn *backup_conn, char *stream_dst_path, ConnectionOptions *conn_opt, - XLogRecPtr startpos, TimeLineID starttli) + XLogRecPtr startpos, TimeLineID starttli, bool is_backup) { + /* calculate crc only when running backup, catchup has no need for it */ + do_crc = is_backup; /* How long we should wait for streaming end after pg_stop_backup */ stream_stop_timeout = checkpoint_timeout(backup_conn); //TODO Add a comment about this calculation @@ -654,15 +716,16 @@ add_walsegment_to_filelist(parray *filelist, uint32 timeline, XLogRecPtr xlogpos if (existing_file) { - (*existing_file)->crc = pgFileGetCRC(wal_segment_fullpath, true, false); + if (do_crc) + (*existing_file)->crc = pgFileGetCRC(wal_segment_fullpath, true, false); (*existing_file)->write_size = xlog_seg_size; (*existing_file)->uncompressed_size = xlog_seg_size; return; } - /* calculate crc */ - file->crc = pgFileGetCRC(wal_segment_fullpath, true, false); + if (do_crc) + file->crc = pgFileGetCRC(wal_segment_fullpath, true, false); /* Should we recheck it using stat? 
*/ file->write_size = xlog_seg_size; @@ -692,7 +755,8 @@ add_history_file_to_filelist(parray *filelist, uint32 timeline, char *basedir) file = pgFileNew(fullpath, relpath, false, 0, FIO_BACKUP_HOST); /* calculate crc */ - file->crc = pgFileGetCRC(fullpath, true, false); + if (do_crc) + file->crc = pgFileGetCRC(fullpath, true, false); file->write_size = file->size; file->uncompressed_size = file->size; diff --git a/src/utils/configuration.h b/src/utils/configuration.h index 3a5de4b83..2c6ea3eec 100644 --- a/src/utils/configuration.h +++ b/src/utils/configuration.h @@ -61,14 +61,14 @@ typedef char *(*option_get_fn) (ConfigOption *opt); /* * type: - * b: bool (true) - * B: bool (false) + * b: bool (true) + * B: bool (false) * f: option_fn - * i: 32bit signed integer - * u: 32bit unsigned integer - * I: 64bit signed integer - * U: 64bit unsigned integer - * s: string + * i: 32bit signed integer + * u: 32bit unsigned integer + * I: 64bit signed integer + * U: 64bit unsigned integer + * s: string * t: time_t */ struct ConfigOption diff --git a/src/utils/file.c b/src/utils/file.c index b808d6293..f86e605cb 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1963,8 +1963,7 @@ fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, int fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version, - bool use_pagemap, BlockNumber* err_blknum, char **errormsg, - BackupPageHeader2 **headers) + bool use_pagemap, BlockNumber* err_blknum, char **errormsg) { FILE *out = NULL; char *out_buf = NULL; @@ -2092,9 +2091,9 @@ fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, /* receive headers if any */ if (hdr.size > 0) { - *headers = pgut_malloc(hdr.size); - IO_CHECK(fio_read_all(fio_stdin, *headers, hdr.size), hdr.size); - file->n_headers = (hdr.size / sizeof(BackupPageHeader2)) -1; + char *tmp = pgut_malloc(hdr.size); + IO_CHECK(fio_read_all(fio_stdin, tmp, hdr.size), hdr.size); + pg_free(tmp); } break; diff --git a/tests/backup.py b/tests/backup.py index 0bfd0c1b9..558c62de3 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -1421,8 +1421,10 @@ def test_basic_temp_slot_for_stream_backup(self): base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], - pg_options={ - 'max_wal_size': '40MB', 'default_transaction_read_only': 'on'}) + pg_options={'max_wal_size': '40MB'}) + + if self.get_version(node) < self.version_to_num('10.0'): + return unittest.skip('You need PostgreSQL >= 10 for this test') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1434,11 +1436,6 @@ def test_basic_temp_slot_for_stream_backup(self): backup_dir, 'node', node, options=['--stream', '--temp-slot']) - if self.get_version(node) < self.version_to_num('10.0'): - return unittest.skip('You need PostgreSQL >= 10 for this test') - else: - pg_receivexlog_path = self.get_bin_path('pg_receivewal') - # FULL backup self.backup_node( backup_dir, 'node', node, @@ -3274,7 +3271,7 @@ def test_basic_backup_default_transaction_read_only(self): # FULL backup self.backup_node( backup_dir, 'node', node, - options=['--stream', '--temp-slot']) + options=['--stream']) # DELTA backup self.backup_node( diff --git a/tests/catchup.py b/tests/catchup.py index 5df538e42..45d999629 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -1,4 +1,5 @@ import os +from pathlib import Path import signal import unittest from .helpers.ptrack_helpers 
import ProbackupTest, ProbackupException @@ -55,6 +56,7 @@ def test_basic_full_catchup(self): # Cleanup dst_pg.stop() + #self.assertEqual(1, 0, 'Stop test') self.del_test_dir(module_name, self.fname) def test_full_catchup_with_tablespace(self): @@ -180,6 +182,7 @@ def test_basic_delta_catchup(self): # Cleanup dst_pg.stop() + #self.assertEqual(1, 0, 'Stop test') self.del_test_dir(module_name, self.fname) def test_basic_ptrack_catchup(self): @@ -252,6 +255,7 @@ def test_basic_ptrack_catchup(self): # Cleanup dst_pg.stop() + #self.assertEqual(1, 0, 'Stop test') self.del_test_dir(module_name, self.fname) def test_tli_delta_catchup(self): @@ -776,69 +780,6 @@ def test_same_db_id(self): src_pg.stop() self.del_test_dir(module_name, self.fname) - def test_destination_dbstate(self): - """ - Test that we detect that destination pg is not cleanly shutdowned - """ - # preparation 1: source - src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), - set_replication = True, - pg_options = { 'wal_log_hints': 'on' } - ) - src_pg.slow_start() - - # preparation 2: destination - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) - self.catchup_node( - backup_mode = 'FULL', - source_pgdata = src_pg.data_dir, - destination_node = dst_pg, - options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] - ) - - # try #1 - try: - self.catchup_node( - backup_mode = 'DELTA', - source_pgdata = src_pg.data_dir, - destination_node = dst_pg, - options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] - ) - self.assertEqual(1, 0, "Expecting Error because destination pg is not cleanly shutdowned.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Destination directory contains "backup_label" file', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - # try #2 - dst_options = {} - dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) - dst_pg.slow_start() - self.assertNotEqual(dst_pg.pid, 0, "Cannot detect pid of running postgres") - os.kill(dst_pg.pid, signal.SIGKILL) - try: - self.catchup_node( - backup_mode = 'DELTA', - source_pgdata = src_pg.data_dir, - destination_node = dst_pg, - options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] - ) - self.assertEqual(1, 0, "Expecting Error because destination pg is not cleanly shutdowned.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'must be stopped cleanly', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - # Cleanup - src_pg.stop() - self.del_test_dir(module_name, self.fname) - def test_tli_destination_mismatch(self): """ Test that we detect TLI mismatch in destination @@ -975,3 +916,517 @@ def test_tli_source_mismatch(self): src_pg.stop() fake_src_pg.stop() self.del_test_dir(module_name, self.fname) + +######################################### +# Test unclean destination +######################################### + def test_unclean_delta_catchup(self): + """ + Test that we correctly recover uncleanly shutdowned destination + """ + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + pg_options = { 'wal_log_hints': 'on' } + ) + src_pg.slow_start() + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question(answer int)") + + # 
preparation 2: destination + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + + # try #1 + try: + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + self.assertEqual(1, 0, "Expecting Error because destination pg is not cleanly shutdowned.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Destination directory contains "backup_label" file', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + # try #2 + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + self.assertNotEqual(dst_pg.pid, 0, "Cannot detect pid of running postgres") + os.kill(dst_pg.pid, signal.SIGKILL) + + # preparation 3: make changes on master (source) + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) + pgbench.wait() + src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") + src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + + # do delta catchup + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + + # 1st check: compare data directories + self.compare_pgdata( + self.pgdata_content(src_pg.data_dir), + self.pgdata_content(dst_pg.data_dir) + ) + + # run&recover catchup'ed instance + src_pg.stop() + self.set_replica(master = src_pg, replica = dst_pg) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start(replica = True) + + # 2nd check: run verification query + dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') + + # Cleanup + dst_pg.stop() + self.del_test_dir(module_name, self.fname) + + def test_unclean_ptrack_catchup(self): + """ + Test that we correctly recover uncleanly shutdowned destination + """ + if not self.ptrack: + return unittest.skip('Skipped because ptrack support is disabled') + + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + ptrack_enable = True, + pg_options = { 'wal_log_hints': 'on' } + ) + src_pg.slow_start() + src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack") + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question(answer int)") + + # preparation 2: destination + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + + # try #1 + try: + self.catchup_node( + backup_mode = 'PTRACK', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + self.assertEqual(1, 0, "Expecting Error because destination pg is not cleanly shutdowned.\n Output: {0} \n CMD: {1}".format( + repr(self.output), 
self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Destination directory contains "backup_label" file', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + # try #2 + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + self.assertNotEqual(dst_pg.pid, 0, "Cannot detect pid of running postgres") + os.kill(dst_pg.pid, signal.SIGKILL) + + # preparation 3: make changes on master (source) + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) + pgbench.wait() + src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") + src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + + # do delta catchup + self.catchup_node( + backup_mode = 'PTRACK', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + + # 1st check: compare data directories + self.compare_pgdata( + self.pgdata_content(src_pg.data_dir), + self.pgdata_content(dst_pg.data_dir) + ) + + # run&recover catchup'ed instance + src_pg.stop() + self.set_replica(master = src_pg, replica = dst_pg) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start(replica = True) + + # 2nd check: run verification query + dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') + + # Cleanup + dst_pg.stop() + self.del_test_dir(module_name, self.fname) + +######################################### +# Test replication slot logic +# +# -S, --slot=SLOTNAME replication slot to use +# --temp-slot use temporary replication slot +# -P --perm-slot create permanent replication slot +# --primary-slot-name=SLOTNAME value for primary_slot_name parameter +# +# 1. if "--slot" is used - try to use already existing slot with given name +# 2. if "--slot" and "--perm-slot" used - try to create permanent slot and use it. +# 3. If "--perm-slot " flag is used without "--slot" option - use generic slot name like "pg_probackup_perm_slot" +# 4. If "--perm-slot " flag is used and permanent slot already exists - fail with error. +# 5. "--perm-slot" and "--temp-slot" flags cannot be used together. +######################################### + def test_catchup_with_replication_slot(self): + """ + """ + # preparation + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True + ) + src_pg.slow_start() + + # 1a. --slot option + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_1a')) + try: + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--slot=nonexistentslot_1a' + ] + ) + self.assertEqual(1, 0, "Expecting Error because replication slot does not exist.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: replication slot "nonexistentslot_1a" does not exist', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + # 1b. 
--slot option + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_1b')) + src_pg.safe_psql("postgres", "SELECT pg_catalog.pg_create_physical_replication_slot('existentslot_1b')") + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--slot=existentslot_1b' + ] + ) + + # 2a. --slot --perm-slot + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_2a')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--slot=nonexistentslot_2a', + '--perm-slot' + ] + ) + + # 2b. and 4. --slot --perm-slot + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_2b')) + src_pg.safe_psql("postgres", "SELECT pg_catalog.pg_create_physical_replication_slot('existentslot_2b')") + try: + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--slot=existentslot_2b', + '--perm-slot' + ] + ) + self.assertEqual(1, 0, "Expecting Error because replication slot already exist.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: replication slot "existentslot_2b" already exists', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + # 3. --perm-slot --slot + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_3')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--perm-slot' + ] + ) + slot_name = src_pg.safe_psql( + "postgres", + "SELECT slot_name FROM pg_catalog.pg_replication_slots " + "WHERE slot_name NOT LIKE '%existentslot%' " + "AND slot_type = 'physical'" + ).decode('utf-8').rstrip() + self.assertEqual(slot_name, 'pg_probackup_perm_slot', 'Slot name mismatch') + + # 5. 
--perm-slot --temp-slot (PG>=10) + if self.get_version(src_pg) >= self.version_to_num('10.0'): + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_5')) + try: + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--perm-slot', + '--temp-slot' + ] + ) + self.assertEqual(1, 0, "Expecting Error because conflicting options --perm-slot and --temp-slot used together\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: You cannot specify "--perm-slot" option with the "--temp-slot" option', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + #self.assertEqual(1, 0, 'Stop test') + self.del_test_dir(module_name, self.fname) + +######################################### +# --exclude-path +######################################### + def test_catchup_with_exclude_path(self): + """ + various syntetic tests for --exclude-path option + """ + # preparation + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True + ) + src_pg.slow_start() + + # test 1 + os.mkdir(os.path.join(src_pg.data_dir, 'src_usefull_dir')) + with open(os.path.join(os.path.join(src_pg.data_dir, 'src_usefull_dir', 'src_garbage_file')), 'w') as f: + f.write('garbage') + f.flush() + f.close + os.mkdir(os.path.join(src_pg.data_dir, 'src_garbage_dir')) + with open(os.path.join(os.path.join(src_pg.data_dir, 'src_garbage_dir', 'src_garbage_file')), 'w') as f: + f.write('garbage') + f.flush() + f.close + + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--exclude-path={0}'.format(os.path.join(src_pg.data_dir, 'src_usefull_dir', 'src_garbage_file')), + '-x', '{0}'.format(os.path.join(src_pg.data_dir, 'src_garbage_dir')), + ] + ) + + self.assertTrue(Path(os.path.join(dst_pg.data_dir, 'src_usefull_dir')).exists()) + self.assertFalse(Path(os.path.join(dst_pg.data_dir, 'src_usefull_dir', 'src_garbage_file')).exists()) + self.assertFalse(Path(os.path.join(dst_pg.data_dir, 'src_garbage_dir')).exists()) + + self.set_replica(src_pg, dst_pg) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start(replica = True) + dst_pg.stop() + + # test 2 + os.mkdir(os.path.join(dst_pg.data_dir, 'dst_garbage_dir')) + os.mkdir(os.path.join(dst_pg.data_dir, 'dst_usefull_dir')) + with open(os.path.join(os.path.join(dst_pg.data_dir, 'dst_usefull_dir', 'dst_usefull_file')), 'w') as f: + f.write('gems') + f.flush() + f.close + + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--exclude-path=src_usefull_dir/src_garbage_file', + '--exclude-path=src_garbage_dir', + '--exclude-path={0}'.format(os.path.join(dst_pg.data_dir, 'dst_usefull_dir')), + ] + ) + + self.assertTrue(Path(os.path.join(dst_pg.data_dir, 'src_usefull_dir')).exists()) + self.assertFalse(Path(os.path.join(dst_pg.data_dir, 'src_usefull_dir', 'src_garbage_file')).exists()) + self.assertFalse(Path(os.path.join(dst_pg.data_dir, 'src_garbage_dir')).exists()) + 
self.assertFalse(Path(os.path.join(dst_pg.data_dir, 'dst_garbage_dir')).exists()) + self.assertTrue(Path(os.path.join(dst_pg.data_dir, 'dst_usefull_dir', 'dst_usefull_file')).exists()) + + #self.assertEqual(1, 0, 'Stop test') + src_pg.stop() + self.del_test_dir(module_name, self.fname) + + def test_config_exclusion(self): + """ + Test that catchup can preserve dest replication config + """ + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + pg_options = { 'wal_log_hints': 'on' } + ) + src_pg.slow_start() + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question(answer int)") + + # preparation 2: make lagging behind replica + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + self.set_replica(src_pg, dst_pg) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start(replica = True) + dst_pg.stop() + + # preparation 3: make changes on master (source) + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '2', '--no-vacuum']) + pgbench.wait() + + # test 1: do delta catchup with relative exclusion paths + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--exclude-path=postgresql.conf', + '--exclude-path=postgresql.auto.conf', + '--exclude-path=recovery.conf', + ] + ) + + # run&recover catchup'ed instance + # don't set destination db port and recover options + dst_pg.slow_start(replica = True) + + # check: run verification query + src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") + src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') + + # preparation 4: make changes on master (source) + dst_pg.stop() + #src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '2', '--no-vacuum']) + pgbench.wait() + + # test 2: do delta catchup with absolute source exclusion paths + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--exclude-path={0}/postgresql.conf'.format(src_pg.data_dir), + '--exclude-path={0}/postgresql.auto.conf'.format(src_pg.data_dir), + '--exclude-path={0}/recovery.conf'.format(src_pg.data_dir), + ] + ) + + # run&recover catchup'ed instance + # don't set destination db port and recover options + dst_pg.slow_start(replica = True) + + # check: run verification query + src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(2*42)") + src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') + + # preparation 5: make changes on master (source) + dst_pg.stop() + pgbench = src_pg.pgbench(options=['-T', '2', '--no-vacuum']) + pgbench.wait() + + # test 3: do delta catchup with absolute destination exclusion 
paths + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--exclude-path={0}/postgresql.conf'.format(dst_pg.data_dir), + '--exclude-path={0}/postgresql.auto.conf'.format(dst_pg.data_dir), + '--exclude-path={0}/recovery.conf'.format(dst_pg.data_dir), + ] + ) + + # run&recover catchup'ed instance + # don't set destination db port and recover options + dst_pg.slow_start(replica = True) + + # check: run verification query + src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(3*42)") + src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') + + # Cleanup + src_pg.stop() + dst_pg.stop() + #self.assertEqual(1, 0, 'Stop test') + self.del_test_dir(module_name, self.fname) diff --git a/travis/Dockerfile.in b/travis/Dockerfile.in index 3e451e24f..e6bbedb61 100644 --- a/travis/Dockerfile.in +++ b/travis/Dockerfile.in @@ -10,6 +10,7 @@ RUN python3 -m pip install virtualenv # Environment ENV PG_MAJOR=${PG_VERSION} PG_BRANCH=${PG_BRANCH} +ENV PTRACK_PATCH_PG_VERSION=${PTRACK_PATCH_PG_VERSION} ENV PGPROBACKUP_GDB=${PGPROBACKUP_GDB} ENV LANG=C.UTF-8 PGHOME=/pg/testdir/pgbin diff --git a/travis/make_dockerfile.sh b/travis/make_dockerfile.sh index 2e8ccd5a3..fc2742cdb 100755 --- a/travis/make_dockerfile.sh +++ b/travis/make_dockerfile.sh @@ -14,6 +14,10 @@ if [ -z ${MODE+x} ]; then MODE=basic fi +if [ -z ${PTRACK_PATCH_PG_VERSION+x} ]; then + PTRACK_PATCH_PG_VERSION=off +fi + if [ -z ${PGPROBACKUP_GDB+x} ]; then PGPROBACKUP_GDB=ON fi @@ -21,11 +25,13 @@ fi echo PG_VERSION=${PG_VERSION} echo PG_BRANCH=${PG_BRANCH} echo MODE=${MODE} +echo PTRACK_PATCH_PG_VERSION=${PTRACK_PATCH_PG_VERSION} echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB} sed \ -e 's/${PG_VERSION}/'${PG_VERSION}/g \ -e 's/${PG_BRANCH}/'${PG_BRANCH}/g \ -e 's/${MODE}/'${MODE}/g \ + -e 's/${PTRACK_PATCH_PG_VERSION}/'${PTRACK_PATCH_PG_VERSION}/g \ -e 's/${PGPROBACKUP_GDB}/'${PGPROBACKUP_GDB}/g \ Dockerfile.in > Dockerfile diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 488d8ee45..4a64fed80 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -32,9 +32,21 @@ PG_SRC=$PWD/postgres echo "############### Getting Postgres sources:" git clone https://github.com/postgres/postgres.git -b $PG_BRANCH --depth=1 +# Clone ptrack +if [ "$PTRACK_PATCH_PG_VERSION" != "off" ]; then + git clone https://github.com/postgrespro/ptrack.git -b master --depth=1 + export PG_PROBACKUP_PTRACK=on +else + export PG_PROBACKUP_PTRACK=off +fi + + # Compile and install Postgres echo "############### Compiling Postgres:" cd postgres # Go to postgres dir +if [ "$PG_PROBACKUP_PTRACK" = "on" ]; then + git apply -3 ../ptrack/patches/REL_${PTRACK_PATCH_PG_VERSION}_STABLE-ptrack-core.diff +fi CFLAGS="-O0" ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests make -s -j$(nproc) install #make -s -j$(nproc) -C 'src/common' install @@ -47,6 +59,11 @@ export PATH=$PGHOME/bin:$PATH export LD_LIBRARY_PATH=$PGHOME/lib export PG_CONFIG=$(which pg_config) +if [ "$PG_PROBACKUP_PTRACK" = "on" ]; then + echo "############### Compiling Ptrack:" + make USE_PGXS=1 -C ../ptrack install +fi + # Get amcheck if missing if [ ! 
-d "contrib/amcheck" ]; then echo "############### Getting missing amcheck:" From 83c0cbb61b36539529ffca085e86ed53cdb48ca3 Mon Sep 17 00:00:00 2001 From: Elena Indrupskaya Date: Fri, 13 Aug 2021 15:23:03 +0300 Subject: [PATCH 191/525] [DOC] [skip travis] Removed version tags 12+ from documentation --- doc/pgprobackup.xml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 7178cb14c..36628ce46 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -143,7 +143,7 @@ doc/src/sgml/pgprobackup.sgml wal_file_name option - + pg_probackup catchup_mode @@ -291,7 +291,7 @@ doc/src/sgml/pgprobackup.sgml Partial restore: restoring only the specified databases. - + Catchup: cloning a PostgreSQL instance for a fallen-behind standby server to catch up with master. @@ -1089,7 +1089,7 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; mode: , , , - , + , , and . @@ -2089,7 +2089,7 @@ pg_probackup restore -B backup_dir --instance , , , - , + , and processes can be executed on several parallel threads. This can significantly @@ -3408,7 +3408,7 @@ pg_probackup delete -B backup_dir --instance - + Cloning and Synchronizing <productname>PostgreSQL</productname> Instance pg_probackup can create a copy of a PostgreSQL @@ -4449,7 +4449,7 @@ pg_probackup archive-get -B backup_dir --instance - + catchup pg_probackup catchup -b catchup_mode @@ -5188,7 +5188,7 @@ pg_probackup catchup -b catchup_mode You can use these options together with - , , and + , , and commands. @@ -5479,7 +5479,7 @@ pg_probackup catchup -b catchup_mode used with , , , - , + , , , and commands. From 328eea196a4a4aa6fca2d6dd76e1647945bafc0b Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Fri, 13 Aug 2021 16:14:04 +0300 Subject: [PATCH 192/525] Version 2.5.1 --- src/pg_probackup.h | 8 ++++---- tests/expected/option_version.out | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 19f6feff0..dfa7051a3 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -338,14 +338,14 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.0" +#define PROGRAM_VERSION "2.5.1" /* update when remote agent API or behaviour changes */ -#define AGENT_PROTOCOL_VERSION 20500 -#define AGENT_PROTOCOL_VERSION_STR "2.5.0" +#define AGENT_PROTOCOL_VERSION 20501 +#define AGENT_PROTOCOL_VERSION_STR "2.5.1" /* update only when changing storage format */ -#define STORAGE_FORMAT_VERSION "2.5.0" +#define STORAGE_FORMAT_VERSION "2.4.4" typedef struct ConnectionOptions { diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index 560b6b592..36e5d4c7a 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.5.0 +pg_probackup 2.5.1 \ No newline at end of file From 97fe5b83723fde64e93163b6efee2a1042c8013f Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Fri, 13 Aug 2021 19:14:22 +0300 Subject: [PATCH 193/525] fix for windows: conditional strndup() replacement --- src/utils/pgut.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/utils/pgut.c b/src/utils/pgut.c index db7c7fd95..52599848d 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -962,9 +962,18 @@ pgut_strndup(const char *str, size_t n) if (str == NULL) return NULL; +#if _POSIX_C_SOURCE >= 200809L if ((ret = strndup(str, n)) == NULL) elog(ERROR, "could not duplicate string \"%s\": %s", str, strerror(errno)); +#else /* WINDOWS doesn't have strndup() */ + if ((ret = malloc(n + 1)) == NULL) + elog(ERROR, "could not duplicate string \"%s\": %s", + str, strerror(errno)); + + memcpy(ret, str, n); + ret[n] = '\0'; +#endif return ret; } From e5714bc625693dc50270245de2bd18194a29ce92 Mon Sep 17 00:00:00 2001 From: Elena Indrupskaya Date: Tue, 17 Aug 2021 18:23:59 +0300 Subject: [PATCH 194/525] [DOC] [skip travis] Fixed misplaced comma in documentation --- doc/pgprobackup.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 36628ce46..9b236b129 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -5187,8 +5187,8 @@ pg_probackup catchup -b catchup_mode Connection Options You can use these options together with - - , , and + , + , and commands. From 14035b271d9a8b6d6e1a4b5bfafa67880c0cfd36 Mon Sep 17 00:00:00 2001 From: Elena Indrupskaya Date: Thu, 19 Aug 2021 10:01:18 +0300 Subject: [PATCH 195/525] [DOC] [skip travis] Incorporated feedback on documentation from translator --- doc/pgprobackup.xml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 9b236b129..a1eb2bc50 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -2090,7 +2090,7 @@ pg_probackup restore -B backup_dir --instance , , , - and + , and processes can be executed on several parallel threads. 
This can significantly speed up pg_probackup operation given enough resources (CPU @@ -3502,7 +3502,7 @@ pg_probackup delete -B backup_dir --instance command as follows: -pg_probackup catchup -b catchup-mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream [connection_options] [remote_options] +pg_probackup catchup -b catchup_mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream [connection_options] [remote_options] Where catchup_mode can take one of the @@ -3552,12 +3552,12 @@ pg_probackup catchup -b catchup-mode --source-pgdata= a different directory, additionally specify the option: -pg_probackup catchup -b catchup-mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream --tablespace-mapping=OLDDIR=NEWDIR +pg_probackup catchup -b catchup_mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream --tablespace-mapping=OLDDIR=NEWDIR To run the catchup command on parallel threads, specify the number of threads with the option: -pg_probackup catchup -b catchup-mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream --threads=num_threads +pg_probackup catchup -b catchup_mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream --threads=num_threads @@ -4349,7 +4349,7 @@ pg_probackup delete -B backup_dir --instance For details, see the sections Deleting Backups, - Retention Options and + Retention Options, and Configuring Retention Policy. From 52840d78995c057df4647d597f3ec7861edf4791 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Thu, 19 Aug 2021 19:29:45 +0300 Subject: [PATCH 196/525] [PGPRO-5454] copying basedir to exclude the situation of overwriting memory in another thread --- src/stream.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/stream.c b/src/stream.c index 570108cde..a53077391 100644 --- a/src/stream.c +++ b/src/stream.c @@ -42,7 +42,7 @@ static time_t stream_stop_begin = 0; */ typedef struct { - const char *basedir; + char basedir[MAXPGPATH]; PGconn *conn; /* @@ -638,7 +638,7 @@ start_WAL_streaming(PGconn *backup_conn, char *stream_dst_path, ConnectionOption //TODO Add a comment about this calculation stream_stop_timeout = stream_stop_timeout + stream_stop_timeout * 0.1; - stream_thread_arg.basedir = stream_dst_path; + strncpy(stream_thread_arg.basedir, stream_dst_path, sizeof(stream_thread_arg.basedir)); /* * Connect in replication mode to the server. From 986e9ab958aef12b16cf113cebc3b94cb6f26e8c Mon Sep 17 00:00:00 2001 From: Elena Indrupskaya Date: Mon, 30 Aug 2021 14:33:51 +0300 Subject: [PATCH 197/525] [DOC] [skip travis] Incorporated more feedback on probackup documentation from translator --- doc/pgprobackup.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index a1eb2bc50..78abf0a86 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -3423,7 +3423,7 @@ pg_probackup delete -B backup_dir --instance To have a fallen-behind standby server catch up with master. - Under high write load, replicas may fail to replay WAL fast enough to keep up with master and hence may lag behind. + Under write-intensive load, replicas may fail to replay WAL fast enough to keep up with master and hence may lag behind. A usual solution to create a new replica and switch to it requires a lot of extra space and data transfer. 
The catchup command allows you to update an existing replica much faster by bringing differences from master. From 19a3efe07f0336194df2591ffaffa32a71b37542 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sat, 4 Sep 2021 17:08:44 +0300 Subject: [PATCH 198/525] [Issue #193] added instructions for Astra Linux installation --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index b7e170cf5..cc369c096 100644 --- a/README.md +++ b/README.md @@ -82,6 +82,11 @@ sudo sh -c 'echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/ /etc/apt/sources.list.d/pg_probackup.list' && sudo apt-get update sudo apt-get source pg-probackup-{13,12,11,10,9.6,9.5} +#DEB Astra Linix Orel +sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' +sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update +sudo apt-get install pg-probackup-{13,12,11,10,9.6,9.5}{-dbg,} + #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm yum install pg_probackup-{13,12,11,10,9.6,9.5} From ce32d19f360b76126341a3a34fe800abb57f95c8 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sat, 4 Sep 2021 17:11:05 +0300 Subject: [PATCH 199/525] [Issue #193] added instructions std|ent package installation for Astra Linux Orel --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index cc369c096..344b03fb3 100644 --- a/README.md +++ b/README.md @@ -140,6 +140,12 @@ sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PR sudo apt-get install pg-probackup-{std,ent}-{12,11,10,9.6} sudo apt-get install pg-probackup-{std,ent}-{12,11,10,9.6}-dbg +#DEB Astra Linix Orel +sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' +sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update +sudo apt-get install pg-probackup-{std,ent}-{12,11,10,9.6}{-dbg,} + + #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-centos.noarch.rpm yum install pg_probackup-{std,ent}-{12,11,10,9.6} From 4a7db4cf5e682441d05a8983d32c1c0a7fadc335 Mon Sep 17 00:00:00 2001 From: gsmol Date: Sun, 26 Sep 2021 01:13:04 +0300 Subject: [PATCH 200/525] built-in package infrastructure (#418) [PR #418] built-in package infrastructure --- .gitignore | 6 + Makefile | 7 + packaging/Dockerfiles/Dockerfile-altlinux_8 | 5 + packaging/Dockerfiles/Dockerfile-altlinux_9 | 5 + packaging/Dockerfiles/Dockerfile-astra_1.11 | 7 + packaging/Dockerfiles/Dockerfile-centos_7 | 5 + packaging/Dockerfiles/Dockerfile-centos_8 | 5 + packaging/Dockerfiles/Dockerfile-createrepo1C | 4 + packaging/Dockerfiles/Dockerfile-debian_10 | 7 + packaging/Dockerfiles/Dockerfile-debian_11 | 7 + packaging/Dockerfiles/Dockerfile-debian_8 | 7 + packaging/Dockerfiles/Dockerfile-debian_9 | 7 + .../Dockerfiles/Dockerfile-oraclelinux_6 | 5 + .../Dockerfiles/Dockerfile-oraclelinux_7 | 5 + .../Dockerfiles/Dockerfile-oraclelinux_8 | 5 + packaging/Dockerfiles/Dockerfile-rhel_7 | 7 + 
packaging/Dockerfiles/Dockerfile-rhel_8 | 5 + packaging/Dockerfiles/Dockerfile-rosa_6 | 5 + packaging/Dockerfiles/Dockerfile-suse_15.1 | 4 + packaging/Dockerfiles/Dockerfile-suse_15.2 | 4 + packaging/Dockerfiles/Dockerfile-ubuntu_14.04 | 7 + packaging/Dockerfiles/Dockerfile-ubuntu_16.04 | 7 + packaging/Dockerfiles/Dockerfile-ubuntu_18.04 | 7 + packaging/Dockerfiles/Dockerfile-ubuntu_18.10 | 7 + packaging/Dockerfiles/Dockerfile-ubuntu_20.04 | 8 + packaging/Makefile.pkg | 191 ++++++++++++++++++ packaging/Makefile.repo | 167 +++++++++++++++ packaging/Makefile.test | 145 +++++++++++++ packaging/Readme.md | 21 ++ packaging/pkg/Makefile.alt | 74 +++++++ packaging/pkg/Makefile.centos | 49 +++++ packaging/pkg/Makefile.debian | 99 +++++++++ packaging/pkg/Makefile.oraclelinux | 74 +++++++ packaging/pkg/Makefile.rhel | 49 +++++ packaging/pkg/Makefile.suse | 49 +++++ packaging/pkg/Makefile.ubuntu | 99 +++++++++ packaging/pkg/scripts/alt.sh | 123 +++++++++++ packaging/pkg/scripts/deb.sh | 148 ++++++++++++++ packaging/pkg/scripts/rpm.sh | 148 ++++++++++++++ packaging/pkg/scripts/suse.sh | 95 +++++++++ .../specs/deb/pg_probackup/debian/changelog | 11 + .../pkg/specs/deb/pg_probackup/debian/compat | 1 + .../pkg/specs/deb/pg_probackup/debian/control | 29 +++ .../pg_probackup/debian/pg_probackup.install | 1 + .../pkg/specs/deb/pg_probackup/debian/rules | 29 +++ .../deb/pg_probackup/debian/source/format | 1 + .../rpm/rpmbuild/SOURCES/GPG-KEY-PG_PROBACKUP | 52 +++++ .../SOURCES/GPG-KEY-PG_PROBACKUP-FORKS | 52 +++++ .../rpmbuild/SOURCES/pg_probackup-forks.repo | 6 + .../rpm/rpmbuild/SOURCES/pg_probackup.repo | 13 ++ .../rpmbuild/SPECS/pg_probackup-pgpro.spec | 71 +++++++ .../SPECS/pg_probackup-repo-forks.spec | 49 +++++ .../rpm/rpmbuild/SPECS/pg_probackup-repo.spec | 58 ++++++ .../SPECS/pg_probackup.alt.forks.spec | 67 ++++++ .../rpm/rpmbuild/SPECS/pg_probackup.alt.spec | 48 +++++ .../rpm/rpmbuild/SPECS/pg_probackup.spec | 48 +++++ packaging/pkg/tarballs/.gitkeep | 0 packaging/repo/scripts/alt.sh | 62 ++++++ packaging/repo/scripts/deb.sh | 51 +++++ packaging/repo/scripts/rpm.sh | 65 ++++++ packaging/repo/scripts/suse.sh | 72 +++++++ packaging/test/Makefile.alt | 20 ++ packaging/test/Makefile.centos | 41 ++++ packaging/test/Makefile.debian | 41 ++++ packaging/test/Makefile.oraclelinux | 41 ++++ packaging/test/Makefile.rhel | 41 ++++ packaging/test/Makefile.suse | 41 ++++ packaging/test/Makefile.ubuntu | 62 ++++++ packaging/test/scripts/alt.sh | 72 +++++++ packaging/test/scripts/alt_forks.sh | 75 +++++++ packaging/test/scripts/deb.sh | 136 +++++++++++++ packaging/test/scripts/deb_forks.sh | 149 ++++++++++++++ packaging/test/scripts/rpm.sh | 166 +++++++++++++++ packaging/test/scripts/rpm_forks.sh | 173 ++++++++++++++++ packaging/test/scripts/suse.sh | 128 ++++++++++++ packaging/test/scripts/suse_forks.sh | 5 + 76 files changed, 3656 insertions(+) create mode 100644 packaging/Dockerfiles/Dockerfile-altlinux_8 create mode 100644 packaging/Dockerfiles/Dockerfile-altlinux_9 create mode 100644 packaging/Dockerfiles/Dockerfile-astra_1.11 create mode 100644 packaging/Dockerfiles/Dockerfile-centos_7 create mode 100644 packaging/Dockerfiles/Dockerfile-centos_8 create mode 100644 packaging/Dockerfiles/Dockerfile-createrepo1C create mode 100644 packaging/Dockerfiles/Dockerfile-debian_10 create mode 100644 packaging/Dockerfiles/Dockerfile-debian_11 create mode 100644 packaging/Dockerfiles/Dockerfile-debian_8 create mode 100644 packaging/Dockerfiles/Dockerfile-debian_9 create mode 100644 
packaging/Dockerfiles/Dockerfile-oraclelinux_6 create mode 100644 packaging/Dockerfiles/Dockerfile-oraclelinux_7 create mode 100644 packaging/Dockerfiles/Dockerfile-oraclelinux_8 create mode 100644 packaging/Dockerfiles/Dockerfile-rhel_7 create mode 100644 packaging/Dockerfiles/Dockerfile-rhel_8 create mode 100644 packaging/Dockerfiles/Dockerfile-rosa_6 create mode 100644 packaging/Dockerfiles/Dockerfile-suse_15.1 create mode 100644 packaging/Dockerfiles/Dockerfile-suse_15.2 create mode 100644 packaging/Dockerfiles/Dockerfile-ubuntu_14.04 create mode 100644 packaging/Dockerfiles/Dockerfile-ubuntu_16.04 create mode 100644 packaging/Dockerfiles/Dockerfile-ubuntu_18.04 create mode 100644 packaging/Dockerfiles/Dockerfile-ubuntu_18.10 create mode 100644 packaging/Dockerfiles/Dockerfile-ubuntu_20.04 create mode 100644 packaging/Makefile.pkg create mode 100644 packaging/Makefile.repo create mode 100644 packaging/Makefile.test create mode 100644 packaging/Readme.md create mode 100644 packaging/pkg/Makefile.alt create mode 100644 packaging/pkg/Makefile.centos create mode 100644 packaging/pkg/Makefile.debian create mode 100644 packaging/pkg/Makefile.oraclelinux create mode 100644 packaging/pkg/Makefile.rhel create mode 100644 packaging/pkg/Makefile.suse create mode 100644 packaging/pkg/Makefile.ubuntu create mode 100755 packaging/pkg/scripts/alt.sh create mode 100755 packaging/pkg/scripts/deb.sh create mode 100755 packaging/pkg/scripts/rpm.sh create mode 100755 packaging/pkg/scripts/suse.sh create mode 100644 packaging/pkg/specs/deb/pg_probackup/debian/changelog create mode 100644 packaging/pkg/specs/deb/pg_probackup/debian/compat create mode 100644 packaging/pkg/specs/deb/pg_probackup/debian/control create mode 100644 packaging/pkg/specs/deb/pg_probackup/debian/pg_probackup.install create mode 100644 packaging/pkg/specs/deb/pg_probackup/debian/rules create mode 100644 packaging/pkg/specs/deb/pg_probackup/debian/source/format create mode 100644 packaging/pkg/specs/rpm/rpmbuild/SOURCES/GPG-KEY-PG_PROBACKUP create mode 100644 packaging/pkg/specs/rpm/rpmbuild/SOURCES/GPG-KEY-PG_PROBACKUP-FORKS create mode 100644 packaging/pkg/specs/rpm/rpmbuild/SOURCES/pg_probackup-forks.repo create mode 100644 packaging/pkg/specs/rpm/rpmbuild/SOURCES/pg_probackup.repo create mode 100644 packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-pgpro.spec create mode 100644 packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-repo-forks.spec create mode 100644 packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-repo.spec create mode 100644 packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup.alt.forks.spec create mode 100644 packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup.alt.spec create mode 100644 packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup.spec create mode 100644 packaging/pkg/tarballs/.gitkeep create mode 100755 packaging/repo/scripts/alt.sh create mode 100755 packaging/repo/scripts/deb.sh create mode 100755 packaging/repo/scripts/rpm.sh create mode 100755 packaging/repo/scripts/suse.sh create mode 100644 packaging/test/Makefile.alt create mode 100644 packaging/test/Makefile.centos create mode 100644 packaging/test/Makefile.debian create mode 100644 packaging/test/Makefile.oraclelinux create mode 100644 packaging/test/Makefile.rhel create mode 100644 packaging/test/Makefile.suse create mode 100644 packaging/test/Makefile.ubuntu create mode 100755 packaging/test/scripts/alt.sh create mode 100755 packaging/test/scripts/alt_forks.sh create mode 100755 packaging/test/scripts/deb.sh create mode 100755 
packaging/test/scripts/deb_forks.sh create mode 100755 packaging/test/scripts/rpm.sh create mode 100755 packaging/test/scripts/rpm_forks.sh create mode 100755 packaging/test/scripts/suse.sh create mode 100644 packaging/test/scripts/suse_forks.sh diff --git a/.gitignore b/.gitignore index 474df1c73..c0b4de331 100644 --- a/.gitignore +++ b/.gitignore @@ -54,6 +54,12 @@ /make_dockerfile.sh /backup_restore.sh +# Packaging +/build +/packaging/pkg/tarballs/pgpro.tar.bz2 +/packaging/repo/pg_probackup +/packaging/repo/pg_probackup-forks + # Misc .python-version .vscode diff --git a/Makefile b/Makefile index 1431be4ef..4e463bf7c 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,7 @@ PROGRAM = pg_probackup +WORKDIR ?= $(CURDIR) +BUILDDIR = $(WORKDIR)/build/ +PBK_GIT_REPO = http://github.com/postgrespro/pg_probackup # utils OBJS = src/utils/configuration.o src/utils/json.o src/utils/logger.o \ @@ -80,3 +83,7 @@ src/walmethods.h: $(srchome)/src/bin/pg_basebackup/walmethods.h ifeq ($(PORTNAME), aix) CC=xlc_r endif + +include packaging/Makefile.pkg +include packaging/Makefile.repo +include packaging/Makefile.test diff --git a/packaging/Dockerfiles/Dockerfile-altlinux_8 b/packaging/Dockerfiles/Dockerfile-altlinux_8 new file mode 100644 index 000000000..961aa43dd --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-altlinux_8 @@ -0,0 +1,5 @@ +FROM alt:p8 +RUN ulimit -n 1024 && apt-get update -y && apt-get install -y tar wget rpm-build +RUN ulimit -n 1024 && apt-get install -y make perl libicu-devel glibc-devel bison flex +RUN ulimit -n 1024 && apt-get install -y git perl-devel readline-devel libxml2-devel libxslt-devel python-devel zlib-devel openssl-devel libkrb5 libkrb5-devel +RUN ulimit -n 1024 && apt-get upgrade -y diff --git a/packaging/Dockerfiles/Dockerfile-altlinux_9 b/packaging/Dockerfiles/Dockerfile-altlinux_9 new file mode 100644 index 000000000..a75728720 --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-altlinux_9 @@ -0,0 +1,5 @@ +FROM alt:p9 +RUN ulimit -n 1024 && apt-get update -y && apt-get install -y tar wget rpm-build +RUN ulimit -n 1024 && apt-get install -y make perl libicu-devel glibc-devel bison flex +RUN ulimit -n 1024 && apt-get install -y git perl-devel readline-devel libxml2-devel libxslt-devel python-devel zlib-devel openssl-devel libkrb5 libkrb5-devel +RUN ulimit -n 1024 && apt-get dist-upgrade -y diff --git a/packaging/Dockerfiles/Dockerfile-astra_1.11 b/packaging/Dockerfiles/Dockerfile-astra_1.11 new file mode 100644 index 000000000..7db4999cd --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-astra_1.11 @@ -0,0 +1,7 @@ +FROM pgpro/astra:1.11 +RUN apt-get update -y +RUN apt-get install -y devscripts +RUN apt-get install -y dpkg-dev lsb-release git equivs wget vim +RUN apt-get install -y cmake bison flex libboost-all-dev +RUN apt-get install reprepro -y +RUN apt-get upgrade -y diff --git a/packaging/Dockerfiles/Dockerfile-centos_7 b/packaging/Dockerfiles/Dockerfile-centos_7 new file mode 100644 index 000000000..363440e85 --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-centos_7 @@ -0,0 +1,5 @@ +FROM centos:7 +RUN yum install -y tar wget rpm-build yum-utils +RUN yum install -y gcc make perl libicu-devel glibc-devel bison flex +RUN yum install -y git +RUN yum upgrade -y diff --git a/packaging/Dockerfiles/Dockerfile-centos_8 b/packaging/Dockerfiles/Dockerfile-centos_8 new file mode 100644 index 000000000..9de1d31b1 --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-centos_8 @@ -0,0 +1,5 @@ +FROM centos:8 +RUN yum install -y tar wget rpm-build 
yum-utils +RUN yum install -y gcc make perl libicu-devel glibc-devel bison flex +RUN yum install -y git +RUN yum upgrade -y diff --git a/packaging/Dockerfiles/Dockerfile-createrepo1C b/packaging/Dockerfiles/Dockerfile-createrepo1C new file mode 100644 index 000000000..d987c4f5f --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-createrepo1C @@ -0,0 +1,4 @@ +FROM ubuntu:17.10 +RUN apt-get -qq update -y +RUN apt-get -qq install -y reprepro rpm createrepo gnupg rsync perl less wget expect rsync dpkg-dev +RUN apt-get upgrade -y diff --git a/packaging/Dockerfiles/Dockerfile-debian_10 b/packaging/Dockerfiles/Dockerfile-debian_10 new file mode 100644 index 000000000..f25ceeac5 --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-debian_10 @@ -0,0 +1,7 @@ +FROM debian:10 +RUN apt-get update -y +RUN apt-get install -y devscripts +RUN apt-get install -y dpkg-dev lsb-release git equivs wget vim +RUN apt-get install -y cmake bison flex libboost-all-dev +RUN apt-get install -y reprepro +RUN apt-get upgrade -y diff --git a/packaging/Dockerfiles/Dockerfile-debian_11 b/packaging/Dockerfiles/Dockerfile-debian_11 new file mode 100644 index 000000000..db736c193 --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-debian_11 @@ -0,0 +1,7 @@ +FROM debian:11 +RUN apt-get update -y +RUN apt-get install -y devscripts +RUN apt-get install -y dpkg-dev lsb-release git equivs wget vim +RUN apt-get install -y cmake bison flex libboost-all-dev +RUN apt-get install -y reprepro +RUN apt-get upgrade -y diff --git a/packaging/Dockerfiles/Dockerfile-debian_8 b/packaging/Dockerfiles/Dockerfile-debian_8 new file mode 100644 index 000000000..0be9528bb --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-debian_8 @@ -0,0 +1,7 @@ +FROM debian:8 +RUN apt-get update -y +RUN apt-get install -y devscripts +RUN apt-get install -y dpkg-dev lsb-release git equivs wget vim +RUN apt-get install -y cmake bison flex libboost-all-dev +RUN apt-get install -y reprepro +RUN apt-get upgrade -y diff --git a/packaging/Dockerfiles/Dockerfile-debian_9 b/packaging/Dockerfiles/Dockerfile-debian_9 new file mode 100644 index 000000000..6ca10faa8 --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-debian_9 @@ -0,0 +1,7 @@ +FROM debian:9 +RUN apt-get update -y +RUN apt-get install -y devscripts +RUN apt-get install -y dpkg-dev lsb-release git equivs wget vim +RUN apt-get install -y cmake bison flex libboost-all-dev +RUN apt-get install -y reprepro +RUN apt-get upgrade -y diff --git a/packaging/Dockerfiles/Dockerfile-oraclelinux_6 b/packaging/Dockerfiles/Dockerfile-oraclelinux_6 new file mode 100644 index 000000000..04325e869 --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-oraclelinux_6 @@ -0,0 +1,5 @@ +FROM oraclelinux:6 +RUN yum install -y tar wget rpm-build yum-utils +RUN yum install -y gcc make perl libicu-devel glibc-devel bison flex +RUN yum install -y git openssl +RUN yum upgrade -y diff --git a/packaging/Dockerfiles/Dockerfile-oraclelinux_7 b/packaging/Dockerfiles/Dockerfile-oraclelinux_7 new file mode 100644 index 000000000..871d920eb --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-oraclelinux_7 @@ -0,0 +1,5 @@ +FROM oraclelinux:7 +RUN yum install -y tar wget rpm-build yum-utils +RUN yum install -y gcc make perl libicu-devel glibc-devel bison flex +RUN yum install -y git openssl +RUN yum upgrade -y diff --git a/packaging/Dockerfiles/Dockerfile-oraclelinux_8 b/packaging/Dockerfiles/Dockerfile-oraclelinux_8 new file mode 100644 index 000000000..32e7cb03f --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-oraclelinux_8 @@ -0,0 +1,5 @@ 
+FROM oraclelinux:8 +RUN yum install -y tar wget rpm-build yum-utils +RUN yum install -y gcc make perl libicu-devel glibc-devel bison flex +RUN yum install -y git openssl +RUN yum upgrade -y diff --git a/packaging/Dockerfiles/Dockerfile-rhel_7 b/packaging/Dockerfiles/Dockerfile-rhel_7 new file mode 100644 index 000000000..322c44b59 --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-rhel_7 @@ -0,0 +1,7 @@ +FROM registry.access.redhat.com/ubi7 +RUN yum install -y http://mirror.centos.org/centos/7/os/x86_64/Packages/elfutils-0.176-5.el7.x86_64.rpm +RUN yum install -y http://mirror.centos.org/centos/7/os/x86_64/Packages/rpm-build-4.11.3-45.el7.x86_64.rpm +RUN yum install -y tar wget yum-utils +RUN yum install -y gcc make perl libicu-devel glibc-devel +RUN yum install -y git +RUN yum upgrade -y diff --git a/packaging/Dockerfiles/Dockerfile-rhel_8 b/packaging/Dockerfiles/Dockerfile-rhel_8 new file mode 100644 index 000000000..c8e1e225e --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-rhel_8 @@ -0,0 +1,5 @@ +FROM registry.access.redhat.com/ubi8 +RUN yum install -y tar wget rpm-build yum-utils +RUN yum install -y gcc make perl libicu-devel glibc-devel +RUN yum install -y git +RUN yum upgrade -y diff --git a/packaging/Dockerfiles/Dockerfile-rosa_6 b/packaging/Dockerfiles/Dockerfile-rosa_6 new file mode 100644 index 000000000..42fa913e1 --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-rosa_6 @@ -0,0 +1,5 @@ +FROM pgpro/rosa-6 +RUN yum install -y tar wget rpm-build yum-utils +RUN yum install -y gcc make perl libicu-devel glibc-devel bison flex +RUN yum install -y git +RUN yum upgrade -y diff --git a/packaging/Dockerfiles/Dockerfile-suse_15.1 b/packaging/Dockerfiles/Dockerfile-suse_15.1 new file mode 100644 index 000000000..afc9434a2 --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-suse_15.1 @@ -0,0 +1,4 @@ +FROM opensuse/leap:15.1 +RUN ulimit -n 1024 && zypper install -y tar wget rpm-build +RUN ulimit -n 1024 && zypper install -y gcc make perl libicu-devel glibc-devel bison flex +RUN ulimit -n 1024 && zypper install -y git rsync diff --git a/packaging/Dockerfiles/Dockerfile-suse_15.2 b/packaging/Dockerfiles/Dockerfile-suse_15.2 new file mode 100644 index 000000000..7e56e299a --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-suse_15.2 @@ -0,0 +1,4 @@ +FROM opensuse/leap:15.2 +RUN ulimit -n 1024 && zypper install -y tar wget rpm-build +RUN ulimit -n 1024 && zypper install -y gcc make perl libicu-devel glibc-devel bison flex +RUN ulimit -n 1024 && zypper install -y git rsync diff --git a/packaging/Dockerfiles/Dockerfile-ubuntu_14.04 b/packaging/Dockerfiles/Dockerfile-ubuntu_14.04 new file mode 100644 index 000000000..10132f826 --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-ubuntu_14.04 @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +RUN apt-get update -y +RUN apt-get install -y devscripts +RUN apt-get install -y dpkg-dev lsb-release git equivs wget vim +RUN apt-get install -y cmake bison flex libboost-all-dev +RUN apt-get install -y reprepro +RUN apt-get upgrade -y diff --git a/packaging/Dockerfiles/Dockerfile-ubuntu_16.04 b/packaging/Dockerfiles/Dockerfile-ubuntu_16.04 new file mode 100644 index 000000000..d511829c0 --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-ubuntu_16.04 @@ -0,0 +1,7 @@ +FROM ubuntu:16.04 +RUN apt-get update -y +RUN apt-get install -y devscripts +RUN apt-get install -y dpkg-dev lsb-release git equivs wget vim +RUN apt-get install -y cmake bison flex libboost-all-dev +RUN apt-get install -y reprepro +RUN apt-get upgrade -y 
diff --git a/packaging/Dockerfiles/Dockerfile-ubuntu_18.04 b/packaging/Dockerfiles/Dockerfile-ubuntu_18.04 new file mode 100644 index 000000000..20a8567e0 --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-ubuntu_18.04 @@ -0,0 +1,7 @@ +FROM ubuntu:18.04 +RUN apt-get update -y +RUN apt-get install -y devscripts +RUN apt-get install -y dpkg-dev lsb-release git equivs wget vim +RUN apt-get install -y cmake bison flex libboost-all-dev +RUN apt-get install -y reprepro +RUN apt-get upgrade -y diff --git a/packaging/Dockerfiles/Dockerfile-ubuntu_18.10 b/packaging/Dockerfiles/Dockerfile-ubuntu_18.10 new file mode 100644 index 000000000..66cefff16 --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-ubuntu_18.10 @@ -0,0 +1,7 @@ +FROM ubuntu:18.10 +RUN apt-get update -y +RUN apt-get install -y devscripts +RUN apt-get install -y dpkg-dev lsb-release git equivs wget vim +RUN apt-get install -y cmake bison flex libboost-all-dev +RUN apt-get install -y reprepro +RUN apt-get upgrade -y diff --git a/packaging/Dockerfiles/Dockerfile-ubuntu_20.04 b/packaging/Dockerfiles/Dockerfile-ubuntu_20.04 new file mode 100644 index 000000000..79288d308 --- /dev/null +++ b/packaging/Dockerfiles/Dockerfile-ubuntu_20.04 @@ -0,0 +1,8 @@ +FROM ubuntu:20.04 +ENV DEBIAN_FRONTEND noninteractive +RUN ulimit -n 1024 && apt-get update -y +RUN ulimit -n 1024 && apt-get install -y devscripts +RUN ulimit -n 1024 && apt-get install -y dpkg-dev lsb-release git equivs wget vim +RUN ulimit -n 1024 && apt-get install -y cmake bison flex libboost-all-dev +RUN ulimit -n 1024 && apt-get install -y reprepro +RUN ulimit -n 1024 && apt-get upgrade -y diff --git a/packaging/Makefile.pkg b/packaging/Makefile.pkg new file mode 100644 index 000000000..bfe2043c3 --- /dev/null +++ b/packaging/Makefile.pkg @@ -0,0 +1,191 @@ +ifeq ($(PBK_EDITION),std) + PBK_PKG_REPO = pg_probackup-forks + PBK_EDITION_FULL = Standart + PKG_NAME_SUFFIX = std- +else ifeq ($(PBK_EDITION),ent) + PBK_PKG_REPO = pg_probackup-forks + PBK_EDITION_FULL = Enterprise + PKG_NAME_SUFFIX = ent- +else + PBK_PKG_REPO = pg_probackup + PBK_EDITION_FULL = + PBK_EDITION = + PKG_NAME_SUFFIX = +endif + +check_env: + @if [ -z ${PBK_VERSION} ] ; then \ + echo "Env variable PBK_VERSION is not set" ; \ + false ; \ + fi + + @if [ -z ${PBK_RELEASE} ] ; then \ + echo "Env variable PBK_RELEASE is not set" ; \ + false ; \ + fi + + @if [ -z ${PBK_HASH} ] ; then \ + echo "Env variable PBK_HASH is not set" ; \ + false ; \ + fi + +pkg: check_env build/prepare build/all + @echo Build for all platform: done + +build/prepare: + mkdir -p build + +build/clean: build/prepare + find $(BUILDDIR) -maxdepth 1 -type f -exec rm -f {} \; + +build/all: build/debian build/ubuntu build/centos build/oraclelinux build/alt build/suse # build/rhel + @echo Packaging is done + +### DEBIAN +build/debian: build/debian_8 build/debian_9 build/debian_10 build/debian_11 + @echo Debian: done + +build/debian_8: build/debian_8_9.5 build/debian_8_9.6 build/debian_8_10 build/debian_8_11 build/debian_8_12 build/debian_8_13 + @echo Debian 8: done + +build/debian_9: build/debian_9_9.5 build/debian_9_9.6 build/debian_9_10 build/debian_9_11 build/debian_9_12 build/debian_9_13 + @echo Debian 9: done + +build/debian_10: build/debian_10_9.5 build/debian_10_9.6 build/debian_10_10 build/debian_10_11 build/debian_10_12 build/debian_10_13 + @echo Debian 10: done + +build/debian_11: build/debian_11_9.5 build/debian_11_9.6 build/debian_11_10 build/debian_11_11 build/debian_11_12 build/debian_11_13 + @echo Debian 11: done + +### UBUNTU 
+build/ubuntu: build/ubuntu_14.04 build/ubuntu_16.04 build/ubuntu_18.04 build/ubuntu_20.04 + @echo Ubuntu: done + +build/ubuntu_14.04: build/ubuntu_14.04_9.5 build/ubuntu_14.04_9.6 build/ubuntu_14.04_10 build/ubuntu_14.04_11 build/ubuntu_14.04_12 build/ubuntu_14.04_13 + @echo Ubuntu 14.04: done + +build/ubuntu_16.04: build/ubuntu_16.04_9.5 build/ubuntu_16.04_9.6 build/ubuntu_16.04_10 build/ubuntu_16.04_11 build/ubuntu_16.04_12 build/ubuntu_16.04_13 + @echo Ubuntu 16.04: done + +build/ubuntu_18.04: build/ubuntu_18.04_9.5 build/ubuntu_18.04_9.6 build/ubuntu_18.04_10 build/ubuntu_18.04_11 build/ubuntu_18.04_12 build/ubuntu_18.04_13 + @echo Ubuntu 18.04: done + +build/ubuntu_20.04: build/ubuntu_20.04_9.5 build/ubuntu_20.04_9.6 build/ubuntu_20.04_10 build/ubuntu_20.04_11 build/ubuntu_20.04_12 build/ubuntu_20.04_13 + @echo Ubuntu 20.04: done + +define build_deb + docker rm -f $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION) >> /dev/null 2>&1 ; \ + docker run \ + -v $(WORKDIR)/packaging/pkg:/app/in \ + -v $(WORKDIR)/build/data/$(PBK_PKG_REPO)/$1/$2/pg-probackup-$(PKG_NAME_SUFFIX)$4/$(PBK_VERSION):/app/out \ + -e "DISTRIB=$1" -e "DISTRIB_VERSION=$2" -e "CODENAME=$3" -e "PG_VERSION=$4" -e "PG_FULL_VERSION=$5" \ + -e "PKG_HASH=$(PBK_HASH)" -e "PKG_URL=$(PBK_GIT_REPO)" -e "PKG_RELEASE=$(PBK_RELEASE)" -e "PKG_NAME=pg-probackup-$(PKG_NAME_SUFFIX)$4" \ + -e "PKG_VERSION=$(PBK_VERSION)" -e "PBK_EDITION=$(PBK_EDITION)" -e "PBK_EDITION_FULL=$(PBK_EDITION_FULL)" \ + --name $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION)_pg_$5 \ + --rm pgpro/$1:$2 /app/in/scripts/deb.sh +endef + +include packaging/pkg/Makefile.debian +include packaging/pkg/Makefile.ubuntu + +# CENTOS +build/centos: build/centos_7 build/centos_8 #build/rpm_repo_package_centos + @echo Centos: done + +build/centos_7: build/centos_7_9.5 build/centos_7_9.6 build/centos_7_10 build/centos_7_11 build/centos_7_12 build/centos_7_13 + @echo Centos 7: done + +build/centos_8: build/centos_8_9.5 build/centos_8_9.6 build/centos_8_10 build/centos_8_11 build/centos_8_12 build/centos_8_13 + @echo Centos 8: done + +# Oracle Linux +build/oraclelinux: build/oraclelinux_6 build/oraclelinux_7 build/oraclelinux_8 #build/rpm_repo_package_oraclelinux + @echo Oraclelinux: done + +build/oraclelinux_6: build/oraclelinux_6_9.5 build/oraclelinux_6_9.6 build/oraclelinux_6_10 build/oraclelinux_6_11 build/oraclelinux_6_12 build/oraclelinux_6_13 + @echo Oraclelinux 6: done + +build/oraclelinux_7: build/oraclelinux_7_9.5 build/oraclelinux_7_9.6 build/oraclelinux_7_10 build/oraclelinux_7_11 build/oraclelinux_7_12 build/oraclelinux_7_13 + @echo Oraclelinux 7: done + +build/oraclelinux_8: build/oraclelinux_8_9.5 build/oraclelinux_8_9.6 build/oraclelinux_8_10 build/oraclelinux_8_11 build/oraclelinux_8_12 build/oraclelinux_8_13 + @echo Oraclelinux 8: done + +# RHEL +build/rhel: build/rhel_7 build/rhel_8 #build/rpm_repo_package_rhel + @echo Rhel: done + +build/rhel_7: build/rhel_7_9.5 build/rhel_7_9.6 build/rhel_7_10 build/rhel_7_11 build/rhel_7_12 build/rhel_7_13 + @echo Rhel 7: done + +build/rhel_8: build/rhel_8_9.5 build/rhel_8_9.6 build/rhel_8_10 build/rhel_8_11 build/rhel_8_12 build/rhel_8_13 + @echo Rhel 8: done + + +define build_rpm + docker rm -f $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION) >> /dev/null 2>&1 ; \ + docker run \ + -v $(WORKDIR)/packaging/pkg:/app/in \ + -v $(WORKDIR)/build/data/$(PBK_PKG_REPO)/$1/$2/pg_probackup-$(PKG_NAME_SUFFIX)$4/$(PBK_VERSION):/app/out \ + -e "DISTRIB=$1" -e "DISTRIB_VERSION=$2" -e "CODENAME=$3" -e "PG_VERSION=$4" -e 
"PG_FULL_VERSION=$5" \ + -e "PKG_HASH=$(PBK_HASH)" -e "PKG_URL=$(PBK_GIT_REPO)" -e "PKG_RELEASE=$(PBK_RELEASE)" -e "PKG_NAME=pg_probackup-$(PKG_NAME_SUFFIX)$4" \ + -e "PKG_VERSION=$(PBK_VERSION)" -e "PBK_EDITION=$(PBK_EDITION)" -e "PBK_EDITION_FULL=$(PBK_EDITION_FULL)" \ + --name $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION)_pg_$5 \ + --rm pgpro/$1:$2 /app/in/scripts/rpm.sh +endef + +include packaging/pkg/Makefile.centos +include packaging/pkg/Makefile.rhel +include packaging/pkg/Makefile.oraclelinux + + +# Alt Linux +build/alt: build/alt_7 build/alt_8 build/alt_9 + @echo Alt Linux: done + +build/alt_7: build/alt_7_9.5 build/alt_7_9.6 build/alt_7_10 build/alt_7_11 build/alt_7_12 build/alt_7_13 + @echo Alt Linux 7: done + +build/alt_8: build/alt_8_9.5 build/alt_8_9.6 build/alt_8_10 build/alt_8_11 build/alt_8_12 build/alt_8_13 + @echo Alt Linux 8: done + +build/alt_9: build/alt_9_9.5 build/alt_9_9.6 build/alt_9_10 build/alt_9_11 build/alt_9_12 build/alt_9_13 + @echo Alt Linux 9: done + +define build_alt + docker rm -f $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION) >> /dev/null 2>&1 ; \ + docker run \ + -v $(WORKDIR)/packaging/pkg:/app/in \ + -v $(WORKDIR)/build/data/$(PBK_PKG_REPO)/$1/$2/pg_probackup-$(PKG_NAME_SUFFIX)$4/$(PBK_VERSION):/app/out \ + -e "DISTRIB=$1" -e "DISTRIB_VERSION=$2" -e "CODENAME=$3" -e "PG_VERSION=$4" -e "PG_FULL_VERSION=$5" \ + -e "PKG_HASH=$(PBK_HASH)" -e "PKG_URL=$(PBK_GIT_REPO)" -e "PKG_RELEASE=$(PBK_RELEASE)" -e "PKG_NAME=pg_probackup-$(PKG_NAME_SUFFIX)$4" \ + -e "PKG_VERSION=$(PBK_VERSION)" -e "PBK_EDITION=$(PBK_EDITION)" -e "PBK_EDITION_FULL=$(PBK_EDITION_FULL)" \ + --name $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION)_pg_$5 \ + --rm pgpro/$1:$2 /app/in/scripts/alt.sh +endef + +include packaging/pkg/Makefile.alt + +# SUSE Linux +build/suse: build/suse_15.1 build/suse_15.2 + @echo Suse: done + +build/suse_15.1: build/suse_15.1_9.5 build/suse_15.1_9.6 build/suse_15.1_10 build/suse_15.1_11 build/suse_15.1_12 build/suse_15.1_13 + @echo Rhel 15.1: done + +build/suse_15.2: build/suse_15.2_9.5 build/suse_15.2_9.6 build/suse_15.2_10 build/suse_15.2_11 build/suse_15.2_12 build/suse_15.2_13 + @echo Rhel 15.1: done + +define build_suse + docker rm -f $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION) >> /dev/null 2>&1 ; \ + docker run \ + -v $(WORKDIR)/packaging/pkg:/app/in \ + -v $(WORKDIR)/build/data/$(PBK_PKG_REPO)/$1/$2/pg_probackup-$(PKG_NAME_SUFFIX)$4/$(PBK_VERSION):/app/out \ + -e "DISTRIB=$1" -e "DISTRIB_VERSION=$2" -e "CODENAME=$3" -e "PG_VERSION=$4" -e "PG_FULL_VERSION=$5" \ + -e "PKG_HASH=$(PBK_HASH)" -e "PKG_URL=$(PBK_GIT_REPO)" -e "PKG_RELEASE=$(PBK_RELEASE)" -e "PKG_NAME=pg_probackup-$(PKG_NAME_SUFFIX)$4" \ + -e "PKG_VERSION=$(PBK_VERSION)" -e "PBK_EDITION=$(PBK_EDITION)" -e "PBK_EDITION_FULL=$(PBK_EDITION_FULL)" \ + --name $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION)_pg_$5 \ + --rm pgpro/$1:$2 /app/in/scripts/suse.sh +endef + +include packaging/pkg/Makefile.suse diff --git a/packaging/Makefile.repo b/packaging/Makefile.repo new file mode 100644 index 000000000..986c827e9 --- /dev/null +++ b/packaging/Makefile.repo @@ -0,0 +1,167 @@ +#### REPO BUILD #### +repo: check_env repo/debian repo/ubuntu repo/centos repo/oraclelinux repo/alt repo/suse repo_finish #repo/rhel + @echo Build repo for all platform: done + +# Debian +repo/debian: build/repo_debian_8 build/repo_debian_9 build/repo_debian_10 build/repo_debian_11 + @echo Build repo for debian platforms: done + +build/repo_debian_8: + $(call build_repo_deb,debian,8,jessie) + touch build/repo_debian_8 + 
+build/repo_debian_9: + $(call build_repo_deb,debian,9,stretch) + touch build/repo_debian_9 + +build/repo_debian_10: + $(call build_repo_deb,debian,10,buster) + touch build/repo_debian_10 + +build/repo_debian_11: + $(call build_repo_deb,debian,11,bullseye) + touch build/repo_debian_11 + +# Ubuntu +repo/ubuntu: build/repo_ubuntu_14.04 build/repo_ubuntu_16.04 build/repo_ubuntu_18.04 build/repo_ubuntu_20.04 + @echo Build repo for ubuntu platforms: done + +build/repo_ubuntu_14.04: + $(call build_repo_deb,ubuntu,14.04,trusty) + touch build/repo_ubuntu_14.04 + +build/repo_ubuntu_16.04: + $(call build_repo_deb,ubuntu,16.04,xenial) + touch build/repo_ubuntu_16.04 + +build/repo_ubuntu_18.04: + $(call build_repo_deb,ubuntu,18.04,bionic) + touch build/repo_ubuntu_18.04 + +build/repo_ubuntu_20.04: + $(call build_repo_deb,ubuntu,20.04,focal) + touch build/repo_ubuntu_20.04 + +# Centos +repo/centos: build/repo_centos_7 build/repo_centos_8 + @echo Build repo for centos platforms: done + +build/repo_centos_7: + $(call build_repo_rpm,centos,7,,) + touch build/repo_centos_7 + +build/repo_centos_8: + $(call build_repo_rpm,centos,8,,) + touch build/repo_centos_8 + +# Oraclelinux +repo/oraclelinux: build/repo_oraclelinux_6 build/repo_oraclelinux_7 build/repo_oraclelinux_8 + @echo Build repo for oraclelinux platforms: done + +build/repo_oraclelinux_6: + $(call build_repo_rpm,oraclelinux,6,6Server) + touch build/repo_oraclelinux_6 + +build/repo_oraclelinux_7: + $(call build_repo_rpm,oraclelinux,7,7Server) + touch build/repo_oraclelinux_7 + +build/repo_oraclelinux_8: + $(call build_repo_rpm,oraclelinux,8,,) + touch build/repo_oraclelinux_8 + +# RHEL +repo/rhel: build/repo_rhel_7 build/repo_rhel_8 + @echo Build repo for rhel platforms: done + +build/repo_rhel_7: + $(call build_repo_rpm,rhel,7,7Server) + touch build/repo_rhel_7 + +build/repo_rhel_8: + $(call build_repo_rpm,rhel,8,,) + touch build/repo_rhel_8 + +# ALT +repo/alt: build/repo_alt_7 build/repo_alt_8 build/repo_alt_9 + @echo Build repo for alt platforms: done + +build/repo_alt_7: + $(call build_repo_alt,alt,7,,) + touch build/repo_alt_7 + +build/repo_alt_8: + $(call build_repo_alt,alt,8,,) + touch build/repo_alt_8 + +build/repo_alt_9: + $(call build_repo_alt,alt,9,,) + touch build/repo_alt_9 + +# SUSE +repo/suse: build/repo_suse_15.1 build/repo_suse_15.2 + @echo Build repo for suse platforms: done + +build/repo_suse_15.1: + $(call build_repo_suse,suse,15.1,,) + touch build/repo_suse_15.1 + +build/repo_suse_15.2: + $(call build_repo_suse,suse,15.2,,) + touch build/repo_suse_15.2 + +repo_finish: +# cd build/data/www/$(PBK_PKG_REPO)/ + cd $(BUILDDIR)/data/www/$(PBK_PKG_REPO)/rpm && sudo ln -nsf $(PBK_VERSION) latest + cd $(BUILDDIR)/data/www/$(PBK_PKG_REPO)/srpm && sudo ln -nsf $(PBK_VERSION) latest + +# sudo ln -rfs build/data/www/$(PBK_PKG_REPO)/rpm/${PBK_VERSION} build/data/www/$(PBK_PKG_REPO)/rpm/latest +# sudo ln -rfs build/data/www/$(PBK_PKG_REPO)/srpm/${PBK_VERSION} build/data/www/$(PBK_PKG_REPO)/srpm/latest + +define build_repo_deb + docker rm -f $1_$2_pbk_repo >> /dev/null 2>&1 ; \ + docker run \ + -v $(WORKDIR)/packaging/repo:/app/repo \ + -v $(WORKDIR)/build/data/www:/app/www \ + -v $(WORKDIR)/build/data/$(PBK_PKG_REPO)/$1/$2:/app/in \ + -e "DISTRIB=$1" -e "DISTRIB_VERSION=$2" -e "CODENAME=$3" \ + -e "PBK_PKG_REPO=$(PBK_PKG_REPO)" -e "PBK_EDITION=$(PBK_EDITION)" \ + --name $1_$2_pbk_repo \ + --rm pgpro/repo /app/repo/scripts/deb.sh +endef + +define build_repo_rpm + docker rm -f $1_$2_pbk_repo >> /dev/null 2>&1 ; \ + docker run \ + -v 
$(WORKDIR)/packaging/repo:/app/repo \ + -v $(WORKDIR)/build/data/www:/app/www \ + -v $(WORKDIR)/build/data/$(PBK_PKG_REPO)/$1/$2:/app/in \ + -e "DISTRIB=$1" -e "DISTRIB_VERSION=$2" -e "CODENAME=$3" \ + -e "PBK_PKG_REPO=$(PBK_PKG_REPO)" -e "PBK_EDITION=$(PBK_EDITION)" \ + --name $1_$2_pbk_repo \ + --rm pgpro/repo /app/repo/scripts/rpm.sh +endef + +define build_repo_alt + docker rm -f $1_$2_pbk_repo >> /dev/null 2>&1 ; \ + docker run \ + -v $(WORKDIR)/packaging/repo:/app/repo \ + -v $(WORKDIR)/build/data/www:/app/www \ + -v $(WORKDIR)/build/data/$(PBK_PKG_REPO)/$1/$2:/app/in \ + -e "DISTRIB=$1" -e "DISTRIB_VERSION=$2" -e "CODENAME=$3" \ + -e "PBK_PKG_REPO=$(PBK_PKG_REPO)" -e "PBK_EDITION=$(PBK_EDITION)" \ + --name $1_$2_pbk_repo \ + --rm pgpro/$1:$2 /app/repo/scripts/alt.sh +endef + +define build_repo_suse + docker rm -f $1_$2_pbk_repo >> /dev/null 2>&1 ; \ + docker run \ + -v $(WORKDIR)/packaging/repo:/app/repo \ + -v $(WORKDIR)/build/data/www:/app/www \ + -v $(WORKDIR)/build/data/$(PBK_PKG_REPO)/$1/$2:/app/in \ + -e "DISTRIB=$1" -e "DISTRIB_VERSION=$2" -e "CODENAME=$3" \ + -e "PBK_PKG_REPO=$(PBK_PKG_REPO)" -e "PBK_EDITION=$(PBK_EDITION)" \ + --name $1_$2_pbk_repo \ + --rm pgpro/$1:$2 /app/repo/scripts/suse.sh +endef diff --git a/packaging/Makefile.test b/packaging/Makefile.test new file mode 100644 index 000000000..fbb415c46 --- /dev/null +++ b/packaging/Makefile.test @@ -0,0 +1,145 @@ +ifeq ($(PBK_EDITION),std) + SCRIPT_SUFFIX = _forks +else ifeq ($(PBK_EDITION),ent) + SCRIPT_SUFFIX = _forks +else + SCRIPT_SUFFIX = +endif + +test: build/test_all + @echo Test for all platform: done + +build/test_all: build/test_debian build/test_ubuntu build/test_centos build/test_oraclelinux build/test_alt build/test_suse # build/test_rhel + @echo Package testing is done + +### DEBIAN +build/test_debian: build/test_debian_9 build/test_debian_10 build/test_debian_11 + @echo Debian: done + +build/test_debian_9: build/test_debian_9_9.6 build/test_debian_9_10 build/test_debian_9_11 build/test_debian_9_12 build/test_debian_9_13 + @echo Debian 9: done + +build/test_debian_10: build/test_debian_10_9.6 build/test_debian_10_10 build/test_debian_10_11 build/test_debian_10_12 build/test_debian_10_13 + @echo Debian 10: done + +build/test_debian_11: build/test_debian_11_9.6 build/test_debian_11_10 build/test_debian_11_11 build/test_debian_11_12 build/test_debian_11_13 + @echo Debian 11: done + +### UBUNTU +build/test_ubuntu: build/test_ubuntu_16.04 build/test_ubuntu_18.04 build/test_ubuntu_20.04 + @echo Ubuntu: done + +build/test_ubuntu_16.04: build/test_ubuntu_16.04_9.6 build/test_ubuntu_16.04_10 build/test_ubuntu_16.04_11 build/test_ubuntu_16.04_12 build/test_ubuntu_16.04_13 + @echo Ubuntu 16.04: done + +build/test_ubuntu_18.04: build/test_ubuntu_18.04_9.6 build/test_ubuntu_18.04_10 build/test_ubuntu_18.04_11 build/test_ubuntu_18.04_12 build/test_ubuntu_18.04_13 + @echo Ubuntu 18.04: done + +build/test_ubuntu_20.04: build/test_ubuntu_20.04_9.6 build/test_ubuntu_20.04_10 build/test_ubuntu_20.04_11 build/test_ubuntu_20.04_12 build/test_ubuntu_20.04_13 + @echo Ubuntu 20.04: done + +define test_deb + docker rm -f $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION) >> /dev/null 2>&1 ; \ + docker run \ + -v $(WORKDIR)/packaging/test:/app/in \ + -v $(BUILDDIR)/data/www:/app/www \ + -e "DISTRIB=$1" -e "DISTRIB_VERSION=$2" -e "CODENAME=$3" -e "PG_VERSION=$4" -e "PG_FULL_VERSION=$5" \ + -e "PKG_HASH=$(PBK_HASH)" -e "PKG_URL=$(PBK_GIT_REPO)" -e "PKG_RELEASE=$(PBK_RELEASE)" -e "PKG_NAME=pg-probackup-$(PKG_NAME_SUFFIX)$4" \ + -e 
"PKG_VERSION=$(PBK_VERSION)" -e "PBK_EDITION=$(PBK_EDITION)" -e "PBK_EDITION_FULL=$(PBK_EDITION_FULL)" \ + --name $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION)_pg_$5 \ + --rm pgpro/$1:$2 /app/in/scripts/deb$(SCRIPT_SUFFIX).sh +endef + +include packaging/test/Makefile.debian +include packaging/test/Makefile.ubuntu + +# CENTOS +build/test_centos: build/test_centos_7 build/test_centos_8 + @echo Centos: done + +build/test_centos_7: build/test_centos_7_9.6 build/test_centos_7_10 build/test_centos_7_11 build/test_centos_7_12 build/test_centos_7_13 + @echo Centos 7: done + +build/test_centos_8: build/test_centos_8_9.6 build/test_centos_8_10 build/test_centos_8_11 build/test_centos_8_12 build/test_centos_8_13 + @echo Centos 8: done + +# Oracle Linux +build/test_oraclelinux: build/test_oraclelinux_7 build/test_oraclelinux_8 + @echo Oraclelinux: done + +build/test_oraclelinux_7: build/test_oraclelinux_7_9.6 build/test_oraclelinux_7_10 build/test_oraclelinux_7_11 build/test_oraclelinux_7_12 build/test_oraclelinux_7_13 + @echo Oraclelinux 7: done + +build/test_oraclelinux_8: build/test_oraclelinux_8_9.6 build/test_oraclelinux_8_10 build/test_oraclelinux_8_11 build/test_oraclelinux_8_12 build/test_oraclelinux_8_13 + @echo Oraclelinux 8: done + +# RHEL +build/test_rhel: build/test_rhel_7 build/test_rhel_8 + @echo Rhel: done + +build/test_rhel_7: build/test_rhel_7_9.5 build/test_rhel_7_9.6 build/test_rhel_7_10 build/test_rhel_7_11 build/test_rhel_7_12 build/test_rhel_7_13 + @echo Rhel 7: done + +build/test_rhel_8: build/test_rhel_8_9.5 build/test_rhel_8_9.6 build/test_rhel_8_10 build/test_rhel_8_11 build/test_rhel_8_12 build/test_rhel_8_13 + @echo Rhel 8: done + +define test_rpm + docker rm -f $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION) >> /dev/null 2>&1 ; \ + docker run \ + -v $(WORKDIR)/packaging/test:/app/in \ + -v $(BUILDDIR)/data/www:/app/www \ + -e "DISTRIB=$1" -e "DISTRIB_VERSION=$2" -e "CODENAME=$3" -e "PG_VERSION=$4" -e "PG_FULL_VERSION=$5" \ + -e "PKG_HASH=$(PBK_HASH)" -e "PKG_URL=$(PBK_GIT_REPO)" -e "PKG_RELEASE=$(PBK_RELEASE)" -e "PKG_NAME=pg_probackup-$(PKG_NAME_SUFFIX)$4" \ + -e "PKG_VERSION=$(PBK_VERSION)" -e "PBK_EDITION=$(PBK_EDITION)" -e "PBK_EDITION_FULL=$(PBK_EDITION_FULL)" \ + --name $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION)_pg_$5 \ + --rm pgpro/$1:$2 /app/in/scripts/rpm$(SCRIPT_SUFFIX).sh +endef + +include packaging/test/Makefile.centos +include packaging/test/Makefile.rhel +include packaging/test/Makefile.oraclelinux + +# Alt Linux +build/test_alt: build/test_alt_9 + @echo Alt Linux: done + +build/test_alt_9: build/test_alt_9_9.6 build/test_alt_9_10 build/test_alt_9_11 build/test_alt_9_12 build/test_alt_9_13 + @echo Alt Linux 9: done + +define test_alt + docker rm -f $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION) >> /dev/null 2>&1 ; \ + docker run \ + -v $(WORKDIR)/packaging/test:/app/in \ + -v $(BUILDDIR)/data/www:/app/www \ + -e "DISTRIB=$1" -e "DISTRIB_VERSION=$2" -e "CODENAME=$3" -e "PG_VERSION=$4" -e "PG_FULL_VERSION=$5" \ + -e "PKG_HASH=$(PBK_HASH)" -e "PKG_URL=$(PBK_GIT_REPO)" -e "PKG_RELEASE=$(PBK_RELEASE)" -e "PKG_NAME=pg_probackup-$(PKG_NAME_SUFFIX)$4" \ + -e "PKG_VERSION=$(PBK_VERSION)" -e "PBK_EDITION=$(PBK_EDITION)" -e "PBK_EDITION_FULL=$(PBK_EDITION_FULL)" \ + --name $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION)_pg_$5 \ + --rm pgpro/$1:$2 /app/in/scripts/alt$(SCRIPT_SUFFIX).sh +endef + +include packaging/test/Makefile.alt + +# SUSE Linux +build/test_suse: build/test_suse_15.1 build/test_suse_15.2 + @echo Suse: done + +build/test_suse_15.1: 
build/test_suse_15.1_9.6 build/test_suse_15.1_10 build/test_suse_15.1_11 build/test_suse_15.1_12 build/test_suse_15.1_13
+	@echo Suse 15.1: done
+
+build/test_suse_15.2: build/test_suse_15.2_9.6 build/test_suse_15.2_10 build/test_suse_15.2_11 build/test_suse_15.2_12 build/test_suse_15.2_13
+	@echo Suse 15.2: done
+
+define test_suse
+	docker rm -f $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION) >> /dev/null 2>&1 ; \
+	docker run \
+		-v $(WORKDIR)/packaging/test:/app/in \
+		-v $(BUILDDIR)/data/www:/app/www \
+		-e "DISTRIB=$1" -e "DISTRIB_VERSION=$2" -e "CODENAME=$3" -e "PG_VERSION=$4" -e "PG_FULL_VERSION=$5" \
+		-e "PKG_HASH=$(PBK_HASH)" -e "PKG_URL=$(PBK_GIT_REPO)" -e "PKG_RELEASE=$(PBK_RELEASE)" -e "PKG_NAME=pg_probackup-$(PKG_NAME_SUFFIX)$4" \
+		-e "PKG_VERSION=$(PBK_VERSION)" -e "PBK_EDITION=$(PBK_EDITION)" -e "PBK_EDITION_FULL=$(PBK_EDITION_FULL)" \
+		--name $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION)_pg_$5 \
+		--rm pgpro/$1:$2 /app/in/scripts/suse$(SCRIPT_SUFFIX).sh
+endef
+
+include packaging/test/Makefile.suse
diff --git a/packaging/Readme.md b/packaging/Readme.md
new file mode 100644
index 000000000..c6cbf16b5
--- /dev/null
+++ b/packaging/Readme.md
@@ -0,0 +1,21 @@
+Example:
+```
+export PBK_VERSION=2.4.17
+export PBK_HASH=57f871accce2604
+export PBK_RELEASE=1
+export PBK_EDITION=std|ent
+make pkg
+```
+
+To build binaries for PostgresPro Standart or Enterprise, a pgpro.tar.bz2 with the latest git tree must be present in the `packaging/tarballs` directory:
+```
+cd packaging/tarballs
+git clone pgpro_repo pgpro
+tar -cjSf pgpro.tar.bz2 pgpro
+```
+
+To build the repo, the GPG keys for package signing must be present ...
+The repo must be built using a single thread (a limitation of the Debian repo tooling):
+```
+make repo -j1
+```
diff --git a/packaging/pkg/Makefile.alt b/packaging/pkg/Makefile.alt
new file mode 100644
index 000000000..e3fbae26e
--- /dev/null
+++ b/packaging/pkg/Makefile.alt
@@ -0,0 +1,74 @@
+# ALT 7
+build/alt_7_9.5:
+	$(call build_alt,alt,7,,9.5,9.5.25)
+	touch build/alt_7_9.5
+
+build/alt_7_9.6:
+	$(call build_alt,alt,7,,9.6,9.6.21)
+	touch build/alt_7_9.6
+
+build/alt_7_10:
+	$(call build_alt,alt,7,,10,10.17)
+	touch build/alt_7_10
+
+build/alt_7_11:
+	$(call build_alt,alt,7,,11,11.11)
+	touch build/alt_7_11
+
+build/alt_7_12:
+	$(call build_alt,alt,7,,12,12.6)
+	touch build/alt_7_12
+
+build/alt_7_13:
+	$(call build_alt,alt,7,,13,13.2)
+	touch build/alt_7_13
+
+# ALT 8
+build/alt_8_9.5:
+	$(call build_alt,alt,8,,9.5,9.5.25)
+	touch build/alt_8_9.5
+
+build/alt_8_9.6:
+	$(call build_alt,alt,8,,9.6,9.6.21)
+	touch build/alt_8_9.6
+
+build/alt_8_10:
+	$(call build_alt,alt,8,,10,10.17)
+	touch build/alt_8_10
+
+build/alt_8_11:
+	$(call build_alt,alt,8,,11,11.11)
+	touch build/alt_8_11
+
+build/alt_8_12:
+	$(call build_alt,alt,8,,12,12.6)
+	touch build/alt_8_12
+
+build/alt_8_13:
+	$(call build_alt,alt,8,,13,13.2)
+	touch build/alt_8_13
+
+# ALT 9
+build/alt_9_9.5:
+	$(call build_alt,alt,9,,9.5,9.5.25)
+	touch build/alt_9_9.5
+
+build/alt_9_9.6:
+	$(call build_alt,alt,9,,9.6,9.6.21)
+	touch build/alt_9_9.6
+
+build/alt_9_10:
+	$(call build_alt,alt,9,,10,10.17)
+	touch build/alt_9_10
+
+build/alt_9_11:
+	$(call build_alt,alt,9,,11,11.11)
+	touch build/alt_9_11
+
+build/alt_9_12:
+	$(call build_alt,alt,9,,12,12.6)
+	touch build/alt_9_12
+
+build/alt_9_13:
+	$(call build_alt,alt,9,,13,13.2)
+	touch build/alt_9_13
diff --git a/packaging/pkg/Makefile.centos b/packaging/pkg/Makefile.centos
new file mode 100644
index 000000000..9353b2cde
--- /dev/null
+++ b/packaging/pkg/Makefile.centos
@@ -0,0 +1,49 @@
+# CENTOS 7 +build/centos_7_9.5: + $(call build_rpm,centos,7,,9.5,9.5.25) + touch build/centos_7_9.5 + +build/centos_7_9.6: + $(call build_rpm,centos,7,,9.6,9.6.21) + touch build/centos_7_9.6 + +build/centos_7_10: + $(call build_rpm,centos,7,,10,10.16) + touch build/centos_7_10 + +build/centos_7_11: + $(call build_rpm,centos,7,,11,11.11) + touch build/centos_7_11 + +build/centos_7_12: + $(call build_rpm,centos,7,,12,12.6) + touch build/centos_7_12 + +build/centos_7_13: + $(call build_rpm,centos,7,,13,13.2) + touch build/centos_7_13 + +# CENTOS 8 +build/centos_8_9.5: + $(call build_rpm,centos,8,,9.5,9.5.25) + touch build/centos_8_9.5 + +build/centos_8_9.6: + $(call build_rpm,centos,8,,9.6,9.6.21) + touch build/centos_8_9.6 + +build/centos_8_10: + $(call build_rpm,centos,8,,10,10.16) + touch build/centos_8_10 + +build/centos_8_11: + $(call build_rpm,centos,8,,11,11.11) + touch build/centos_8_11 + +build/centos_8_12: + $(call build_rpm,centos,8,,12,12.6) + touch build/centos_8_12 + +build/centos_8_13: + $(call build_rpm,centos,8,,13,13.2) + touch build/centos_8_13 diff --git a/packaging/pkg/Makefile.debian b/packaging/pkg/Makefile.debian new file mode 100644 index 000000000..9625a14e9 --- /dev/null +++ b/packaging/pkg/Makefile.debian @@ -0,0 +1,99 @@ +# DEBIAN 8 +build/debian_8_9.5: + $(call build_deb,debian,8,jessie,9.5,9.5.25) + touch build/debian_8_9.5 + +build/debian_8_9.6: + $(call build_deb,debian,8,jessie,9.6,9.6.21) + touch build/debian_8_9.6 + +build/debian_8_10: + $(call build_deb,debian,8,jessie,10,10.16) + touch build/debian_8_10 + +build/debian_8_11: + $(call build_deb,debian,8,jessie,11,11.11) + touch build/debian_8_11 + +build/debian_8_12: + $(call build_deb,debian,8,jessie,12,12.6) + touch build/debian_8_12 + +build/debian_8_13: + $(call build_deb,debian,8,jessie,13,13.2) + touch build/debian_8_13 + +# DEBIAN 9 +build/debian_9_9.5: + $(call build_deb,debian,9,stretch,9.5,9.5.25) + touch build/debian_9_9.5 + +build/debian_9_9.6: + $(call build_deb,debian,9,stretch,9.6,9.6.21) + touch build/debian_9_9.6 + +build/debian_9_10: + $(call build_deb,debian,9,stretch,10,10.16) + touch build/debian_9_10 + +build/debian_9_11: + $(call build_deb,debian,9,stretch,11,11.11) + touch build/debian_9_11 + +build/debian_9_12: + $(call build_deb,debian,9,stretch,12,12.6) + touch build/debian_9_12 + +build/debian_9_13: + $(call build_deb,debian,9,stretch,13,13.2) + touch build/debian_9_13 + +# DEBIAN 10 +build/debian_10_9.5: + $(call build_deb,debian,10,buster,9.5,9.5.25) + touch build/debian_10_9.5 + +build/debian_10_9.6: + $(call build_deb,debian,10,buster,9.6,9.6.21) + touch build/debian_10_9.6 + +build/debian_10_10: + $(call build_deb,debian,10,buster,10,10.16) + touch build/debian_10_10 + +build/debian_10_11: + $(call build_deb,debian,10,buster,11,11.11) + touch build/debian_10_11 + +build/debian_10_12: + $(call build_deb,debian,10,buster,12,12.6) + touch build/debian_10_12 + +build/debian_10_13: + $(call build_deb,debian,10,buster,13,13.2) + touch build/debian_10_13 + +# DEBIAN 11 +build/debian_11_9.5: + $(call build_deb,debian,11,bullseye,9.5,9.5.25) + touch build/debian_11_9.5 + +build/debian_11_9.6: + $(call build_deb,debian,11,bullseye,9.6,9.6.21) + touch build/debian_11_9.6 + +build/debian_11_10: + $(call build_deb,debian,11,bullseye,10,10.16) + touch build/debian_11_10 + +build/debian_11_11: + $(call build_deb,debian,11,bullseye,11,11.11) + touch build/debian_11_11 + +build/debian_11_12: + $(call build_deb,debian,11,bullseye,12,12.6) + touch build/debian_11_12 + +build/debian_11_13: + 
$(call build_deb,debian,11,bullseye,13,13.2) + touch build/debian_11_13 diff --git a/packaging/pkg/Makefile.oraclelinux b/packaging/pkg/Makefile.oraclelinux new file mode 100644 index 000000000..f4eada23f --- /dev/null +++ b/packaging/pkg/Makefile.oraclelinux @@ -0,0 +1,74 @@ +# ORACLE LINUX 6 +build/oraclelinux_6_9.5: + $(call build_rpm,oraclelinux,6,,9.5,9.5.25) + touch build/oraclelinux_6_9.5 + +build/oraclelinux_6_9.6: + $(call build_rpm,oraclelinux,6,,9.6,9.6.21) + touch build/oraclelinux_6_9.6 + +build/oraclelinux_6_10: + $(call build_rpm,oraclelinux,6,,10,10.16) + touch build/oraclelinux_6_10 + +build/oraclelinux_6_11: + $(call build_rpm,oraclelinux,6,,11,11.11) + touch build/oraclelinux_6_11 + +build/oraclelinux_6_12: + $(call build_rpm,oraclelinux,6,,12,12.6) + touch build/oraclelinux_6_12 + +build/oraclelinux_6_13: + $(call build_rpm,oraclelinux,6,,13,13.2) + touch build/oraclelinux_6_13 + +# ORACLE LINUX 7 +build/oraclelinux_7_9.5: + $(call build_rpm,oraclelinux,7,,9.5,9.5.25) + touch build/oraclelinux_7_9.5 + +build/oraclelinux_7_9.6: + $(call build_rpm,oraclelinux,7,,9.6,9.6.21) + touch build/oraclelinux_7_9.6 + +build/oraclelinux_7_10: + $(call build_rpm,oraclelinux,7,,10,10.16) + touch build/oraclelinux_7_10 + +build/oraclelinux_7_11: + $(call build_rpm,oraclelinux,7,,11,11.11) + touch build/oraclelinux_7_11 + +build/oraclelinux_7_12: + $(call build_rpm,oraclelinux,7,,12,12.6) + touch build/oraclelinux_7_12 + +build/oraclelinux_7_13: + $(call build_rpm,oraclelinux,7,,13,13.2) + touch build/oraclelinux_7_13 + +# ORACLE LINUX 8 +build/oraclelinux_8_9.5: + $(call build_rpm,oraclelinux,8,,9.5,9.5.25) + touch build/oraclelinux_8_9.5 + +build/oraclelinux_8_9.6: + $(call build_rpm,oraclelinux,8,,9.6,9.6.21) + touch build/oraclelinux_8_9.6 + +build/oraclelinux_8_10: + $(call build_rpm,oraclelinux,8,,10,10.16) + touch build/oraclelinux_8_10 + +build/oraclelinux_8_11: + $(call build_rpm,oraclelinux,8,,11,11.11) + touch build/oraclelinux_8_11 + +build/oraclelinux_8_12: + $(call build_rpm,oraclelinux,8,,12,12.6) + touch build/oraclelinux_8_12 + +build/oraclelinux_8_13: + $(call build_rpm,oraclelinux,8,,13,13.2) + touch build/oraclelinux_8_13 diff --git a/packaging/pkg/Makefile.rhel b/packaging/pkg/Makefile.rhel new file mode 100644 index 000000000..f266966cf --- /dev/null +++ b/packaging/pkg/Makefile.rhel @@ -0,0 +1,49 @@ +# RHEL 7 +build/rhel_7_9.5: + $(call build_rpm,rhel,7,7Server,9.5,9.5.25) + touch build/rhel_7_9.5 + +build/rhel_7_9.6: + $(call build_rpm,rhel,7,7Server,9.6,9.6.21) + touch build/rhel_7_9.6 + +build/rhel_7_10: + $(call build_rpm,rhel,7,7Server,10,10.16) + touch build/rhel_7_10 + +build/rhel_7_11: + $(call build_rpm,rhel,7,7Server,11,11.11) + touch build/rhel_7_11 + +build/rhel_7_12: + $(call build_rpm,rhel,7,7Server,12,12.6) + touch build/rhel_7_12 + +build/rhel_7_13: + $(call build_rpm,rhel,7,7Server,13,13.2) + touch build/rhel_7_13 + +# RHEL 8 +build/rhel_8_9.5: + $(call build_rpm,rhel,8,8Server,9.5,9.5.25) + touch build/rhel_8_9.5 + +build/rhel_8_9.6: + $(call build_rpm,rhel,8,8Server,9.6,9.6.21) + touch build/rhel_8_9.6 + +build/rhel_8_10: + $(call build_rpm,rhel,8,8Server,10,10.16) + touch build/rhel_8_10 + +build/rhel_8_11: + $(call build_rpm,rhel,8,8Server,11,11.11) + touch build/rhel_8_11 + +build/rhel_8_12: + $(call build_rpm,rhel,8,8Server,12,12.6) + touch build/rhel_8_12 + +build/rhel_8_13: + $(call build_rpm,rhel,8,8Server,13,13.2) + touch build/rhel_8_13 diff --git a/packaging/pkg/Makefile.suse b/packaging/pkg/Makefile.suse new file mode 100644 index 
000000000..a9f1eaa36 --- /dev/null +++ b/packaging/pkg/Makefile.suse @@ -0,0 +1,49 @@ +# Suse 15.1 +build/suse_15.1_9.5: + $(call build_suse,suse,15.1,,9.5,9.5.25) + touch build/suse_15.1_9.5 + +build/suse_15.1_9.6: + $(call build_suse,suse,15.1,,9.6,9.6.21) + touch build/suse_15.1_9.6 + +build/suse_15.1_10: + $(call build_suse,suse,15.1,,10,10.16) + touch build/suse_15.1_10 + +build/suse_15.1_11: + $(call build_suse,suse,15.1,,11,11.11) + touch build/suse_15.1_11 + +build/suse_15.1_12: + $(call build_suse,suse,15.1,,12,12.6) + touch build/suse_15.1_12 + +build/suse_15.1_13: + $(call build_suse,suse,15.1,,13,13.2) + touch build/suse_15.1_13 + +# Suse 15.2 +build/suse_15.2_9.5: + $(call build_suse,suse,15.2,,9.5,9.5.25) + touch build/suse_15.2_9.5 + +build/suse_15.2_9.6: + $(call build_suse,suse,15.2,,9.6,9.6.21) + touch build/suse_15.2_9.6 + +build/suse_15.2_10: + $(call build_suse,suse,15.2,,10,10.16) + touch build/suse_15.2_10 + +build/suse_15.2_11: + $(call build_suse,suse,15.2,,11,11.11) + touch build/suse_15.2_11 + +build/suse_15.2_12: + $(call build_suse,suse,15.2,,12,12.6) + touch build/suse_15.2_12 + +build/suse_15.2_13: + $(call build_suse,suse,15.2,,13,13.2) + touch build/suse_15.2_13 diff --git a/packaging/pkg/Makefile.ubuntu b/packaging/pkg/Makefile.ubuntu new file mode 100644 index 000000000..3f76de516 --- /dev/null +++ b/packaging/pkg/Makefile.ubuntu @@ -0,0 +1,99 @@ +# UBUNTU 20.04 +build/ubuntu_20.04_9.5: + $(call build_deb,ubuntu,20.04,focal,9.5,9.5.25) + touch build/ubuntu_20.04_9.5 + +build/ubuntu_20.04_9.6: + $(call build_deb,ubuntu,20.04,focal,9.6,9.6.21) + touch build/ubuntu_20.04_9.6 + +build/ubuntu_20.04_10: + $(call build_deb,ubuntu,20.04,focal,10,10.16) + touch build/ubuntu_20.04_10 + +build/ubuntu_20.04_11: + $(call build_deb,ubuntu,20.04,focal,11,11.11) + touch build/ubuntu_20.04_11 + +build/ubuntu_20.04_12: + $(call build_deb,ubuntu,20.04,focal,12,12.6) + touch build/ubuntu_20.04_12 + +build/ubuntu_20.04_13: + $(call build_deb,ubuntu,20.04,focal,13,13.2) + touch build/ubuntu_20.04_13 + +# UBUNTU 18.04 +build/ubuntu_18.04_9.5: + $(call build_deb,ubuntu,18.04,bionic,9.5,9.5.25) + touch build/ubuntu_18.04_9.5 + +build/ubuntu_18.04_9.6: + $(call build_deb,ubuntu,18.04,bionic,9.6,9.6.21) + touch build/ubuntu_18.04_9.6 + +build/ubuntu_18.04_10: + $(call build_deb,ubuntu,18.04,bionic,10,10.16) + touch build/ubuntu_18.04_10 + +build/ubuntu_18.04_11: + $(call build_deb,ubuntu,18.04,bionic,11,11.11) + touch build/ubuntu_18.04_11 + +build/ubuntu_18.04_12: + $(call build_deb,ubuntu,18.04,bionic,12,12.6) + touch build/ubuntu_18.04_12 + +build/ubuntu_18.04_13: + $(call build_deb,ubuntu,18.04,bionic,13,13.2) + touch build/ubuntu_18.04_13 + +# UBUNTU 16.04 +build/ubuntu_16.04_9.5: + $(call build_deb,ubuntu,16.04,xenial,9.5,9.5.25) + touch build/ubuntu_16.04_9.5 + +build/ubuntu_16.04_9.6: + $(call build_deb,ubuntu,16.04,xenial,9.6,9.6.21) + touch build/ubuntu_16.04_9.6 + +build/ubuntu_16.04_10: + $(call build_deb,ubuntu,16.04,xenial,10,10.16) + touch build/ubuntu_16.04_10 + +build/ubuntu_16.04_11: + $(call build_deb,ubuntu,16.04,xenial,11,11.11) + touch build/ubuntu_16.04_11 + +build/ubuntu_16.04_12: + $(call build_deb,ubuntu,16.04,xenial,12,12.6) + touch build/ubuntu_16.04_12 + +build/ubuntu_16.04_13: + $(call build_deb,ubuntu,16.04,xenial,13,13.2) + touch build/ubuntu_16.04_13 + +# UBUNTU 14.04 +build/ubuntu_14.04_9.5: + $(call build_deb,ubuntu,14.04,trusty,9.5,9.5.25) + touch build/ubuntu_14.04_9.5 + +build/ubuntu_14.04_9.6: + $(call build_deb,ubuntu,14.04,trusty,9.6,9.6.21) 
+ touch build/ubuntu_14.04_9.6 + +build/ubuntu_14.04_10: + $(call build_deb,ubuntu,14.04,trusty,10,10.16) + touch build/ubuntu_14.04_10 + +build/ubuntu_14.04_11: + $(call build_deb,ubuntu,14.04,trusty,11,11.11) + touch build/ubuntu_14.04_11 + +build/ubuntu_14.04_12: + $(call build_deb,ubuntu,14.04,trusty,12,12.6) + touch build/ubuntu_14.04_12 + +build/ubuntu_14.04_13: + $(call build_deb,ubuntu,14.04,trusty,13,13.2) + touch build/ubuntu_14.04_13 diff --git a/packaging/pkg/scripts/alt.sh b/packaging/pkg/scripts/alt.sh new file mode 100755 index 000000000..ae3c713fa --- /dev/null +++ b/packaging/pkg/scripts/alt.sh @@ -0,0 +1,123 @@ +#!/usr/bin/env bash + +# Copyright Notice: +# © (C) Postgres Professional 2015-2016 http://www.postgrespro.ru/ +# Distributed under Apache License 2.0 +# Распространяется по лицензии Apache 2.0 + +set -xe +set -o pipefail + +# THere is no std/ent packages for PG 9.5 +if [[ ${PG_VERSION} == '9.5' ]] && [[ ${PBK_EDITION} != '' ]] ; then + exit 0 +fi + +# fix https://github.com/moby/moby/issues/23137 +ulimit -n 1024 +apt-get update -y + +mkdir /root/build +cd /root/build + +# Copy rpmbuild +cp -rv /app/in/specs/rpm/rpmbuild /root/ + +# download pbk +git clone $PKG_URL pg_probackup-${PKG_VERSION} +cd pg_probackup-${PKG_VERSION} +git checkout ${PKG_HASH} +cd .. + +# tarball it +if [[ ${PBK_EDITION} == '' ]] ; then + tar -cjf pg_probackup-${PKG_VERSION}.tar.bz2 pg_probackup-${PKG_VERSION} + mv pg_probackup-${PKG_VERSION}.tar.bz2 /root/rpmbuild/SOURCES + rm -rf pg_probackup-${PKG_VERSION} +else + mv pg_probackup-${PKG_VERSION} /root/rpmbuild/SOURCES +fi + + +if [[ ${PBK_EDITION} == '' ]] ; then + # Download PostgreSQL source + wget -q http://ftp.postgresql.org/pub/source/v${PG_FULL_VERSION}/postgresql-${PG_FULL_VERSION}.tar.bz2 -O postgresql-${PG_VERSION}.tar.bz2 + mv postgresql-${PG_VERSION}.tar.bz2 /root/rpmbuild/SOURCES/ + +else + tar -xf /app/in/tarballs/pgpro.tar.bz2 -C /root/rpmbuild/SOURCES/ + cd /root/rpmbuild/SOURCES/pgpro + + PGPRO_TOC=$(echo ${PG_FULL_VERSION} | sed 's|\.|_|g') + if [[ ${PBK_EDITION} == 'std' ]] ; then + git checkout "PGPRO${PGPRO_TOC}_1" + else + git checkout "PGPROEE${PGPRO_TOC}_1" + fi + rm -rf .git + + cd /root/rpmbuild/SOURCES/ + mv pgpro postgrespro-${PBK_EDITION}-${PG_FULL_VERSION} + chown -R root:root postgrespro-${PBK_EDITION}-${PG_FULL_VERSION} +fi + + +#cd /root/rpmbuild/SOURCES +#sed -i "s/@PG_VERSION@/${PKG_VERSION}/" pg_probackup.repo + +# build postgresql +echo '%_allow_root_build yes' > /root/.rpmmacros +echo '%_topdir %{getenv:HOME}/rpmbuild' >> /root/.rpmmacros + +cd /root/rpmbuild/SPECS +if [[ ${PBK_EDITION} == '' ]] ; then + sed -i "s/@PKG_VERSION@/${PKG_VERSION}/" pg_probackup.alt.spec + sed -i "s/@PKG_RELEASE@/${PKG_RELEASE}/" pg_probackup.alt.spec + sed -i "s/@PKG_HASH@/${PKG_HASH}/" pg_probackup.alt.spec + sed -i "s/@PG_VERSION@/${PG_VERSION}/" pg_probackup.alt.spec + sed -i "s/@PG_FULL_VERSION@/${PG_FULL_VERSION}/" pg_probackup.alt.spec +else + sed -i "s/@EDITION@/${PBK_EDITION}/" pg_probackup.alt.forks.spec + sed -i "s/@EDITION_FULL@/${PBK_EDITION_FULL}/" pg_probackup.alt.forks.spec + sed -i "s/@PKG_VERSION@/${PKG_VERSION}/" pg_probackup.alt.forks.spec + sed -i "s/@PKG_RELEASE@/${PKG_RELEASE}/" pg_probackup.alt.forks.spec + sed -i "s/@PKG_HASH@/${PKG_HASH}/" pg_probackup.alt.forks.spec + sed -i "s/@PG_VERSION@/${PG_VERSION}/" pg_probackup.alt.forks.spec + sed -i "s/@PG_FULL_VERSION@/${PG_FULL_VERSION}/" pg_probackup.alt.forks.spec + + if [ ${PG_VERSION} 
!= '9.6' ]; then + sed -i "s|@PREFIX@|/opt/pgpro/${EDITION}-${PG_VERSION}|g" pg_probackup.alt.forks.spec + fi +fi + +# ALT Linux suck as detecting dependecies, so the manual hint is required +if [ ${DISTRIB_VERSION} == '7' ]; then + apt-get install libpq5.10 + +elif [ ${DISTRIB_VERSION} == '8' ]; then + apt-get install libpq5.12 + +else + apt-get install libpq5 +fi + +# install dependencies +#stolen from postgrespro +apt-get install -y flex libldap-devel libpam-devel libreadline-devel libssl-devel + +if [[ ${PBK_EDITION} == '' ]] ; then + + # build pg_probackup + rpmbuild -bs pg_probackup.alt.spec + rpmbuild -ba pg_probackup.alt.spec #2>&1 | tee -ai /app/out/build.log + + # write artefacts to out directory + rm -rf /app/out/* + cp -arv /root/rpmbuild/{RPMS,SRPMS} /app/out +else + rpmbuild -ba pg_probackup.alt.forks.spec #2>&1 | tee -ai /app/out/build.log + # write artefacts to out directory + rm -rf /app/out/* + # cp -arv /root/rpmbuild/{RPMS,SRPMS} /app/out + cp -arv /root/rpmbuild/RPMS /app/out +fi diff --git a/packaging/pkg/scripts/deb.sh b/packaging/pkg/scripts/deb.sh new file mode 100755 index 000000000..2fe2018b6 --- /dev/null +++ b/packaging/pkg/scripts/deb.sh @@ -0,0 +1,148 @@ +#!/usr/bin/env bash + +# Copyright Notice: +# © (C) Postgres Professional 2015-2016 http://www.postgrespro.ru/ +# Distributed under Apache License 2.0 +# Распространяется по лицензии Apache 2.0 + +set -xe +set -o pipefail + +# fix https://github.com/moby/moby/issues/23137 +ulimit -n 1024 + +# THere is no std/ent packages for PG 9.5 +if [[ ${PG_VERSION} == '9.5' ]] && [[ ${PBK_EDITION} != '' ]] ; then + exit 0 +fi + +# PACKAGES NEEDED +apt-get update -y && apt-get install -y git wget bzip2 devscripts equivs + +# Prepare +export DEBIAN_FRONTEND=noninteractive +echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections + +if [ ${CODENAME} == 'jessie' ]; then +printf "deb http://archive.debian.org/debian/ jessie main\ndeb-src http://archive.debian.org/debian/ jessie main\ndeb http://security.debian.org jessie/updates main\ndeb-src http://security.debian.org jessie/updates main" > /etc/apt/sources.list +fi + +apt-get -qq update -y + +# download PKG_URL if PKG_HASH is omitted +mkdir /root/build +cd /root/build + +# clone pbk repo +git clone $PKG_URL ${PKG_NAME}_${PKG_VERSION} +cd ${PKG_NAME}_${PKG_VERSION} +git checkout ${PKG_HASH} +cd .. 
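The rest of deb.sh turns the template under packaging/pkg/specs/deb/pg_probackup/debian into a concrete debian/ directory by sed-replacing the @TOKEN@ placeholders, installs the build dependencies with mk-build-deps, and then runs dpkg-buildpackage. Condensed into a sketch with example values (13 / 13.2 / 2.4.17 are illustrative, not new behaviour):
```
# Sketch of the templating and build steps that follow (example values):
sed -i "s/@PKG_NAME@/pg-probackup-13/g" debian/changelog debian/control debian/rules
sed -i "s/@PG_VERSION@/13/g"            debian/control debian/rules
sed -i "s/@PG_FULL_VERSION@/13.2/g"     debian/rules
mk-build-deps --install --remove --tool 'apt-get --no-install-recommends --yes' debian/control
dpkg-buildpackage -b
```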
+ +PG_TOC=$(echo ${PG_VERSION} | sed 's|\.||g') +# Download PostgreSQL source if building for vanilla +if [[ ${PBK_EDITION} == '' ]] ; then + wget -q http://ftp.postgresql.org/pub/source/v${PG_FULL_VERSION}/postgresql-${PG_FULL_VERSION}.tar.bz2 +fi + +cd /root/build/${PKG_NAME}_${PKG_VERSION} +cp -av /app/in/specs/deb/pg_probackup/debian ./ +if [[ ${PBK_EDITION} == '' ]] ; then + sed -i "s/@PKG_NAME@/${PKG_NAME}/g" debian/changelog + sed -i "s/@PKG_VERSION@/${PKG_VERSION}/g" debian/changelog + sed -i "s/@PKG_RELEASE@/${PKG_RELEASE}/g" debian/changelog + sed -i "s/@PKG_HASH@/${PKG_HASH}/g" debian/changelog + sed -i "s/@CODENAME@/${CODENAME}/g" debian/changelog + + sed -i "s/@PKG_NAME@/${PKG_NAME}/g" debian/control + sed -i "s/@PG_VERSION@/${PG_VERSION}/g" debian/control + + sed -i "s/@PG_VERSION@/${PG_VERSION}/" debian/pg_probackup.install + mv debian/pg_probackup.install debian/${PKG_NAME}.install + + sed -i "s/@PKG_NAME@/${PKG_NAME}/g" debian/rules + sed -i "s/@PG_TOC@/${PG_TOC}/g" debian/rules + sed -i "s/@PG_VERSION@/${PG_VERSION}/g" debian/rules + sed -i "s/@PG_FULL_VERSION@/${PG_FULL_VERSION}/g" debian/rules + sed -i "s|@PREFIX@|/stump|g" debian/rules +else + sed -i "s/@PKG_NAME@/pg-probackup-${PBK_EDITION}-${PG_VERSION}/g" debian/changelog + sed -i "s/@PKG_VERSION@/${PKG_VERSION}/g" debian/changelog + sed -i "s/@PKG_RELEASE@/${PKG_RELEASE}/g" debian/changelog + sed -i "s/@PKG_HASH@/${PKG_HASH}/g" debian/changelog + sed -i "s/@CODENAME@/${CODENAME}/g" debian/changelog + + sed -i "s/@PKG_NAME@/pg-probackup-${PBK_EDITION}-${PG_VERSION}/g" debian/control + sed -i "s/pg-probackup-@PG_VERSION@/pg-probackup-${PBK_EDITION}-${PG_VERSION}/g" debian/control + sed -i "s/@PG_VERSION@/${PG_VERSION}/g" debian/control + sed -i "s/PostgreSQL/PostgresPro ${PBK_EDITION_FULL}/g" debian/control + + sed -i "s/pg_probackup-@PG_VERSION@/pg_probackup-${PBK_EDITION}-${PG_VERSION}/" debian/pg_probackup.install + mv debian/pg_probackup.install debian/pg-probackup-${PBK_EDITION}-${PG_VERSION}.install + + sed -i "s/@PKG_NAME@/pg-probackup-${PBK_EDITION}-${PG_VERSION}/g" debian/rules + sed -i "s/@PG_TOC@/${PG_TOC}/g" debian/rules + sed -i "s/pg_probackup-@PG_VERSION@/pg_probackup-${PBK_EDITION}-${PG_VERSION}/g" debian/rules + sed -i "s/postgresql-@PG_FULL_VERSION@/postgrespro-${PBK_EDITION}-${PG_FULL_VERSION}/g" debian/rules + + if [ ${PG_VERSION} == '9.6' ]; then + sed -i "s|@PREFIX@|/stump|g" debian/rules + else + sed -i "s|@PREFIX@|/opt/pgpro/${PBK_EDITION}-${PG_VERSION}|g" debian/rules + fi +fi + +# Build dependencies +mk-build-deps --install --remove --tool 'apt-get --no-install-recommends --yes' debian/control +rm -rf ./*.deb + +# Pack source to orig.tar.gz +mkdir -p /root/build/dsc +if [[ ${PBK_EDITION} == '' ]] ; then + mv /root/build/postgresql-${PG_FULL_VERSION}.tar.bz2 \ + /root/build/dsc/${PKG_NAME}_${PKG_VERSION}.orig-postgresql${PG_TOC}.tar.bz2 + + cd /root/build/${PKG_NAME}_${PKG_VERSION} + tar -xf /root/build/dsc/${PKG_NAME}_${PKG_VERSION}.orig-postgresql${PG_TOC}.tar.bz2 + cd /root/build + + tar -czf ${PKG_NAME}_${PKG_VERSION}.orig.tar.gz \ + ${PKG_NAME}_${PKG_VERSION} + + mv /root/build/${PKG_NAME}_${PKG_VERSION}.orig.tar.gz /root/build/dsc + + cd /root/build/${PKG_NAME}_${PKG_VERSION} + tar -xf /root/build/dsc/${PKG_NAME}_${PKG_VERSION}.orig-postgresql${PG_TOC}.tar.bz2 +else + tar -xf /app/in/tarballs/pgpro.tar.bz2 -C /root/build/dsc/ + cd /root/build/dsc/pgpro + + PGPRO_TOC=$(echo ${PG_FULL_VERSION} | sed 's|\.|_|g') + if [[ ${PBK_EDITION} == 'std' ]] ; then + git checkout 
"PGPRO${PGPRO_TOC}_1" + else + git checkout "PGPROEE${PGPRO_TOC}_1" + fi + + mv /root/build/dsc/pgpro /root/build/${PKG_NAME}_${PKG_VERSION}/postgrespro-${PBK_EDITION}-${PG_FULL_VERSION} +fi + +# BUILD: SOURCE PKG +if [[ ${PBK_EDITION} == '' ]] ; then + cd /root/build/dsc + dpkg-source -b /root/build/${PKG_NAME}_${PKG_VERSION} +fi + +# BUILD: DEB PKG +cd /root/build/${PKG_NAME}_${PKG_VERSION} +dpkg-buildpackage -b #&> /app/out/build.log + +# COPY ARTEFACTS +rm -rf /app/out/* +cd /root/build +cp -v *.deb /app/out +cp -v *.changes /app/out + +if [[ ${PBK_EDITION} == '' ]] ; then + cp -arv dsc /app/out +fi diff --git a/packaging/pkg/scripts/rpm.sh b/packaging/pkg/scripts/rpm.sh new file mode 100755 index 000000000..fc95bf7dd --- /dev/null +++ b/packaging/pkg/scripts/rpm.sh @@ -0,0 +1,148 @@ +#!/usr/bin/env bash + +# Copyright Notice: +# © (C) Postgres Professional 2015-2016 http://www.postgrespro.ru/ +# Distributed under Apache License 2.0 +# Распространяется по лицензии Apache 2.0 + + +#yum upgrade -y || echo "some packages in docker fail to install" +#if [ -f /etc/rosa-release ]; then +# # Avoids old yum bugs on rosa-6 +# yum upgrade -y || echo "some packages in docker fail to install" +#fi + +set -xe +set -o pipefail + +# fix https://github.com/moby/moby/issues/23137 +ulimit -n 1024 + +# THere is no std/ent packages for PG 9.5 +if [[ ${PG_VERSION} == '9.5' ]] && [[ ${PBK_EDITION} != '' ]] ; then + exit 0 +fi + +# PACKAGES NEEDED +yum install -y git wget bzip2 rpm-build + +mkdir /root/build +cd /root/build +rpm --rebuilddb && yum clean all + +# Copy rpmbuild +cp -rv /app/in/specs/rpm/rpmbuild /root/ + +# download pbk +git clone $PKG_URL pg_probackup-${PKG_VERSION} +cd pg_probackup-${PKG_VERSION} +git checkout ${PKG_HASH} + +# move it to source +cd /root/build +if [[ ${PBK_EDITION} == '' ]] ; then + tar -cjf pg_probackup-${PKG_VERSION}.tar.bz2 pg_probackup-${PKG_VERSION} + mv pg_probackup-${PKG_VERSION}.tar.bz2 /root/rpmbuild/SOURCES + rm -rf pg_probackup-${PKG_VERSION} +else + mv pg_probackup-${PKG_VERSION} /root/rpmbuild/SOURCES +fi + +if [[ ${PBK_EDITION} == '' ]] ; then + + # Download PostgreSQL source + wget -q http://ftp.postgresql.org/pub/source/v${PG_FULL_VERSION}/postgresql-${PG_FULL_VERSION}.tar.bz2 -O /root/rpmbuild/SOURCES/postgresql-${PG_VERSION}.tar.bz2 + + cd /root/rpmbuild/SOURCES/ + sed -i "s/@DISTRIB@/${DISTRIB}/" pg_probackup.repo + if [ $DISTRIB == 'centos' ] + then sed -i "s/@SHORT_CODENAME@/Centos/" pg_probackup.repo + elif [ $DISTRIB == 'rhel' ] + then sed -i "s/@SHORT_CODENAME@/RedHat/" pg_probackup.repo + elif [ $DISTRIB == 'oraclelinux' ] + then sed -i "s/@SHORT_CODENAME@/Oracle/" pg_probackup.repo + fi +else + tar -xf /app/in/tarballs/pgpro.tar.bz2 -C /root/rpmbuild/SOURCES/ + cd /root/rpmbuild/SOURCES/pgpro + + PGPRO_TOC=$(echo ${PG_FULL_VERSION} | sed 's|\.|_|g') + if [[ ${PBK_EDITION} == 'std' ]] ; then + git checkout "PGPRO${PGPRO_TOC}_1" + else + git checkout "PGPROEE${PGPRO_TOC}_1" + fi + rm -rf .git + + cd /root/rpmbuild/SOURCES/ + sed -i "s/@DISTRIB@/${DISTRIB}/" pg_probackup-forks.repo + if [ $DISTRIB == 'centos' ] + then sed -i "s/@SHORT_CODENAME@/Centos/" pg_probackup-forks.repo + elif [ $DISTRIB == 'rhel' ] + then sed -i "s/@SHORT_CODENAME@/RedHat/" pg_probackup-forks.repo + elif [ $DISTRIB == 'oraclelinux' ] + then sed -i "s/@SHORT_CODENAME@/Oracle/" pg_probackup-forks.repo + fi + + mv pgpro postgrespro-${PBK_EDITION}-${PG_FULL_VERSION} + chown -R root:root 
postgrespro-${PBK_EDITION}-${PG_FULL_VERSION} + +# tar -cjf postgrespro-${PBK_EDITION}-${PG_FULL_VERSION}.tar.bz2 postgrespro-${PBK_EDITION}-${PG_FULL_VERSION} +fi + +cd /root/rpmbuild/SPECS +if [[ ${PBK_EDITION} == '' ]] ; then + sed -i "s/@PKG_VERSION@/${PKG_VERSION}/" pg_probackup.spec + sed -i "s/@PKG_RELEASE@/${PKG_RELEASE}/" pg_probackup.spec + sed -i "s/@PKG_HASH@/${PKG_HASH}/" pg_probackup.spec + sed -i "s/@PG_VERSION@/${PG_VERSION}/" pg_probackup.spec + sed -i "s/@PG_FULL_VERSION@/${PG_FULL_VERSION}/" pg_probackup.spec + + sed -i "s/@PKG_VERSION@/${PKG_VERSION}/" pg_probackup-repo.spec + sed -i "s/@PKG_RELEASE@/${PKG_RELEASE}/" pg_probackup-repo.spec +else + sed -i "s/@EDITION@/${PBK_EDITION}/" pg_probackup-pgpro.spec + sed -i "s/@EDITION_FULL@/${PBK_EDITION_FULL}/" pg_probackup-pgpro.spec + sed -i "s/@PKG_VERSION@/${PKG_VERSION}/" pg_probackup-pgpro.spec + sed -i "s/@PKG_RELEASE@/${PKG_RELEASE}/" pg_probackup-pgpro.spec + sed -i "s/@PKG_HASH@/${PKG_HASH}/" pg_probackup-pgpro.spec + sed -i "s/@PG_VERSION@/${PG_VERSION}/" pg_probackup-pgpro.spec + sed -i "s/@PG_FULL_VERSION@/${PG_FULL_VERSION}/" pg_probackup-pgpro.spec + + if [ ${PG_VERSION} != '9.6' ]; then + sed -i "s|@PREFIX@|/opt/pgpro/${EDITION}-${PG_VERSION}|g" pg_probackup-pgpro.spec + fi + + sed -i "s/@PKG_VERSION@/${PKG_VERSION}/" pg_probackup-repo-forks.spec + sed -i "s/@PKG_RELEASE@/${PKG_RELEASE}/" pg_probackup-repo-forks.spec +fi + +if [[ ${PBK_EDITION} == '' ]] ; then + + # install dependencies + yum-builddep -y pg_probackup.spec + + # build pg_probackup + rpmbuild -bs pg_probackup.spec + rpmbuild -ba pg_probackup.spec + + # build repo files + rpmbuild -bs pg_probackup-repo.spec + rpmbuild -ba pg_probackup-repo.spec + + # write artefacts to out directory + rm -rf /app/out/* + cp -arv /root/rpmbuild/{RPMS,SRPMS} /app/out +else + # install dependencies + yum-builddep -y pg_probackup-pgpro.spec + # build pg_probackup + rpmbuild -ba pg_probackup-pgpro.spec + + # build repo files + rpmbuild -ba pg_probackup-repo-forks.spec + + # write artefacts to out directory + rm -rf /app/out/* + cp -arv /root/rpmbuild/RPMS /app/out +fi diff --git a/packaging/pkg/scripts/suse.sh b/packaging/pkg/scripts/suse.sh new file mode 100755 index 000000000..76b444b5b --- /dev/null +++ b/packaging/pkg/scripts/suse.sh @@ -0,0 +1,95 @@ +#!/usr/bin/env bash + +# Copyright Notice: +# © (C) Postgres Professional 2015-2016 http://www.postgrespro.ru/ +# Distributed under Apache License 2.0 +# Распространяется по лицензии Apache 2.0 + + +#yum upgrade -y || echo "some packages in docker fail to install" +#if [ -f /etc/rosa-release ]; then +# # Avoids old yum bugs on rosa-6 +# yum upgrade -y || echo "some packages in docker fail to install" +#fi + +set -xe +set -o pipefail + +# currenctly we do not build std|ent packages for Suse +if [[ ${PBK_EDITION} != '' ]] ; then + exit 0 +fi + +# fix https://github.com/moby/moby/issues/23137 +ulimit -n 1024 +zypper clean + +# PACKAGES NEEDED +zypper install -y git wget bzip2 rpm-build + +mkdir /root/build +cd /root/build + +# Copy rpmbuild +cp -rv /app/in/specs/rpm/rpmbuild /root/ + +# download pbk +git clone $PKG_URL pg_probackup-${PKG_VERSION} +cd pg_probackup-${PKG_VERSION} +git checkout ${PKG_HASH} +cd .. 
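A notable difference from rpm.sh: there is no yum-builddep on SUSE, so later in this script the BuildRequires are parsed straight out of the spec file and handed to zypper. The same one-liner can be reused on its own (a sketch, run from the SPECS directory):
```
# Install the BuildRequires of a spec on SUSE without yum-builddep (sketch):
zypper -n install \
    $(rpmspec --parse pg_probackup.spec | grep BuildRequires | cut -d':' -f2 | xargs)
```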
+ +# tarball it +tar -cjf pg_probackup-${PKG_VERSION}.tar.bz2 pg_probackup-${PKG_VERSION} +mv pg_probackup-${PKG_VERSION}.tar.bz2 /root/rpmbuild/SOURCES +rm -rf pg_probackup-${PKG_VERSION} + +# Download PostgreSQL source +wget -q http://ftp.postgresql.org/pub/source/v${PG_FULL_VERSION}/postgresql-${PG_FULL_VERSION}.tar.bz2 -O /root/rpmbuild/SOURCES/postgresql-${PG_VERSION}.tar.bz2 + +rm -rf /usr/src/packages +ln -s /root/rpmbuild /usr/src/packages + +cd /root/rpmbuild/SOURCES +sed -i "s/@PG_VERSION@/${PKG_VERSION}/" pg_probackup.repo + + +# change to build dir +cd /root/rpmbuild/SOURCES +sed -i "s/@DISTRIB@/${DISTRIB}/" pg_probackup.repo +if [ $DISTRIB == 'centos' ] + then sed -i "s/@SHORT_CODENAME@/Centos/" pg_probackup.repo +elif [ $DISTRIB == 'rhel' ] + then sed -i "s/@SHORT_CODENAME@/RedHat/" pg_probackup.repo +elif [ $DISTRIB == 'oraclelinux' ] + then sed -i "s/@SHORT_CODENAME@/Oracle/" pg_probackup.repo +elif [ $DISTRIB == 'suse' ] + then sed -i "s/@SHORT_CODENAME@/SUSE/" pg_probackup.repo +fi + +cd /root/rpmbuild/SPECS +sed -i "s/@PKG_VERSION@/${PKG_VERSION}/" pg_probackup.spec +sed -i "s/@PKG_RELEASE@/${PKG_RELEASE}/" pg_probackup.spec +sed -i "s/@PKG_HASH@/${PKG_HASH}/" pg_probackup.spec +sed -i "s/@PG_VERSION@/${PG_VERSION}/" pg_probackup.spec +sed -i "s/@PG_FULL_VERSION@/${PG_FULL_VERSION}/" pg_probackup.spec + +sed -i "s/@PG_VERSION@/${PG_VERSION}/" pg_probackup-repo.spec +sed -i "s/@PKG_VERSION@/${PKG_VERSION}/" pg_probackup-repo.spec +sed -i "s/@PKG_RELEASE@/${PKG_RELEASE}/" pg_probackup-repo.spec + +# install dependencies +zypper -n install \ + $(rpmspec --parse pg_probackup.spec | grep BuildRequires | cut -d':' -f2 | xargs) + +# build pg_probackup +rpmbuild -bs pg_probackup.spec +rpmbuild -ba pg_probackup.spec #2>&1 | tee -ai /app/out/build.log + +# build repo files, TODO: move to separate repo +rpmbuild -ba pg_probackup-repo.spec + +# write artefacts to out directory +rm -rf /app/out/* + +cp -arv /root/rpmbuild/{RPMS,SRPMS} /app/out diff --git a/packaging/pkg/specs/deb/pg_probackup/debian/changelog b/packaging/pkg/specs/deb/pg_probackup/debian/changelog new file mode 100644 index 000000000..5b9160220 --- /dev/null +++ b/packaging/pkg/specs/deb/pg_probackup/debian/changelog @@ -0,0 +1,11 @@ +@PKG_NAME@ (@PKG_VERSION@-@PKG_RELEASE@.@PKG_HASH@.@CODENAME@) @CODENAME@; urgency=medium + + * @PKG_VERSION@ + + -- Grigory Smolkin Wed, 9 Feb 2018 10:22:08 +0300 + +@PKG_NAME@ (2.0.14-1.@CODENAME@) @CODENAME@; urgency=medium + + * Initial package + + -- Grigory Smolkin Fri, 29 Jan 2018 10:22:08 +0300 diff --git a/packaging/pkg/specs/deb/pg_probackup/debian/compat b/packaging/pkg/specs/deb/pg_probackup/debian/compat new file mode 100644 index 000000000..ec635144f --- /dev/null +++ b/packaging/pkg/specs/deb/pg_probackup/debian/compat @@ -0,0 +1 @@ +9 diff --git a/packaging/pkg/specs/deb/pg_probackup/debian/control b/packaging/pkg/specs/deb/pg_probackup/debian/control new file mode 100644 index 000000000..8f1d42007 --- /dev/null +++ b/packaging/pkg/specs/deb/pg_probackup/debian/control @@ -0,0 +1,29 @@ +Source: @PKG_NAME@ +Section: database +Priority: optional +Maintainer: PostgresPro DBA +Uploaders: Grigory Smolkin +Build-Depends: + debhelper (>= 9), + bison, + dpkg-dev, + flex, + gettext, + zlib1g-dev | libz-dev, + libpq5 +Standards-Version: 3.9.6 +Homepage: https://github.com/postgrespro/pg_probackup + +Package: @PKG_NAME@ +Architecture: any +Depends: ${misc:Depends}, ${shlibs:Depends} +Description: Backup tool for PostgreSQL. + . 
+ This package provides pg_probackup binary for PostgreSQL @PG_VERSION@. + +Package: @PKG_NAME@-dbg +Depends: @PKG_NAME@ +Architecture: any +Description: Backup tool for PostgreSQL. + . + This package provides detached debugging symbols for pg_probackup diff --git a/packaging/pkg/specs/deb/pg_probackup/debian/pg_probackup.install b/packaging/pkg/specs/deb/pg_probackup/debian/pg_probackup.install new file mode 100644 index 000000000..ed904ca40 --- /dev/null +++ b/packaging/pkg/specs/deb/pg_probackup/debian/pg_probackup.install @@ -0,0 +1 @@ +pg_probackup-@PG_VERSION@ /usr/bin/ \ No newline at end of file diff --git a/packaging/pkg/specs/deb/pg_probackup/debian/rules b/packaging/pkg/specs/deb/pg_probackup/debian/rules new file mode 100644 index 000000000..309a9a1d4 --- /dev/null +++ b/packaging/pkg/specs/deb/pg_probackup/debian/rules @@ -0,0 +1,29 @@ +#!/usr/bin/make -f + +# Uncomment this to turn on verbose mode. +export DH_VERBOSE=1 + +%: + dh $@ + +override_dh_auto_clean: + # skip + +override_dh_auto_build: + cd postgresql-@PG_FULL_VERSION@ && ./configure --enable-debug --without-readline --prefix=@PREFIX@ &&\ + make MAKELEVEL=0 install DESTDIR=$(CURDIR)/debian/tmp && cd .. &&\ + make USE_PGXS=1 top_srcdir=$(CURDIR)/postgresql-@PG_FULL_VERSION@ PG_CONFIG=$(CURDIR)/debian/tmp/@PREFIX@/bin/pg_config &&\ + mv pg_probackup pg_probackup-@PG_VERSION@ + +override_dh_auto_test: + # skip + +override_dh_auto_install: + # skip + +override_dh_strip: + dh_strip --dbg-package=@PKG_NAME@-dbg + +override_dh_auto_clean: + # skip + #make clean top_srcdir=$(CURDIR)/pg@PG_TOC@-source PG_CONFIG=$(CURDIR)/debian/tmp/stump/bin/pg_config diff --git a/packaging/pkg/specs/deb/pg_probackup/debian/source/format b/packaging/pkg/specs/deb/pg_probackup/debian/source/format new file mode 100644 index 000000000..163aaf8d8 --- /dev/null +++ b/packaging/pkg/specs/deb/pg_probackup/debian/source/format @@ -0,0 +1 @@ +3.0 (quilt) diff --git a/packaging/pkg/specs/rpm/rpmbuild/SOURCES/GPG-KEY-PG_PROBACKUP b/packaging/pkg/specs/rpm/rpmbuild/SOURCES/GPG-KEY-PG_PROBACKUP new file mode 100644 index 000000000..c11d9c015 --- /dev/null +++ b/packaging/pkg/specs/rpm/rpmbuild/SOURCES/GPG-KEY-PG_PROBACKUP @@ -0,0 +1,52 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v2.0.22 (GNU/Linux) + +mQINBFpy9DABEADd44hR3o4i4DrUephrr7iHPHcRH0Zego3A36NdOf0ymP94H8Bi +U8C6YyKFbltShh18IC3QZJK04hLRQEs6sPKC2XHwlz+Tndi49Z45pfV54xEVKmBS +IZ5AM9y1FxwQAOzu6pZGu32DWDXZzhI7nLuY8rqAMMuzKeRcGm3sQ6ZcAwYOLT+e +ZAxkUL05MBGDaLc91HtKiurRlHuMySiVdkNu9ebTGV4zZv+ocBK8iC5rJjTJCv78 +eLkrRgjp7/MuLQ7mmiwfZx5lUIO9S87HDeH940mcYWRGUsdCbj0791wHY0PXlqhH +6lCLGur9/5yM88pGU79uahCblxsYdue6fdgIZR0hQBUxnLphI2SCshP89VDEwaP2 +dlC/qESJ3xyULkdJz67wlhOrIPC9T1d2pa5MUurOK0yTFH7j4JLWjBgU59h31ZEF +NMHde+Fwv+lL/yRht2Xz7HG5Rt8ogn4/rPBloXr1v83iN34aZnnqanyhSbE9xUhP +RNK3fBxXmX9IjFsBhRelPcv5NWNnxnnMkEfhoZvrAy+ykUGLP+J+Rj+d5v/8nAUc +taxqAXlUz1VabR0BVISBsRY+ket4O2dJ1WbZ8KXG6q/F9UMpS0v9aRdb1JyzrWCw +wT/l3q9x89i27SgDZgAfEFhvbMN6hUmFyVoMBgk8kqvi4b3lZZGCeuLX5wARAQAB +tCxQb3N0Z3JlU1FMIFByb2Zlc3Npb25hbCA8ZGJhQHBvc3RncmVzcHJvLnJ1PokC +OQQTAQIAIwUCWnL0MAIbAwcLCQgHAwIBBhUIAgkKCwQWAgMBAh4BAheAAAoJEKeJ +efZjbXF+zDUP/RfYxlq3erzP/cG6/LghZlJy6hGuUgyDFj2zUVAbpoFhqCAmaNLc ++bBYMCyNRhS8/oXushCSxUV8D7LRIRIRdtbNAnd4MNl6U4ORF6JcdPPNLROzwMik +3TmIVACMdjb9IRF5+8jVrIgDPI/FVtf5qp0Ot6OBtpD5oWQ7ubZ31RPR3pacdujK +jlbzL5Y6HsonhMbSJU/d0d9DylMvX4Gcxdw7M2Pfe3E6mjPJmcHiKuCKln2eLOsg +53HA/RWKy+uYDo+vdefUsQCIdnC5VghnXG8FTuvVqeqiSeU2XdyuzjndqxKZNrMw +YK1POK7R55R1aKJaOKEwnfd5oN02p77U+R/vb/mDcfZWbXI8JrHwPKVOQiEl0S+G 
+ePPW57EmX9yFuWAzcOPp9yCt/+roVry1ICifrFaLOhtT+/vle0j3+rbn31BMPsjf +QbREVetHfWB0N78k/hKC8SivDdrXsdqovcGgSAjFzPEdznvx9vKopwz2CQ6DK25Q +3M4j79Akcaa08k5Wphyx48PbhzSeE/d4xVzey7ge0BwYMdNGXKeyBjT6h9e+iySE +UTZ3/3c7O1D8p2EfPUMT/aI5fWlLBXlT5fDp2yX0HMTt/NUIXAiTHb5BDnZ+4ld3 +KXjHw4WzaOfHBfGDjJDtHPgdTEJTsQbH8//D+wwU3ueNS1ho4DpLqc+YuQINBFpy +9DABEADJMkgQ2m4g4LX7FNnmQbRgDcuhL8Y0VRGST+5x6xvb2em1boQHUaTt7/3z +DnaIRrZqrFP09O6xblSjEu9FZE+JuQGNyC4TH9fjvKnkRlqTF6X87nRVGByRmrdL +lPp9XPJY2Mc7c0PisncI/j7d9PmUHOSmaWeLG/WqMbzZA+s1IWjC0tqIN2k5ivTN +PfRm+9ebEHMUN+D7yZQMBlCmFexwy6h5pAioyj4tAOHqxfNDE33qezaeBn/E1BpW +NyegKwNtPUL0t2kXTO5tspKKCcny4HJ7K60gak0fWp42qVygwSPR54ztFM+6XjCh +0MmZ/mAdzLd6OJiP8RfMCfXbXpK4793+Cw0AK3Mu+mnJ26kz1KEZ9DKiAEhBhK3r +Z3/isUc8LcVYLHIduH9b/K50FjgR0T1Lm4r6Hpf6nTROlfiFSMXJU0HepAzMPHRq +EWqTJ49UgI7Llf+aBP7fGLqRPvWJpAJaQkMiUxfP5JYYCb+45d7I54iXQCD6ToK1 +bDnh+zZIrwyUIxPfFQh1xPYyFWRELJpeOFzm+espqiVFPXpBoimVlytwNrGdbxbY +SO0eEVlE41AjD8cgk+ibAvt/moT2+Mps/t083LR+J92kj+iX/D4NHVy4CjJTrhwO +rI3FrxtdU+NFXULyj0KslOKuyG5WuHLQvfL5P3JGuTkP4iJOTQARAQABiQIfBBgB +AgAJBQJacvQwAhsMAAoJEKeJefZjbXF+8JgQAJqlO1ftIsJvZ/+4ZVVOTPx5ZmYs +ABp4/2gaiLdhajN8ynbZqtCyjtQwSCLJFf2CcDL8XUooJzdQECkqdiI7ouYSFBzO +ui3jjCuFz5oHv88OtX2cIRxHqlZQmXEHvk0gH61xDV5CWBJmjxdRcsC7n1I8DSVg +Qmuq06S+xIX6rHf2CRxYKahBip71u7OIH4BRV44y26xf1a8an+8BkqF9+mYt7zqO +vyMCJ1UftXcuE5SxY54jnNAavF7Kq/2Yp7v3aYqFREngxtbWudyo7QW5EuToSvY2 +qY6tpInahWjuXxeARsFzp4fB0Eo/yH+iqG30zkQCuxLyxzbMMcNQP4if3yV6uO14 +LqapZLrMp6IMTfHDKmbbtDQ2RpRRut3K4khXRQ1LjGKziOU4ZCEazrXEijm2AlKw +7JS3POGvM+VAiaGNBqfdHpTwXXT7zkxJjfJC3Et/6fHy1xuCtUtMs41PjHS/HWi8 +w70T8XpKub+ewVElxq2D83rx07w3HuBtVUyqG0XgcACwqQA1vMLJaR3VoX1024ho +sf2PtZqQ7SCgt0hkZAT72j05nz4bIxUIcDkAGtd9FDPQ4Ixi6fRfTJpZ7lIEV5as +Zs9C0hrxmWgJwSGgQa2Waylvw47fMwfMn+gUNRqwanyOjVYfpSJafLc6Ol43bQN/ +jCKs4enncezhjcAh +=TVZj +-----END PGP PUBLIC KEY BLOCK----- \ No newline at end of file diff --git a/packaging/pkg/specs/rpm/rpmbuild/SOURCES/GPG-KEY-PG_PROBACKUP-FORKS b/packaging/pkg/specs/rpm/rpmbuild/SOURCES/GPG-KEY-PG_PROBACKUP-FORKS new file mode 100644 index 000000000..c11d9c015 --- /dev/null +++ b/packaging/pkg/specs/rpm/rpmbuild/SOURCES/GPG-KEY-PG_PROBACKUP-FORKS @@ -0,0 +1,52 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v2.0.22 (GNU/Linux) + +mQINBFpy9DABEADd44hR3o4i4DrUephrr7iHPHcRH0Zego3A36NdOf0ymP94H8Bi +U8C6YyKFbltShh18IC3QZJK04hLRQEs6sPKC2XHwlz+Tndi49Z45pfV54xEVKmBS +IZ5AM9y1FxwQAOzu6pZGu32DWDXZzhI7nLuY8rqAMMuzKeRcGm3sQ6ZcAwYOLT+e +ZAxkUL05MBGDaLc91HtKiurRlHuMySiVdkNu9ebTGV4zZv+ocBK8iC5rJjTJCv78 +eLkrRgjp7/MuLQ7mmiwfZx5lUIO9S87HDeH940mcYWRGUsdCbj0791wHY0PXlqhH +6lCLGur9/5yM88pGU79uahCblxsYdue6fdgIZR0hQBUxnLphI2SCshP89VDEwaP2 +dlC/qESJ3xyULkdJz67wlhOrIPC9T1d2pa5MUurOK0yTFH7j4JLWjBgU59h31ZEF +NMHde+Fwv+lL/yRht2Xz7HG5Rt8ogn4/rPBloXr1v83iN34aZnnqanyhSbE9xUhP +RNK3fBxXmX9IjFsBhRelPcv5NWNnxnnMkEfhoZvrAy+ykUGLP+J+Rj+d5v/8nAUc +taxqAXlUz1VabR0BVISBsRY+ket4O2dJ1WbZ8KXG6q/F9UMpS0v9aRdb1JyzrWCw +wT/l3q9x89i27SgDZgAfEFhvbMN6hUmFyVoMBgk8kqvi4b3lZZGCeuLX5wARAQAB +tCxQb3N0Z3JlU1FMIFByb2Zlc3Npb25hbCA8ZGJhQHBvc3RncmVzcHJvLnJ1PokC +OQQTAQIAIwUCWnL0MAIbAwcLCQgHAwIBBhUIAgkKCwQWAgMBAh4BAheAAAoJEKeJ +efZjbXF+zDUP/RfYxlq3erzP/cG6/LghZlJy6hGuUgyDFj2zUVAbpoFhqCAmaNLc ++bBYMCyNRhS8/oXushCSxUV8D7LRIRIRdtbNAnd4MNl6U4ORF6JcdPPNLROzwMik +3TmIVACMdjb9IRF5+8jVrIgDPI/FVtf5qp0Ot6OBtpD5oWQ7ubZ31RPR3pacdujK +jlbzL5Y6HsonhMbSJU/d0d9DylMvX4Gcxdw7M2Pfe3E6mjPJmcHiKuCKln2eLOsg +53HA/RWKy+uYDo+vdefUsQCIdnC5VghnXG8FTuvVqeqiSeU2XdyuzjndqxKZNrMw +YK1POK7R55R1aKJaOKEwnfd5oN02p77U+R/vb/mDcfZWbXI8JrHwPKVOQiEl0S+G 
+ePPW57EmX9yFuWAzcOPp9yCt/+roVry1ICifrFaLOhtT+/vle0j3+rbn31BMPsjf +QbREVetHfWB0N78k/hKC8SivDdrXsdqovcGgSAjFzPEdznvx9vKopwz2CQ6DK25Q +3M4j79Akcaa08k5Wphyx48PbhzSeE/d4xVzey7ge0BwYMdNGXKeyBjT6h9e+iySE +UTZ3/3c7O1D8p2EfPUMT/aI5fWlLBXlT5fDp2yX0HMTt/NUIXAiTHb5BDnZ+4ld3 +KXjHw4WzaOfHBfGDjJDtHPgdTEJTsQbH8//D+wwU3ueNS1ho4DpLqc+YuQINBFpy +9DABEADJMkgQ2m4g4LX7FNnmQbRgDcuhL8Y0VRGST+5x6xvb2em1boQHUaTt7/3z +DnaIRrZqrFP09O6xblSjEu9FZE+JuQGNyC4TH9fjvKnkRlqTF6X87nRVGByRmrdL +lPp9XPJY2Mc7c0PisncI/j7d9PmUHOSmaWeLG/WqMbzZA+s1IWjC0tqIN2k5ivTN +PfRm+9ebEHMUN+D7yZQMBlCmFexwy6h5pAioyj4tAOHqxfNDE33qezaeBn/E1BpW +NyegKwNtPUL0t2kXTO5tspKKCcny4HJ7K60gak0fWp42qVygwSPR54ztFM+6XjCh +0MmZ/mAdzLd6OJiP8RfMCfXbXpK4793+Cw0AK3Mu+mnJ26kz1KEZ9DKiAEhBhK3r +Z3/isUc8LcVYLHIduH9b/K50FjgR0T1Lm4r6Hpf6nTROlfiFSMXJU0HepAzMPHRq +EWqTJ49UgI7Llf+aBP7fGLqRPvWJpAJaQkMiUxfP5JYYCb+45d7I54iXQCD6ToK1 +bDnh+zZIrwyUIxPfFQh1xPYyFWRELJpeOFzm+espqiVFPXpBoimVlytwNrGdbxbY +SO0eEVlE41AjD8cgk+ibAvt/moT2+Mps/t083LR+J92kj+iX/D4NHVy4CjJTrhwO +rI3FrxtdU+NFXULyj0KslOKuyG5WuHLQvfL5P3JGuTkP4iJOTQARAQABiQIfBBgB +AgAJBQJacvQwAhsMAAoJEKeJefZjbXF+8JgQAJqlO1ftIsJvZ/+4ZVVOTPx5ZmYs +ABp4/2gaiLdhajN8ynbZqtCyjtQwSCLJFf2CcDL8XUooJzdQECkqdiI7ouYSFBzO +ui3jjCuFz5oHv88OtX2cIRxHqlZQmXEHvk0gH61xDV5CWBJmjxdRcsC7n1I8DSVg +Qmuq06S+xIX6rHf2CRxYKahBip71u7OIH4BRV44y26xf1a8an+8BkqF9+mYt7zqO +vyMCJ1UftXcuE5SxY54jnNAavF7Kq/2Yp7v3aYqFREngxtbWudyo7QW5EuToSvY2 +qY6tpInahWjuXxeARsFzp4fB0Eo/yH+iqG30zkQCuxLyxzbMMcNQP4if3yV6uO14 +LqapZLrMp6IMTfHDKmbbtDQ2RpRRut3K4khXRQ1LjGKziOU4ZCEazrXEijm2AlKw +7JS3POGvM+VAiaGNBqfdHpTwXXT7zkxJjfJC3Et/6fHy1xuCtUtMs41PjHS/HWi8 +w70T8XpKub+ewVElxq2D83rx07w3HuBtVUyqG0XgcACwqQA1vMLJaR3VoX1024ho +sf2PtZqQ7SCgt0hkZAT72j05nz4bIxUIcDkAGtd9FDPQ4Ixi6fRfTJpZ7lIEV5as +Zs9C0hrxmWgJwSGgQa2Waylvw47fMwfMn+gUNRqwanyOjVYfpSJafLc6Ol43bQN/ +jCKs4enncezhjcAh +=TVZj +-----END PGP PUBLIC KEY BLOCK----- \ No newline at end of file diff --git a/packaging/pkg/specs/rpm/rpmbuild/SOURCES/pg_probackup-forks.repo b/packaging/pkg/specs/rpm/rpmbuild/SOURCES/pg_probackup-forks.repo new file mode 100644 index 000000000..fcef58a9c --- /dev/null +++ b/packaging/pkg/specs/rpm/rpmbuild/SOURCES/pg_probackup-forks.repo @@ -0,0 +1,6 @@ +[pg_probackup-forks] +name=PG_PROBACKUP @SHORT_CODENAME@ packages for PostgresPro Standart and Enterprise - $basearch +baseurl=https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/@DISTRIB@-$releasever-$basearch +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/GPG-KEY-PG_PROBACKUP diff --git a/packaging/pkg/specs/rpm/rpmbuild/SOURCES/pg_probackup.repo b/packaging/pkg/specs/rpm/rpmbuild/SOURCES/pg_probackup.repo new file mode 100644 index 000000000..33dc31a24 --- /dev/null +++ b/packaging/pkg/specs/rpm/rpmbuild/SOURCES/pg_probackup.repo @@ -0,0 +1,13 @@ +[pg_probackup] +name=PG_PROBACKUP Packages for @SHORT_CODENAME@ Linux - $basearch +baseurl=https://repo.postgrespro.ru/pg_probackup/rpm/latest/@DISTRIB@-$releasever-$basearch +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/GPG-KEY-PG_PROBACKUP + +[pg_probackup-sources] +name=PG_PROBACKUP Source Packages for @SHORT_CODENAME@ Linux - $basearch +baseurl=https://repo.postgrespro.ru/pg_probackup/srpm/latest/@DISTRIB@-$releasever-$basearch +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/GPG-KEY-PG_PROBACKUP diff --git a/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-pgpro.spec b/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-pgpro.spec new file mode 100644 index 000000000..d5811171d --- /dev/null +++ 
b/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-pgpro.spec @@ -0,0 +1,71 @@ +%global version @PKG_VERSION@ +%global release @PKG_RELEASE@ +%global hash @PKG_HASH@ +%global pgsql_major @PG_VERSION@ +%global pgsql_full @PG_FULL_VERSION@ +%global edition @EDITION@ +%global edition_full @EDITION_FULL@ +%global prefix @PREFIX@ + +Name: pg_probackup-%{edition}-%{pgsql_major} +Version: %{version} +Release: %{release}.%{hash} +Summary: Backup utility for PostgresPro %{edition_full} +Group: Applications/Databases +License: BSD +Url: http://postgrespro.ru/ +#Source0: postgrespro-%{edition}-%{pgsql_full}.tar.bz2 +#Source1: pg_probackup-%{version}.tar.bz2 +Source0: postgrespro-%{edition}-%{pgsql_full} +Source1: pg_probackup-%{version} +BuildRequires: gcc make perl glibc-devel +BuildRequires: openssl-devel gettext zlib-devel + + +%description +Backup tool for PostgresPro %{edition_full}. + +%prep +#%setup -q -b1 -n pg_probackup-%{version}.tar.bz2 +mv %{_topdir}/SOURCES/postgrespro-%{edition}-%{pgsql_full} %{_topdir}/BUILD +cd %{_topdir}/BUILD/postgrespro-%{edition}-%{pgsql_full} +mv %{_topdir}/SOURCES/pg_probackup-%{version} contrib/pg_probackup + +mkdir %{_topdir}/SOURCES/postgrespro-%{edition}-%{pgsql_full} +mkdir %{_topdir}/SOURCES/pg_probackup-%{version} + +%build +#cd %{_topdir}/SOURCES/postgrespro-%{edition}-%{pgsql_full} +#mv %{_topdir}/SOURCES/postgrespro-%{edition}-%{pgsql_full} ./ +#cd postgrespro-%{edition}-%{pgsql_full} +#mv %{_topdir}/SOURCES/pg_probackup-%{version} contrib/pg_probackup +cd %{_topdir}/BUILD/postgrespro-%{edition}-%{pgsql_full} + +%if "%{pgsql_major}" == "9.6" +./configure --enable-debug +%else +./configure --enable-debug --without-readline --prefix=%{prefix} +%endif +make -C 'src/common' +make -C 'src/port' +make -C 'src/interfaces' +cd contrib/pg_probackup && make + +%install +cd %{_topdir}/BUILD/postgrespro-%{edition}-%{pgsql_full} +%{__mkdir} -p %{buildroot}%{_bindir} +%{__install} -p -m 755 contrib/pg_probackup/pg_probackup %{buildroot}%{_bindir}/%{name} + +%files +%{_bindir}/%{name} + +%clean +rm -rf $RPM_BUILD_ROOT + + +%changelog +* Wed Feb 9 2018 Grigory Smolkin - %{version}-%{release}.%{hash} +- @PKG_VERSION@ + +* Fri Jan 29 2018 Grigory Smolkin - 2.0.14-1 +- Initial release. diff --git a/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-repo-forks.spec b/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-repo-forks.spec new file mode 100644 index 000000000..fd4a99f2c --- /dev/null +++ b/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-repo-forks.spec @@ -0,0 +1,49 @@ +%global version @PKG_VERSION@ +%global release @PKG_RELEASE@ + +Summary: pg_probackup repo RPM +Name: pg_probackup-repo-forks +Version: %{version} +Release: %{release} +Group: Applications/Databases +License: BSD +Url: http://postgrespro.ru/ + +Source0: http://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP +Source1: pg_probackup-forks.repo + +BuildArch: noarch + +%description +This package contains yum configuration for @SHORT_CODENAME@, and also the GPG key +for pg_probackup RPMs for PostgresPro Standart and Enterprise. + +%prep +%setup -q -c -T +install -pm 644 %{SOURCE0} . +install -pm 644 %{SOURCE1} . 
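This repo package carries no binaries: it only drops the .repo definition and the signing key onto the target host, after which the actual fork builds are pulled from repo.postgrespro.ru. A usage sketch (the file and package names below are examples for the std edition and PostgreSQL 13, not taken from this patch):
```
# Sketch: enable the forks repository, then install a fork build (example names).
rpm -ivh pg_probackup-repo-forks-2.4.17-1.noarch.rpm
yum install pg_probackup-std-13
```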
+ +%build + +%install +rm -rf $RPM_BUILD_ROOT + +#GPG Key +install -Dpm 644 %{SOURCE0} \ + $RPM_BUILD_ROOT%{_sysconfdir}/pki/rpm-gpg/GPG-KEY-PG_PROBACKUP + +# yum +install -dm 755 $RPM_BUILD_ROOT%{_sysconfdir}/yum.repos.d +install -pm 644 %{SOURCE1} $RPM_BUILD_ROOT%{_sysconfdir}/yum.repos.d + +%clean +rm -rf $RPM_BUILD_ROOT + +%files +%defattr(-,root,root,-) +%config(noreplace) /etc/yum.repos.d/* +/etc/pki/rpm-gpg/* + +%changelog +* Fri Oct 26 2019 Grigory Smolkin +- Initial package diff --git a/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-repo.spec b/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-repo.spec new file mode 100644 index 000000000..da54bc7b1 --- /dev/null +++ b/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-repo.spec @@ -0,0 +1,58 @@ +%global version @PKG_VERSION@ +%global release @PKG_RELEASE@ + +Summary: PG_PROBACKUP RPMs +Name: pg_probackup-repo +Version: %{version} +Release: %{release} +Group: Applications/Databases +License: BSD +Url: http://postgrespro.ru/ + +Source0: http://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP +Source1: pg_probackup.repo + +BuildArch: noarch + +%description +This package contains yum configuration for Centos, and also the GPG key for PG_PROBACKUP RPMs. + +%prep +%setup -q -c -T +install -pm 644 %{SOURCE0} . +install -pm 644 %{SOURCE1} . + +%build + +%install +rm -rf $RPM_BUILD_ROOT + +#GPG Key +install -Dpm 644 %{SOURCE0} \ + $RPM_BUILD_ROOT%{_sysconfdir}/pki/rpm-gpg/GPG-KEY-PG_PROBACKUP + +# yum /etc/zypp/repos.d/repo-update.repo + +%if 0%{?suse_version} + install -dm 755 $RPM_BUILD_ROOT%{_sysconfdir}/zypp/repos.d + install -pm 644 %{SOURCE1} $RPM_BUILD_ROOT%{_sysconfdir}/zypp/repos.d +%else + install -dm 755 $RPM_BUILD_ROOT%{_sysconfdir}/yum.repos.d + install -pm 644 %{SOURCE1} $RPM_BUILD_ROOT%{_sysconfdir}/yum.repos.d +%endif + +%clean +rm -rf $RPM_BUILD_ROOT + +%files +%defattr(-,root,root,-) +%if 0%{?suse_version} + %config(noreplace) /etc/zypp/repos.d/* +%else + %config(noreplace) /etc/yum.repos.d/* +%endif +/etc/pki/rpm-gpg/* + +%changelog +* Mon Jun 29 2020 Grigory Smolkin +- release update diff --git a/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup.alt.forks.spec b/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup.alt.forks.spec new file mode 100644 index 000000000..cbfd61a0f --- /dev/null +++ b/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup.alt.forks.spec @@ -0,0 +1,67 @@ +%global version @PKG_VERSION@ +%global release @PKG_RELEASE@ +%global hash @PKG_HASH@ +%global pgsql_major @PG_VERSION@ +%global pgsql_full @PG_FULL_VERSION@ +%global edition @EDITION@ +%global edition_full @EDITION_FULL@ +%global prefix @PREFIX@ + +#%set_verify_elf_method unresolved=relaxed, rpath=relaxed +%set_verify_elf_method rpath=relaxed,unresolved=relaxed + +Name: pg_probackup-%{edition}-%{pgsql_major} +Version: %{version} +Release: %{release}.%{hash} +Summary: Backup utility for PostgresPro %{edition_full} +Group: Applications/Databases +License: BSD +Url: http://postgrespro.ru/ +#Source0: postgrespro-%{edition}-%{pgsql_full}.tar.bz2 +#Source1: pg_probackup-%{edition}-%{version}.tar.bz2 +Source0: postgrespro-%{edition}-%{pgsql_full} +Source1: pg_probackup-%{version} +BuildRequires: gcc make perl glibc-devel bison flex +BuildRequires: readline-devel openssl-devel gettext zlib-devel + + +%description +Backup tool for PostgresPro %{edition_full}. 
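On ALT this spec is driven by packaging/pkg/scripts/alt.sh shown earlier; reduced to its essentials, the manual equivalent is roughly the following (a sketch with example values for the @...@ tokens, assuming the SOURCES directories are already staged):
```
# Sketch: what alt.sh does with this spec (example substitution values).
echo '%_allow_root_build yes'           >  /root/.rpmmacros
echo '%_topdir %{getenv:HOME}/rpmbuild' >> /root/.rpmmacros
sed -i "s/@EDITION@/std/"        pg_probackup.alt.forks.spec
sed -i "s/@PKG_VERSION@/2.4.17/" pg_probackup.alt.forks.spec   # ...and the remaining tokens
rpmbuild -ba pg_probackup.alt.forks.spec
```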
+ +%prep +#%setup -q -b1 -n postgrespro-%{edition}-%{pgsql_full} +mv %{_topdir}/SOURCES/postgrespro-%{edition}-%{pgsql_full} %{_topdir}/BUILD +cd %{_topdir}/BUILD/postgrespro-%{edition}-%{pgsql_full} +mv %{_topdir}/SOURCES/pg_probackup-%{version} contrib/pg_probackup + +mkdir %{_topdir}/SOURCES/postgrespro-%{edition}-%{pgsql_full} +mkdir %{_topdir}/SOURCES/pg_probackup-%{edition}-%{version} +mkdir %{_topdir}/SOURCES/pg_probackup-%{version} + +%build +cd %{_topdir}/BUILD/postgrespro-%{edition}-%{pgsql_full} +%if "%{pgsql_major}" == "9.6" +./configure --enable-debug +%else +./configure --enable-debug --prefix=%{prefix} +%endif +make -C 'src/common' +make -C 'src/port' +make -C 'src/interfaces' +cd contrib/pg_probackup && make + +%install +cd %{_topdir}/BUILD/postgrespro-%{edition}-%{pgsql_full} +%{__mkdir} -p %{buildroot}%{_bindir} +%{__install} -p -m 755 contrib/pg_probackup/pg_probackup %{buildroot}%{_bindir}/%{name} + +%files +%{_bindir}/%{name} + +%clean +rm -rf $RPM_BUILD_ROOT + + +%changelog +* Mon Nov 17 2019 Grigory Smolkin - 2.2.6-1 +- Initial release. diff --git a/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup.alt.spec b/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup.alt.spec new file mode 100644 index 000000000..3105ffa67 --- /dev/null +++ b/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup.alt.spec @@ -0,0 +1,48 @@ +%global version @PKG_VERSION@ +%global release @PKG_RELEASE@ +%global hash @PKG_HASH@ +%global pgsql_major @PG_VERSION@ +%global pgsql_full @PG_FULL_VERSION@ +%set_verify_elf_method rpath=relaxed + +Name: pg_probackup-%{pgsql_major} +Version: %{version} +Release: %{release}.%{hash} +Summary: Backup utility for PostgreSQL +Group: Applications/Databases +License: BSD +Url: http://postgrespro.ru/ +Source0: http://ftp.postgresql.org/pub/source/v%{pgsql_full}/postgresql-%{pgsql_major}.tar.bz2 +Source1: pg_probackup-%{version}.tar.bz2 +BuildRequires: gcc make perl glibc-devel bison flex +BuildRequires: readline-devel openssl-devel gettext zlib-devel + + +%description +Backup tool for PostgreSQL. + +%prep +%setup -q -b1 -n postgresql-%{pgsql_full} + +%build +mv %{_builddir}/pg_probackup-%{version} contrib/pg_probackup +./configure --enable-debug --without-readline +make -C 'src/common' +make -C 'src/port' +make -C 'src/interfaces' +cd contrib/pg_probackup && make + +%install +%{__mkdir} -p %{buildroot}%{_bindir} +%{__install} -p -m 755 contrib/pg_probackup/pg_probackup %{buildroot}%{_bindir}/%{name} + +%files +%{_bindir}/%{name} + +%clean +rm -rf $RPM_BUILD_ROOT + + +%changelog +* Mon Nov 17 2019 Grigory Smolkin - 2.2.6-1 +- Initial release. 
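Note: the %prep, %build and %install sections of the specs in this patch all follow the same out-of-tree recipe: unpack the server sources, move the pg_probackup sources into contrib/, build the client libraries it links against, then build the probackup binary itself. The lines below are a minimal sketch of that flow run by hand, outside rpmbuild; the source paths and version numbers are placeholders, not values taken from the packaging.
# sketch of the build flow encoded in the spec files (paths/versions are examples)
cd /tmp/postgresql-13.2                           # unpacked server source tree
mv /tmp/pg_probackup-2.4.2 contrib/pg_probackup   # drop probackup sources into contrib/
./configure --enable-debug --without-readline
make -C src/common
make -C src/port
make -C src/interfaces
make -C contrib/pg_probackup                      # produces contrib/pg_probackup/pg_probackup
install -p -m 755 contrib/pg_probackup/pg_probackup /usr/local/bin/pg_probackup-13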
diff --git a/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup.spec b/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup.spec new file mode 100644 index 000000000..e5fb5ad48 --- /dev/null +++ b/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup.spec @@ -0,0 +1,48 @@ +%global version @PKG_VERSION@ +%global release @PKG_RELEASE@ +%global hash @PKG_HASH@ +%global pgsql_major @PG_VERSION@ +%global pgsql_full @PG_FULL_VERSION@ + +Name: pg_probackup-%{pgsql_major} +Version: %{version} +Release: %{release}.%{hash} +Summary: Backup utility for PostgreSQL +Group: Applications/Databases +License: BSD +Url: http://postgrespro.ru/ +Source0: http://ftp.postgresql.org/pub/source/v%{pgsql_full}/postgresql-%{pgsql_major}.tar.bz2 +Source1: pg_probackup-%{version}.tar.bz2 +BuildRequires: gcc make perl glibc-devel openssl-devel gettext zlib-devel + +%description +Backup tool for PostgreSQL. + +%prep +%setup -q -b1 -n postgresql-%{pgsql_full} + +%build +mv %{_builddir}/pg_probackup-%{version} contrib/pg_probackup +./configure --enable-debug --without-readline +make -C 'src/common' +make -C 'src/port' +make -C 'src/interfaces' +cd contrib/pg_probackup && make + +%install +%{__mkdir} -p %{buildroot}%{_bindir} +%{__install} -p -m 755 contrib/pg_probackup/pg_probackup %{buildroot}%{_bindir}/%{name} + +%files +%{_bindir}/%{name} + +%clean +rm -rf $RPM_BUILD_ROOT + + +%changelog +* Wed Feb 9 2018 Grigory Smolkin - %{version}-%{release}.%{hash} +- @PKG_VERSION@ + +* Fri Jan 29 2018 Grigory Smolkin - 2.0.14-1 +- Initial release. diff --git a/packaging/pkg/tarballs/.gitkeep b/packaging/pkg/tarballs/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/packaging/repo/scripts/alt.sh b/packaging/repo/scripts/alt.sh new file mode 100755 index 000000000..4cda313ef --- /dev/null +++ b/packaging/repo/scripts/alt.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash + +# Copyright Notice: +# © (C) Postgres Professional 2015-2016 http://www.postgrespro.ru/ +# Distributed under Apache License 2.0 +# Распространяется по лицензии Apache 2.0 + +set -exu +set -o errexit +set -o pipefail + +# fix https://github.com/moby/moby/issues/23137 +ulimit -n 1024 + +export INPUT_DIR=/app/in #dir with builded rpm +export OUT_DIR=/app/www/${PBK_PKG_REPO} + +apt-get update -y +apt-get install -qq -y apt-repo-tools gnupg rsync perl less wget + +if [[ ${PBK_EDITION} == '' ]] ; then + REPO_SUFFIX='vanilla' + FORK='PostgreSQL' +else + REPO_SUFFIX='forks' + FORK='PostgresPro' +fi + +cd $INPUT_DIR + +cp -arv /app/repo/$PBK_PKG_REPO/gnupg /root/.gnupg +chmod -R 0600 /root/.gnupg +for pkg in $(ls); do + for pkg_full_version in $(ls ./$pkg); do + + # THere is no std/ent packages for PG 9.5 + if [[ ${pkg} == 'pg_probackup-std-9.5' ]] || [[ ${pkg} == 'pg_probackup-ent-9.5' ]] ; then + continue; + fi + + RPM_DIR=${OUT_DIR}/rpm/${pkg_full_version}/altlinux-p${DISTRIB_VERSION}/x86_64/RPMS.${REPO_SUFFIX} + mkdir -p "$RPM_DIR" + cp -arv $INPUT_DIR/$pkg/$pkg_full_version/RPMS/x86_64/* $RPM_DIR/ + + genbasedir --architecture=x86_64 --architectures=x86_64 --origin=repo.postgrespro.ru \ + --label="${FORK} backup utility pg_probackup" --description "${FORK} pg_probackup repo" \ + --version=$pkg_full_version --bloat --progress --create \ + --topdir=${OUT_DIR}/rpm/${pkg_full_version}/altlinux-p${DISTRIB_VERSION} x86_64 ${REPO_SUFFIX} + + # SRPM is available only for vanilla + if [[ ${PBK_EDITION} == '' ]] ; then + 
SRPM_DIR=${OUT_DIR}/srpm/${pkg_full_version}/altlinux-p${DISTRIB_VERSION}/x86_64/SRPMS.${REPO_SUFFIX} + mkdir -p "$SRPM_DIR" + cp -arv $INPUT_DIR/$pkg/$pkg_full_version/SRPMS/* $SRPM_DIR/ + + genbasedir --architecture=x86_64 --architectures=x86_64 --origin=repo.postgrespro.ru \ + --label="${FORK} backup utility pg_probackup sources" --description "${FORK} pg_probackup repo" \ + --version=$pkg_full_version --bloat --progress --create \ + --topdir=${OUT_DIR}/srpm/${pkg_full_version}/altlinux-p${DISTRIB_VERSION} x86_64 ${REPO_SUFFIX} + fi + done +done diff --git a/packaging/repo/scripts/deb.sh b/packaging/repo/scripts/deb.sh new file mode 100755 index 000000000..6515e6b42 --- /dev/null +++ b/packaging/repo/scripts/deb.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash + +# Copyright Notice: +# © (C) Postgres Professional 2015-2016 http://www.postgrespro.ru/ +# Distributed under Apache License 2.0 +# Распространяется по лицензии Apache 2.0 + +set -exu +set -o errexit +set -o pipefail + +# fix https://github.com/moby/moby/issues/23137 +ulimit -n 1024 + +export INPUT_DIR=/app/in # dir with builded deb +export OUT_DIR=/app/www/${PBK_PKG_REPO} +#export REPO_DIR=/app/repo + +cd $INPUT_DIR + +export DEB_DIR=$OUT_DIR/deb +export KEYS_DIR=$OUT_DIR/keys +export CONF=/app/repo/${PBK_PKG_REPO}/conf +mkdir -p "$KEYS_DIR" +cp -av /app/repo/${PBK_PKG_REPO}/gnupg /root/.gnupg + +rsync /app/repo/${PBK_PKG_REPO}/gnupg/key.public $KEYS_DIR/GPG-KEY-PG_PROBACKUP +echo -e 'User-agent: *\nDisallow: /' > $OUT_DIR/robots.txt + +mkdir -p $DEB_DIR +cd $DEB_DIR +cp -av $CONF ./ + +# make remove-debpkg tool +echo -n "#!" > remove-debpkg +echo "/bin/sh" >> remove-debpkg +echo "CODENAME=\$1" >> remove-debpkg +echo "DEBFILE=\$2" >> remove-debpkg +echo "DEBNAME=\`basename \$DEBFILE | sed -e 's/_.*//g'\`" >> remove-debpkg +echo "reprepro --waitforlock 5 remove \$CODENAME \$DEBNAME" >> remove-debpkg +chmod +x remove-debpkg + +#find $INPUT_DIR/ -name '*.changes' -exec reprepro -P optional -Vb . include ${CODENAME} {} \; +find $INPUT_DIR -name "*${CODENAME}*.deb" -exec ./remove-debpkg $CODENAME {} \; +find $INPUT_DIR -name "*${CODENAME}*.dsc" -exec reprepro --waitforlock 5 -i undefinedtarget --ignore=missingfile -P optional -S main -Vb . includedsc $CODENAME {} \; +find $INPUT_DIR -name "*${CODENAME}*.deb" -exec reprepro --waitforlock 5 -i undefinedtarget --ignore=missingfile -P optional -Vb . 
includedeb $CODENAME {} \; +reprepro export $CODENAME + +rm -f remove-debpkg +rm -rf ./conf +rm -rf /root/.gnupg diff --git a/packaging/repo/scripts/rpm.sh b/packaging/repo/scripts/rpm.sh new file mode 100755 index 000000000..d4e621c3e --- /dev/null +++ b/packaging/repo/scripts/rpm.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash + +# Copyright Notice: +# © (C) Postgres Professional 2015-2016 http://www.postgrespro.ru/ +# Distributed under Apache License 2.0 +# Распространяется по лицензии Apache 2.0 + +set -ex +set -o errexit +set -o pipefail + +# fix https://github.com/moby/moby/issues/23137 +ulimit -n 1024 + +export INPUT_DIR=/app/in #dir with builded rpm +export OUT_DIR=/app/www/${PBK_PKG_REPO} +export KEYS_DIR=$OUT_DIR/keys + +# deploy keys +mkdir -p "$KEYS_DIR" +rsync /app/repo/$PBK_PKG_REPO/gnupg/key.public $KEYS_DIR/GPG-KEY-PG_PROBACKUP +chmod 755 $KEYS_DIR +chmod +x /app/repo/$PBK_PKG_REPO/autosign.sh +echo -e 'User-agent: *\nDisallow: /' > $OUT_DIR/robots.txt + +cd $INPUT_DIR + +cp -arv /app/repo/$PBK_PKG_REPO/rpmmacros /root/.rpmmacros +cp -arv /app/repo/$PBK_PKG_REPO/gnupg /root/.gnupg +chmod -R 0600 /root/.gnupg +chown -R root:root /root/.gnupg + +for pkg in $(ls ${INPUT_DIR}); do + for pkg_full_version in $(ls ${INPUT_DIR}/$pkg); do + + # THere is no std/ent packages for PG 9.5 + if [[ ${pkg} == 'pg_probackup-std-9.5' ]] || [[ ${pkg} == 'pg_probackup-ent-9.5' ]] ; then + continue; + fi + + if [[ ${PBK_EDITION} == '' ]] ; then + cp $INPUT_DIR/$pkg/$pkg_full_version/RPMS/noarch/pg_probackup-repo-*.noarch.rpm \ + $KEYS_DIR/pg_probackup-repo-$DISTRIB.noarch.rpm + else + cp $INPUT_DIR/$pkg/$pkg_full_version/RPMS/noarch/pg_probackup-repo-*.noarch.rpm \ + $KEYS_DIR/pg_probackup-repo-forks-$DISTRIB.noarch.rpm + fi + + [ ! 
-z "$CODENAME" ] && export DISTRIB_VERSION=$CODENAME + RPM_DIR=$OUT_DIR/rpm/$pkg_full_version/${DISTRIB}-${DISTRIB_VERSION}-x86_64 + mkdir -p "$RPM_DIR" + cp -arv $INPUT_DIR/$pkg/$pkg_full_version/RPMS/x86_64/* $RPM_DIR/ + for f in $(ls $RPM_DIR/*.rpm); do rpm --addsign $f || exit 1; done + createrepo $RPM_DIR/ + + if [[ ${PBK_EDITION} == '' ]] ; then + SRPM_DIR=$OUT_DIR/srpm/$pkg_full_version/${DISTRIB}-${DISTRIB_VERSION}-x86_64 + mkdir -p "$SRPM_DIR" + cp -arv $INPUT_DIR/$pkg/$pkg_full_version/SRPMS/* $SRPM_DIR/ + for f in $(ls $SRPM_DIR/*.rpm); do rpm --addsign $f || exit 1; done + createrepo $SRPM_DIR/ + fi + + done +done diff --git a/packaging/repo/scripts/suse.sh b/packaging/repo/scripts/suse.sh new file mode 100755 index 000000000..7253df700 --- /dev/null +++ b/packaging/repo/scripts/suse.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash + +# Copyright Notice: +# © (C) Postgres Professional 2015-2016 http://www.postgrespro.ru/ +# Distributed under Apache License 2.0 +# Распространяется по лицензии Apache 2.0 + +set -ex +set -o errexit +set -o pipefail + +# fix https://github.com/moby/moby/issues/23137 +ulimit -n 1024 + +# currenctly we do not build std|ent packages for Suse +if [[ ${PBK_EDITION} != '' ]] ; then + exit 0 +fi + +export INPUT_DIR=/app/in #dir with builded rpm +export OUT_DIR=/app/www/${PBK_PKG_REPO} +export KEYS_DIR=$OUT_DIR/keys +# deploy keys + +zypper install -y createrepo +rm -rf /root/.gnupg + +cd $INPUT_DIR + +mkdir -p $KEYS_DIR +chmod 755 $KEYS_DIR +rsync /app/repo/$PBK_PKG_REPO/gnupg/key.public $KEYS_DIR/GPG-KEY-PG_PROBACKUP + +echo -e 'User-agent: *\nDisallow: /' > $OUT_DIR/robots.txt + +cp -arv /app/repo/$PBK_PKG_REPO/rpmmacros /root/.rpmmacros +cp -arv /app/repo/$PBK_PKG_REPO/gnupg /root/.gnupg +chmod -R 0600 /root/.gnupg + +for pkg in $(ls); do + for pkg_full_version in $(ls ./$pkg); do + + cp $INPUT_DIR/$pkg/$pkg_full_version/RPMS/noarch/pg_probackup-repo-*.noarch.rpm \ + $KEYS_DIR/pg_probackup-repo-$DISTRIB.noarch.rpm + [ ! 
-z "$CODENAME" ] && export DISTRIB_VERSION=$CODENAME + RPM_DIR=$OUT_DIR/rpm/$pkg_full_version/${DISTRIB}-${DISTRIB_VERSION}-x86_64 + SRPM_DIR=$OUT_DIR/srpm/$pkg_full_version/${DISTRIB}-${DISTRIB_VERSION}-x86_64 + + # rm -rf "$RPM_DIR" && mkdir -p "$RPM_DIR" + # rm -rf "$SRPM_DIR" && mkdir -p "$SRPM_DIR" + mkdir -p "$RPM_DIR" + mkdir -p "$SRPM_DIR" + + cp -arv $INPUT_DIR/$pkg/$pkg_full_version/RPMS/x86_64/* $RPM_DIR/ + cp -arv $INPUT_DIR/$pkg/$pkg_full_version/SRPMS/* $SRPM_DIR/ + + for f in $(ls $RPM_DIR/*.rpm); do rpm --addsign $f || exit 1; done + for f in $(ls $SRPM_DIR/*.rpm); do rpm --addsign $f || exit 1; done + + createrepo $RPM_DIR/ + createrepo $SRPM_DIR/ + + # rpm --addsign $RPM_DIR/repodata/repomd.xml + # rpm --addsign $SRPM_DIR/repodata/repomd.xml + + gpg --batch --yes -a --detach-sign $RPM_DIR/repodata/repomd.xml + gpg --batch --yes -a --detach-sign $SRPM_DIR/repodata/repomd.xml + + cp -a /root/.gnupg/key.public $RPM_DIR/repodata/repomd.xml.key + cp -a /root/.gnupg/key.public $SRPM_DIR/repodata/repomd.xml.key + done +done diff --git a/packaging/test/Makefile.alt b/packaging/test/Makefile.alt new file mode 100644 index 000000000..3c1899cb9 --- /dev/null +++ b/packaging/test/Makefile.alt @@ -0,0 +1,20 @@ +# ALT 9 +build/test_alt_9_9.6: + $(call test_alt,alt,9,,9.6,9.6.21) + touch build/test_alt_9_9.6 + +build/test_alt_9_10: + $(call test_alt,alt,9,,10,10.17) + touch build/test_alt_9_10 + +build/test_alt_9_11: + $(call test_alt,alt,9,,11,11.11) + touch build/test_alt_9_11 + +build/test_alt_9_12: + $(call test_alt,alt,9,,12,12.6) + touch build/test_alt_9_12 + +build/test_alt_9_13: + $(call test_alt,alt,9,,13,13.2) + touch build/test_alt_9_13 diff --git a/packaging/test/Makefile.centos b/packaging/test/Makefile.centos new file mode 100644 index 000000000..e3787c612 --- /dev/null +++ b/packaging/test/Makefile.centos @@ -0,0 +1,41 @@ +# CENTOS 7 +build/test_centos_7_9.6: + $(call test_rpm,centos,7,,9.6,9.6.21) + touch build/test_centos_7_9.6 + +build/test_centos_7_10: + $(call test_rpm,centos,7,,10,10.16) + touch build/test_centos_7_10 + +build/test_centos_7_11: + $(call test_rpm,centos,7,,11,11.11) + touch build/test_centos_7_11 + +build/test_centos_7_12: + $(call test_rpm,centos,7,,12,12.6) + touch build/test_centos_7_12 + +build/test_centos_7_13: + $(call test_rpm,centos,7,,13,13.2) + touch build/test_centos_7_13 + +# CENTOS 8 +build/test_centos_8_9.6: + $(call test_rpm,centos,8,,9.6,9.6.21) + touch build/test_centos_8_9.6 + +build/test_centos_8_10: + $(call test_rpm,centos,8,,10,10.16) + touch build/test_centos_8_10 + +build/test_centos_8_11: + $(call test_rpm,centos,8,,11,11.11) + touch build/test_centos_8_11 + +build/test_centos_8_12: + $(call test_rpm,centos,8,,12,12.6) + touch build/test_centos_8_12 + +build/test_centos_8_13: + $(call test_rpm,centos,8,,13,13.2) + touch build/test_centos_8_13 diff --git a/packaging/test/Makefile.debian b/packaging/test/Makefile.debian new file mode 100644 index 000000000..f540f9205 --- /dev/null +++ b/packaging/test/Makefile.debian @@ -0,0 +1,41 @@ +# DEBIAN 9 +build/test_debian_9_9.6: + $(call test_deb,debian,9,stretch,9.6,9.6.21) + touch build/test_debian_9_9.6 + +build/test_debian_9_10: + $(call test_deb,debian,9,stretch,10,10.16) + touch build/test_debian_9_10 + +build/test_debian_9_11: + $(call test_deb,debian,9,stretch,11,11.11) + touch build/test_debian_9_11 + +build/test_debian_9_12: + $(call test_deb,debian,9,stretch,12,12.6) + touch build/test_debian_9_12 + +build/test_debian_9_13: + $(call test_deb,debian,9,stretch,13,13.2) + touch 
build/test_debian_9_13 + +# DEBIAN 10 +build/test_debian_10_9.6: + $(call test_deb,debian,10,buster,9.6,9.6.21) + touch build/test_debian_10_9.6 + +build/test_debian_10_10: + $(call test_deb,debian,10,buster,10,10.16) + touch build/test_debian_10_10 + +build/test_debian_10_11: + $(call test_deb,debian,10,buster,11,11.11) + touch build/test_debian_10_11 + +build/test_debian_10_12: + $(call test_deb,debian,10,buster,12,12.6) + touch build/test_debian_10_12 + +build/test_debian_10_13: + $(call test_deb,debian,10,buster,13,13.2) + touch build/test_debian_10_13 diff --git a/packaging/test/Makefile.oraclelinux b/packaging/test/Makefile.oraclelinux new file mode 100644 index 000000000..fdf44de8b --- /dev/null +++ b/packaging/test/Makefile.oraclelinux @@ -0,0 +1,41 @@ +# ORACLE LINUX 7 +build/test_oraclelinux_7_9.6: + $(call test_rpm,oraclelinux,7,,9.6,9.6.21) + touch build/test_oraclelinux_7_9.6 + +build/test_oraclelinux_7_10: + $(call test_rpm,oraclelinux,7,,10,10.16) + touch build/test_oraclelinux_7_10 + +build/test_oraclelinux_7_11: + $(call test_rpm,oraclelinux,7,,11,11.11) + touch build/test_oraclelinux_7_11 + +build/test_oraclelinux_7_12: + $(call test_rpm,oraclelinux,7,,12,12.6) + touch build/test_oraclelinux_7_12 + +build/test_oraclelinux_7_13: + $(call test_rpm,oraclelinux,7,,13,13.2) + touch build/test_oraclelinux_7_13 + +# ORACLE LINUX 8 +build/test_oraclelinux_8_9.6: + $(call test_rpm,oraclelinux,8,,9.6,9.6.21) + touch build/test_oraclelinux_8_9.6 + +build/test_oraclelinux_8_10: + $(call test_rpm,oraclelinux,8,,10,10.16) + touch build/test_oraclelinux_8_10 + +build/test_oraclelinux_8_11: + $(call test_rpm,oraclelinux,8,,11,11.11) + touch build/test_oraclelinux_8_11 + +build/test_oraclelinux_8_12: + $(call test_rpm,oraclelinux,8,,12,12.6) + touch build/test_oraclelinux_8_12 + +build/test_oraclelinux_8_13: + $(call test_rpm,oraclelinux,8,,13,13.2) + touch build/test_oraclelinux_8_13 diff --git a/packaging/test/Makefile.rhel b/packaging/test/Makefile.rhel new file mode 100644 index 000000000..3169d11c9 --- /dev/null +++ b/packaging/test/Makefile.rhel @@ -0,0 +1,41 @@ +# RHEL 7 +build/test_rhel_7_9.6: + $(call test_rpm,rhel,7,7Server,9.6,9.6.21) + touch build/test_rhel_7_9.6 + +build/test_rhel_7_10: + $(call test_rpm,rhel,7,7Server,10,10.16) + touch build/test_rhel_7_10 + +build/test_rhel_7_11: + $(call test_rpm,rhel,7,7Server,11,11.11) + touch build/test_rhel_7_11 + +build/test_rhel_7_12: + $(call test_rpm,rhel,7,7Server,12,12.6) + touch build/test_rhel_7_12 + +build/test_rhel_7_13: + $(call test_rpm,rhel,7,7Server,13,13.2) + touch build/test_rhel_7_13 + +# RHEL 8 +build/test_rhel_8_9.6: + $(call test_rpm,rhel,8,8Server,9.6,9.6.21) + touch build/test_rhel_8_9.6 + +build/test_rhel_8_10: + $(call test_rpm,rhel,8,8Server,10,10.16) + touch build/test_rhel_8_10 + +build/test_rhel_8_11: + $(call test_rpm,rhel,8,8Server,11,11.11) + touch build/test_rhel_8_11 + +build/test_rhel_8_12: + $(call test_rpm,rhel,8,8Server,12,12.6) + touch build/test_rhel_8_12 + +build/test_rhel_8_13: + $(call test_rpm,rhel,8,8Server,13,13.2) + touch build/test_rhel_8_13 diff --git a/packaging/test/Makefile.suse b/packaging/test/Makefile.suse new file mode 100644 index 000000000..9257bdbfd --- /dev/null +++ b/packaging/test/Makefile.suse @@ -0,0 +1,41 @@ +# Suse 15.1 +build/test_suse_15.1_9.6: + $(call test_suse,suse,15.1,,9.6,9.6.21) + touch build/test_suse_15.1_9.6 + +build/test_suse_15.1_10: + $(call test_suse,suse,15.1,,10,10.16) + touch build/test_suse_15.1_10 + +build/test_suse_15.1_11: + $(call 
test_suse,suse,15.1,,11,11.11) + touch build/test_suse_15.1_11 + +build/test_suse_15.1_12: + $(call test_suse,suse,15.1,,12,12.6) + touch build/test_suse_15.1_12 + +build/test_suse_15.1_13: + $(call test_suse,suse,15.1,,13,13.2) + touch build/test_suse_15.1_13 + +# Suse 15.2 +build/test_suse_15.2_9.6: + $(call test_suse,suse,15.2,,9.6,9.6.21) + touch build/test_suse_15.2_9.6 + +build/test_suse_15.2_10: + $(call test_suse,suse,15.2,,10,10.16) + touch build/test_suse_15.2_10 + +build/test_suse_15.2_11: + $(call test_suse,suse,15.2,,11,11.11) + touch build/test_suse_15.2_11 + +build/test_suse_15.2_12: + $(call test_suse,suse,15.2,,12,12.6) + touch build/test_suse_15.2_12 + +build/test_suse_15.2_13: + $(call test_suse,suse,15.2,,13,13.2) + touch build/test_suse_15.2_13 diff --git a/packaging/test/Makefile.ubuntu b/packaging/test/Makefile.ubuntu new file mode 100644 index 000000000..9e201a30b --- /dev/null +++ b/packaging/test/Makefile.ubuntu @@ -0,0 +1,62 @@ +# UBUNTU 16.04 +build/test_ubuntu_16.04_9.6: + $(call test_deb,ubuntu,16.04,xenial,9.6,9.6.21) + touch build/test_ubuntu_16.04_9.6 + +build/test_ubuntu_16.04_10: + $(call test_deb,ubuntu,16.04,xenial,10,10.16) + touch build/test_ubuntu_16.04_10 + +build/test_ubuntu_16.04_11: + $(call test_deb,ubuntu,16.04,xenial,11,11.11) + touch build/test_ubuntu_16.04_11 + +build/test_ubuntu_16.04_12: + $(call test_deb,ubuntu,16.04,xenial,12,12.6) + touch build/test_ubuntu_16.04_12 + +build/test_ubuntu_16.04_13: + $(call test_deb,ubuntu,16.04,xenial,13,13.2) + touch build/test_ubuntu_16.04_13 + +# UBUNTU 18.04 +build/test_ubuntu_18.04_9.6: + $(call test_deb,ubuntu,18.04,bionic,9.6,9.6.21) + touch build/test_ubuntu_18.04_9.6 + +build/test_ubuntu_18.04_10: + $(call test_deb,ubuntu,18.04,bionic,10,10.16) + touch build/test_ubuntu_18.04_10 + +build/test_ubuntu_18.04_11: + $(call test_deb,ubuntu,18.04,bionic,11,11.11) + touch build/test_ubuntu_18.04_11 + +build/test_ubuntu_18.04_12: + $(call test_deb,ubuntu,18.04,bionic,12,12.6) + touch build/test_ubuntu_18.04_12 + +build/test_ubuntu_18.04_13: + $(call test_deb,ubuntu,18.04,bionic,13,13.2) + touch build/test_ubuntu_18.04_13 + +# UBUNTU 20.04 +build/test_ubuntu_20.04_9.6: + $(call test_deb,ubuntu,20.04,focal,9.6,9.6.21) + touch build/test_ubuntu_20.04_9.6 + +build/test_ubuntu_20.04_10: + $(call test_deb,ubuntu,20.04,focal,10,10.16) + touch build/test_ubuntu_20.04_10 + +build/test_ubuntu_20.04_11: + $(call test_deb,ubuntu,20.04,focal,11,11.11) + touch build/test_ubuntu_20.04_11 + +build/test_ubuntu_20.04_12: + $(call test_deb,ubuntu,20.04,focal,12,12.6) + touch build/test_ubuntu_20.04_12 + +build/test_ubuntu_20.04_13: + $(call test_deb,ubuntu,20.04,focal,13,13.2) + touch build/test_ubuntu_20.04_13 diff --git a/packaging/test/scripts/alt.sh b/packaging/test/scripts/alt.sh new file mode 100755 index 000000000..262864474 --- /dev/null +++ b/packaging/test/scripts/alt.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash + +set -xe +set -o pipefail + +ulimit -n 1024 + +apt-get clean -y +apt-get update -y +apt-get install nginx su -y + +adduser nginx + +cat < /etc/nginx/nginx.conf +user nginx; +worker_processes 1; +error_log /var/log/nginx/error.log; +events { + worker_connections 1024; +} +http { + server { + listen 80 default; + root /app/www; + } +} +EOF + +/etc/init.d/nginx start + +# install POSTGRESQL + +export PGDATA=/var/lib/pgsql/${PG_VERSION}/data + +# install old packages +echo "rpm http://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p7 x86_64 vanilla" > 
/etc/apt/sources.list.d/pg_probackup.list +apt-get update +apt-get install ${PKG_NAME} -y +${PKG_NAME} --help +${PKG_NAME} --version + +# install new packages +echo "127.0.0.1 repo.postgrespro.ru" >> /etc/hosts +echo "rpm http://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p${DISTRIB_VERSION} x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list +echo "rpm [p${DISTRIB_VERSION}] http://mirror.yandex.ru/altlinux p${DISTRIB_VERSION}/branch/x86_64 debuginfo" > /etc/apt/sources.list.d/debug.list + +apt-get update -y +apt-get install ${PKG_NAME} -y +${PKG_NAME} --help +${PKG_NAME} --version + +exit 0 + +# TODO: run init, add-instance, backup and restore +su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pgbench --no-vacuum -t 1000 -c 1" +su postgres -c "${PKG_NAME} backup --instance=node -b page -B /tmp/backup -D ${PGDATA} --no-sync --compress" +su postgres -c "${PKG_NAME} show --instance=node -B /tmp/backup -D ${PGDATA}" + +su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pg_ctl stop -D ${PGDATA}" +rm -rf ${PGDATA} + +su postgres -c "${PKG_NAME} restore --instance=node -B /tmp/backup -D ${PGDATA} --no-sync" +su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pg_ctl start -w -D ${PGDATA}" + +sleep 5 + +echo "select count(*) from pgbench_accounts;" | su postgres -c "/usr/pgsql-${PG_VERSION}/bin/psql" || exit 1 + +exit 0 # while PG12 is not working + +# SRC PACKAGE +cd /mnt diff --git a/packaging/test/scripts/alt_forks.sh b/packaging/test/scripts/alt_forks.sh new file mode 100755 index 000000000..c406e5358 --- /dev/null +++ b/packaging/test/scripts/alt_forks.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash + +# Copyright Notice: +# © (C) Postgres Professional 2015-2016 http://www.postgrespro.ru/ +# Distributed under Apache License 2.0 +# Распространяется по лицензии Apache 2.0 + +set -xe +set -o pipefail + +ulimit -n 1024 + +if [ ${PBK_EDITION} == 'ent' ]; then + exit 0 +fi + +apt-get clean -y +apt-get update -y +apt-get install nginx su -y + +adduser nginx + +cat < /etc/nginx/nginx.conf +user nginx; +worker_processes 1; +error_log /var/log/nginx/error.log; +events { + worker_connections 1024; +} +http { + server { + listen 80 default; + root /app/www; + } +} +EOF + +/etc/init.d/nginx start + +# install POSTGRESQL + +export PGDATA=/var/lib/pgsql/${PG_VERSION}/data + +# install old packages + +# install new packages +echo "127.0.0.1 repo.postgrespro.ru" >> /etc/hosts +echo "rpm http://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p${DISTRIB_VERSION} x86_64 forks" > /etc/apt/sources.list.d/pg_probackup.list +echo "rpm [p${DISTRIB_VERSION}] http://mirror.yandex.ru/altlinux p${DISTRIB_VERSION}/branch/x86_64 debuginfo" > /etc/apt/sources.list.d/debug.list + +apt-get update -y +apt-get install ${PKG_NAME} ${PKG_NAME}-debuginfo -y +${PKG_NAME} --help +${PKG_NAME} --version + +exit 0 + +su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pgbench --no-vacuum -t 1000 -c 1" +su postgres -c "${PKG_NAME} backup --instance=node -b page -B /tmp/backup -D ${PGDATA}" +su postgres -c "${PKG_NAME} show --instance=node -B /tmp/backup -D ${PGDATA}" + +su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pg_ctl stop -D ${PGDATA}" +rm -rf ${PGDATA} + +su postgres -c "${PKG_NAME} restore --instance=node -B /tmp/backup -D ${PGDATA}" +su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pg_ctl start -w -D ${PGDATA}" + +sleep 5 + +echo "select count(*) from pgbench_accounts;" | su postgres -c "/usr/pgsql-${PG_VERSION}/bin/psql" || 
exit 1 + +exit 0 # while PG12 is not working + +# SRC PACKAGE +cd /mnt diff --git a/packaging/test/scripts/deb.sh b/packaging/test/scripts/deb.sh new file mode 100755 index 000000000..76e3bb043 --- /dev/null +++ b/packaging/test/scripts/deb.sh @@ -0,0 +1,136 @@ +#!/usr/bin/env bash + +# Copyright Notice: +# © (C) Postgres Professional 2015-2021 http://www.postgrespro.ru/ +# Distributed under Apache License 2.0 +# Распространяется по лицензии Apache 2.0 + +set -xe +set -o pipefail + +ulimit -n 1024 + +PG_TOG=$(echo $PG_VERSION | sed 's|\.||g') + +# upgrade and utils +# export parameters +export DEBIAN_FRONTEND=noninteractive +echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections + +apt-get -qq update +apt-get -qq install -y wget nginx gnupg lsb-release +#apt-get -qq install -y libterm-readline-gnu-perl dialog gnupg procps + +# echo -e 'Package: *\nPin: origin test.postgrespro.ru\nPin-Priority: 800' >\ +# /etc/apt/preferences.d/pgpro-800 + +# install nginx +echo "127.0.0.1 test.postgrespro.ru" >> /etc/hosts +cat < /etc/nginx/nginx.conf +user www-data; +worker_processes 1; +error_log /var/log/nginx/error.log; +events { + worker_connections 1024; +} +http { + server { + listen 80 default; + root /app/www; + } +} +EOF +nginx -s reload || (pkill -9 nginx || nginx -c /etc/nginx/nginx.conf &) + +# install POSTGRESQL +#if [ ${CODENAME} == 'precise' ] && [ ${PG_VERSION} != '10' ] && [ ${PG_VERSION} != '11' ]; then + sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' + wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - + apt-get update -y + apt-get install -y postgresql-${PG_VERSION} +#fi + +# install pg_probackup from current public repo +echo "deb [arch=amd64] http://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >\ + /etc/apt/sources.list.d/pg_probackup-old.list +wget -O - http://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | apt-key add - && apt-get update + +apt-get install -y pg-probackup-${PG_VERSION} +pg_probackup-${PG_VERSION} --help +pg_probackup-${PG_VERSION} --version + +# Artful do no have PostgreSQL packages at all, Precise do not have PostgreSQL 10 +#if [ ${CODENAME} == 'precise' ] && [ ${PG_VERSION} != '10' ] && [ ${PG_VERSION} != '11' ]; then + export PGDATA=/var/lib/postgresql/${PG_VERSION}/data + su postgres -c "/usr/lib/postgresql/${PG_VERSION}/bin/initdb -k -D ${PGDATA}" + su postgres -c "pg_probackup-${PG_VERSION} init -B /tmp/backup" + su postgres -c "pg_probackup-${PG_VERSION} add-instance --instance=node -B /tmp/backup -D ${PGDATA}" + + echo "wal_level=hot_standby" >> ${PGDATA}/postgresql.auto.conf + echo "fsync=off" >> ${PGDATA}/postgresql.auto.conf + echo "archive_mode=on" >> ${PGDATA}/postgresql.auto.conf + echo "archive_command='pg_probackup-${PG_VERSION} archive-push --no-sync -B /tmp/backup compress --instance=node --wal-file-path %p --wal-file-name %f'" >> ${PGDATA}/postgresql.auto.conf + + su postgres -c "/usr/lib/postgresql/${PG_VERSION}/bin/pg_ctl start -D ${PGDATA}" + sleep 5 + su postgres -c "pg_probackup-${PG_VERSION} backup --instance=node -b full -B /tmp/backup -D ${PGDATA} --no-sync" + su postgres -c "pg_probackup-${PG_VERSION} show --instance=node -B /tmp/backup -D ${PGDATA}" + su postgres -c "pg_probackup-${PG_VERSION} show --instance=node -B /tmp/backup --archive -D 
${PGDATA}" + + su postgres -c "/usr/lib/postgresql/${PG_VERSION}/bin/pgbench --no-vacuum -i -s 5" + su postgres -c "pg_probackup-${PG_VERSION} backup --instance=node -b page -B /tmp/backup -D ${PGDATA} --no-sync" +#fi + +# install new packages +echo "deb [arch=amd64] http://test.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >\ + /etc/apt/sources.list.d/pg_probackup-new.list +wget -O - http://test.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | apt-key add - +apt-get update +apt-get install -y pg-probackup-${PG_VERSION} +pg_probackup-${PG_VERSION} --help +pg_probackup-${PG_VERSION} --version + +#if [ ${CODENAME} == 'precise' ] && [ ${PG_VERSION} != '10' ] && [ ${PG_VERSION} != '11' ]; then +# echo "wal_level=hot_standby" >> ${PGDATA}/postgresql.auto.conf +# echo "archive_mode=on" >> ${PGDATA}/postgresql.auto.conf +# echo "archive_command='${PKG_NAME} archive-push -B /tmp/backup --compress --instance=node --wal-file-path %p --wal-file-name %f'" >> ${PGDATA}/postgresql.auto.conf +# su postgres -c "/usr/lib/postgresql/${PG_VERSION}/bin/pg_ctl restart -D ${PGDATA}" +# sleep 5 +# su postgres -c "${PKG_NAME} init -B /tmp/backup" +# su postgres -c "${PKG_NAME} add-instance --instance=node -B /tmp/backup -D ${PGDATA}" + +# su postgres -c "pg_probackup-${PG_VERSION} init -B /tmp/backup" +# su postgres -c "pg_probackup-${PG_VERSION} add-instance --instance=node -B /tmp/backup -D ${PGDATA}" + su postgres -c "pg_probackup-${PG_VERSION} backup --instance=node --compress -b delta -B /tmp/backup -D ${PGDATA} --no-sync" + su postgres -c "/usr/lib/postgresql/${PG_VERSION}/bin/pgbench --no-vacuum -t 1000 -c 1" + su postgres -c "pg_probackup-${PG_VERSION} backup --instance=node -b page -B /tmp/backup -D ${PGDATA} --no-sync" + su postgres -c "pg_probackup-${PG_VERSION} show --instance=node -B /tmp/backup -D ${PGDATA}" + + su postgres -c "/usr/lib/postgresql/${PG_VERSION}/bin/pg_ctl stop -D ${PGDATA}" + rm -rf ${PGDATA} + + su postgres -c "pg_probackup-${PG_VERSION} restore --instance=node -B /tmp/backup -D ${PGDATA} --no-sync" + su postgres -c "/usr/lib/postgresql/${PG_VERSION}/bin/pg_ctl start -w -t 60 -D ${PGDATA}" + +sleep 5 +echo "select count(*) from pgbench_accounts;" | su postgres -c "/usr/lib/postgresql/${PG_VERSION}/bin/psql" || exit 1 +#fi + +# CHECK SRC package +apt-get install -y dpkg-dev +echo "deb-src [arch=amd64] http://test.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\ + /etc/apt/sources.list.d/pg_probackup.list + +wget -O - http://test.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | apt-key add - && apt-get update + +apt-get update -y + +cd /mnt +apt-get source pg-probackup-${PG_VERSION} +exit 0 + +cd pg-probackup-${PG_VERSION}-${PKG_VERSION} +#mk-build-deps --install --remove --tool 'apt-get --no-install-recommends --yes' debian/control +#rm -rf ./*.deb +apt-get install -y debhelper bison flex gettext zlib1g-dev +dpkg-buildpackage -us -uc diff --git a/packaging/test/scripts/deb_forks.sh b/packaging/test/scripts/deb_forks.sh new file mode 100755 index 000000000..5175f38db --- /dev/null +++ b/packaging/test/scripts/deb_forks.sh @@ -0,0 +1,149 @@ +#!/usr/bin/env bash + +# Copyright Notice: +# © (C) Postgres Professional 2015-2016 http://www.postgrespro.ru/ +# Distributed under Apache License 2.0 +# Распространяется по лицензии Apache 2.0 + +set -xe +set -o pipefail + +# fix 
https://github.com/moby/moby/issues/23137 +ulimit -n 1024 + +# TODO: remove after release +exit 0 + +if [ ${PBK_EDITION} == 'ent' ]; then + exit 0 +fi + +if [ ${PBK_EDITION} == 'std' ] && [ ${PG_VERSION} == '9.6' ]; then + exit 0 +fi + +# upgrade and utils +# export parameters +export DEBIAN_FRONTEND=noninteractive +echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections + +#if [ ${CODENAME} == 'jessie' ]; then +#printf "deb http://archive.debian.org/debian/ jessie main\ndeb-src http://archive.debian.org/debian/ jessie main\ndeb http://security.debian.org jessie/updates main\ndeb-src http://security.debian.org jessie/updates main" > /etc/apt/sources.list +#fi + +apt-get -qq update +apt-get -qq install -y wget nginx gnupg lsb-release apt-transport-https +#apt-get -qq install -y libterm-readline-gnu-perl dialog gnupg procps + +# echo -e 'Package: *\nPin: origin test.postgrespro.ru\nPin-Priority: 800' >\ +# /etc/apt/preferences.d/pgpro-800 + +# install nginx +echo "127.0.0.1 test.postgrespro.ru" >> /etc/hosts +cat < /etc/nginx/nginx.conf +user www-data; +worker_processes 1; +error_log /var/log/nginx/error.log; +events { + worker_connections 1024; +} +http { + server { + listen 80 default; + root /app/www; + } +} +EOF +nginx -s reload || (pkill -9 nginx || nginx -c /etc/nginx/nginx.conf &) + +# install POSTGRESPRO +if [ ${PBK_EDITION} == 'std' ]; then + sh -c 'echo "deb https://repo.postgrespro.ru/pgpro-${PG_VERSION}/${DISTRIB}/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/pgpro.list' + wget --quiet -O - https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/GPG-KEY-POSTGRESPRO | apt-key add - + apt-get update -y + + apt-get install -y postgrespro-std-${PG_VERSION} + BINDIR="/opt/pgpro/std-${PG_VERSION}/bin" + export LD_LIBRARY_PATH=/opt/pgpro/std-${PG_VERSION}/lib/ +fi + +# install pg_probackup from current public repo +echo "deb [arch=amd64] http://repo.postgrespro.ru/pg_probackup-forks/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >\ + /etc/apt/sources.list.d/pg_probackup-old.list +wget -O - http://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | apt-key add - && apt-get update + +apt-get install -y pg-probackup-${PBK_EDITION}-${PG_VERSION} +pg_probackup-${PBK_EDITION}-${PG_VERSION} --help +pg_probackup-${PBK_EDITION}-${PG_VERSION} --version + + +if [ ${PBK_EDITION} == 'std' ]; then + export PGDATA=/tmp/data + su postgres -c "${BINDIR}/initdb -k -D ${PGDATA}" + su postgres -c "pg_probackup-${PBK_EDITION}-${PG_VERSION} init -B /tmp/backup" + su postgres -c "pg_probackup-${PBK_EDITION}-${PG_VERSION} add-instance --instance=node -B /tmp/backup -D ${PGDATA}" + + echo "wal_level=hot_standby" >> ${PGDATA}/postgresql.auto.conf + echo "fsync=off" >> ${PGDATA}/postgresql.auto.conf + echo "archive_mode=on" >> ${PGDATA}/postgresql.auto.conf + echo "archive_command='pg_probackup-${PBK_EDITION}-${PG_VERSION} archive-push --no-sync -B /tmp/backup compress --instance=node --wal-file-path %p --wal-file-name %f'" >> ${PGDATA}/postgresql.auto.conf + + su postgres -c "${BINDIR}/pg_ctl stop -w -t 60 -D /var/lib/pgpro/std-${PG_VERSION}/data" || echo "it is all good" + su postgres -c "${BINDIR}/pg_ctl start -D ${PGDATA}" + sleep 5 + su postgres -c "pg_probackup-${PBK_EDITION}-${PG_VERSION} backup --instance=node -b full -B /tmp/backup -D ${PGDATA} --no-sync" + su 
postgres -c "pg_probackup-${PBK_EDITION}-${PG_VERSION} show --instance=node -B /tmp/backup -D ${PGDATA}" + su postgres -c "pg_probackup-${PBK_EDITION}-${PG_VERSION} show --instance=node -B /tmp/backup -D ${PGDATA} --archive" + + su postgres -c "${BINDIR}/pgbench --no-vacuum -i -s 5" + su postgres -c "pg_probackup-${PBK_EDITION}-${PG_VERSION} backup --instance=node -b page -B /tmp/backup -D ${PGDATA} --no-sync" +fi + +# install new packages +echo "deb [arch=amd64] http://test.postgrespro.ru/pg_probackup-forks/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >\ + /etc/apt/sources.list.d/pg_probackup-new.list +wget -O - http://test.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | apt-key add - +apt-get update -y + +#if [ ${PBK_EDITION} == 'std' ] && [ ${PG_VERSION} == '9.6' ]; then +# apt-get install -y libpq5 pg-probackup-${PBK_EDITION}-${PG_VERSION} +#else +# apt-get install -y pg-probackup-${PBK_EDITION}-${PG_VERSION} +#fi + +apt-get install -y pg-probackup-${PBK_EDITION}-${PG_VERSION} + +# in Ent 11 and 10 because of PQselect vanilla libpq5 is incompatible with Ent pg_probackup +if [ ${PBK_EDITION} == 'ent' ]; then + if [ ${PG_VERSION} == '11' ] || [ ${PG_VERSION} == '10' ] || [ ${PG_VERSION} == '9.6' ]; then + exit 0 + fi +fi + +pg_probackup-${PBK_EDITION}-${PG_VERSION} --help +pg_probackup-${PBK_EDITION}-${PG_VERSION} --version + +if [ ${PBK_EDITION} == 'ent' ]; then + exit 0 +fi + +if [ ${PBK_EDITION} == 'std' ] && [ ${PG_VERSION} == '9.6' ]; then + exit 0 +fi + + +#if [ ${CODENAME} == 'precise' ] && [ ${PG_VERSION} != '10' ] && [ ${PG_VERSION} != '11' ]; then + su postgres -c "${BINDIR}/pgbench --no-vacuum -t 1000 -c 1" + su postgres -c "pg_probackup-${PBK_EDITION}-${PG_VERSION} backup --instance=node -b page -B /tmp/backup -D ${PGDATA}" + su postgres -c "pg_probackup-${PBK_EDITION}-${PG_VERSION} show --instance=node -B /tmp/backup -D ${PGDATA}" + + su postgres -c "${BINDIR}/pg_ctl stop -w -t 60 -D ${PGDATA}" + rm -rf ${PGDATA} + + su postgres -c "pg_probackup-${PBK_EDITION}-${PG_VERSION} restore --instance=node -B /tmp/backup -D ${PGDATA}" + su postgres -c "${BINDIR}/pg_ctl start -w -t 60 -D ${PGDATA}" + +sleep 5 +echo "select count(*) from pgbench_accounts;" | su postgres -c "${BINDIR}/psql" || exit 1 + +exit 0 diff --git a/packaging/test/scripts/rpm.sh b/packaging/test/scripts/rpm.sh new file mode 100755 index 000000000..3f24cc7e5 --- /dev/null +++ b/packaging/test/scripts/rpm.sh @@ -0,0 +1,166 @@ +#!/usr/bin/env bash + +# Copyright Notice: +# © (C) Postgres Professional 2015-2016 http://www.postgrespro.ru/ +# Distributed under Apache License 2.0 +# Распространяется по лицензии Apache 2.0 + +set -xe +set -o pipefail + +# fix https://github.com/moby/moby/issues/23137 +ulimit -n 1024 + +PG_TOG=$(echo $PG_VERSION | sed 's|\.||g') + +# yum upgrade -y || echo 'some packages in docker failed to upgrade' +# yum install -y sudo +if [ ${DISTRIB} == 'rhel' ] && [ ${PG_TOG} == '13' ]; then # no packages for PG13 on PGDG + exit 0 +fi + +#if [ ${DISTRIB} == 'oraclelinux' ] && [ ${DISTRIB_VERSION} == '6' ] && [ ${PG_TOG} == '13' ]; then # no packages for PG13 on PGDG +# exit 0 +#fi + +if [ ${DISTRIB_VERSION} == '6' ]; then + yum install -y https://nginx.org/packages/rhel/6/x86_64/RPMS/nginx-1.8.1-1.el6.ngx.x86_64.rpm +elif [ ${DISTRIB} == 'oraclelinux' ] && [ ${DISTRIB_VERSION} == '8' ]; then + yum install -y nginx +elif [ ${DISTRIB_VERSION} == '7' ]; then + yum install -y 
https://nginx.org/packages/rhel/7/x86_64/RPMS/nginx-1.8.1-1.el7.ngx.x86_64.rpm +else + yum install epel-release -y + yum install -y nginx +fi + +if ! getent group nginx > /dev/null 2>&1 ; then + addgroup --system --quiet nginx +fi +if ! getent passwd nginx > /dev/null 2>&1 ; then + adduser --quiet \ + --system --disabled-login --ingroup nginx \ + --home /var/run/nginx/ --no-create-home \ + nginx +fi + +cat < /etc/nginx/nginx.conf +user nginx; +worker_processes 1; +error_log /var/log/nginx/error.log; +events { + worker_connections 1024; +} +http { + server { + listen 80 default; + root /app/www; + } +} +EOF +nginx -s reload || (pkill -9 nginx || nginx -c /etc/nginx/nginx.conf &) + +# install POSTGRESQL +rpm -ivh https://download.postgresql.org/pub/repos/yum/reporpms/EL-${DISTRIB_VERSION}-x86_64/pgdg-redhat-repo-latest.noarch.rpm + +if [ ${DISTRIB} == 'oraclelinux' ] && [ ${DISTRIB_VERSION} == '8' ]; then + dnf -qy module disable postgresql +fi + +if [ ${DISTRIB} == 'centos' ] && [ ${DISTRIB_VERSION} == '8' ]; then + dnf -qy module disable postgresql +fi + +yum install -y postgresql${PG_TOG}-server.x86_64 +export PGDATA=/var/lib/pgsql/${PG_VERSION}/data + +# install old packages +yum install -y http://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-${DISTRIB}.noarch.rpm +yum install -y ${PKG_NAME} +${PKG_NAME} --help +${PKG_NAME} --version + +su postgres -c "/usr/pgsql-${PG_VERSION}/bin/initdb -k -D ${PGDATA}" +echo "fsync=off" >> ${PGDATA}/postgresql.auto.conf +echo "wal_level=hot_standby" >> ${PGDATA}/postgresql.auto.conf +echo "archive_mode=on" >> ${PGDATA}/postgresql.auto.conf +echo "archive_command='${PKG_NAME} archive-push --no-sync -B /tmp/backup --instance=node --wal-file-path %p --wal-file-name %f'" >> ${PGDATA}/postgresql.auto.conf +su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pg_ctl start -D ${PGDATA}" +sleep 5 + +su postgres -c "${PKG_NAME} init -B /tmp/backup" +su postgres -c "${PKG_NAME} add-instance --instance=node -B /tmp/backup -D ${PGDATA}" +su postgres -c "${PKG_NAME} backup --instance=node --compress -b full -B /tmp/backup -D ${PGDATA} --no-sync" +su postgres -c "${PKG_NAME} show --instance=node -B /tmp/backup -D ${PGDATA} --archive" + +su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pgbench --no-vacuum -i -s 5" +su postgres -c "${PKG_NAME} backup --instance=node -b page -B /tmp/backup -D ${PGDATA} --no-sync" + +# install new packages +echo "127.0.0.1 repo.postgrespro.ru" >> /etc/hosts + +# yum remove -y pg_probackup-repo +#yum install -y http://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-${DISTRIB}.noarch.rpm +yum clean all -y + +sed -i "s/https/http/g" /etc/yum.repos.d/pg_probackup.repo + +yum update -y ${PKG_NAME} +${PKG_NAME} --help +${PKG_NAME} --version + +su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pgbench --no-vacuum -t 1000 -c 1" +su postgres -c "${PKG_NAME} backup --instance=node -b page -B /tmp/backup -D ${PGDATA} --no-sync" +su postgres -c "${PKG_NAME} show --instance=node -B /tmp/backup -D ${PGDATA}" + +su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pg_ctl stop -D ${PGDATA}" +rm -rf ${PGDATA} + +su postgres -c "${PKG_NAME} restore --instance=node -B /tmp/backup -D ${PGDATA} --no-sync" +su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pg_ctl start -w -D ${PGDATA}" + +sleep 5 + +echo "select count(*) from pgbench_accounts;" | su postgres -c "/usr/pgsql-${PG_VERSION}/bin/psql" || exit 1 + +#else +# echo "127.0.0.1 repo.postgrespro.ru" >> /etc/hosts +# rpm -ivh 
http://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-${DISTRIB}.noarch.rpm +# yum install -y ${PKG_NAME} +# ${PKG_NAME} --help +# su postgres -c "/usr/pgsql-${PG_VERSION}/bin/initdb -k -D ${PGDATA}" +# su postgres -c "${PKG_NAME} init -B /tmp/backup" +# su postgres -c "${PKG_NAME} add-instance --instance=node -B /tmp/backup -D ${PGDATA}" +# echo "wal_level=hot_standby" >> ${PGDATA}/postgresql.auto.conf +# echo "archive_mode=on" >> ${PGDATA}/postgresql.auto.conf +# echo "archive_command='${PKG_NAME} archive-push -B /tmp/backup --instance=node --wal-file-path %p --wal-file-name %f'" >> ${PGDATA}/postgresql.auto.conf +# su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pg_ctl start -D ${PGDATA}" +# sleep 5 +# su postgres -c "${PKG_NAME} backup --instance=node --compress -b full -B /tmp/backup -D ${PGDATA}" +# su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pgbench --no-vacuum -i -s 10" +# su postgres -c "${PKG_NAME} backup --instance=node -b page -B /tmp/backup -D ${PGDATA}" +# su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pgbench --no-vacuum -t 1000 -c 1" +# su postgres -c "${PKG_NAME} backup --instance=node -b page -B /tmp/backup -D ${PGDATA}" +# su postgres -c "${PKG_NAME} show --instance=node -B /tmp/backup -D ${PGDATA}" +# su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pg_ctl stop -D ${PGDATA}" +# rm -rf ${PGDATA} +# su postgres -c "${PKG_NAME} restore --instance=node -B /tmp/backup -D ${PGDATA}" +# su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pg_ctl start -D ${PGDATA}" +# sleep 10 +# echo "select count(*) from pgbench_accounts;" | su postgres -c "/usr/pgsql-${PG_VERSION}/bin/psql" || exit 1 +#fi + +exit 0 # while PG12 is not working + +# SRC PACKAGE +cd /mnt +yum install yum-utils rpm-build -y +yumdownloader --source ${PKG_NAME} +rpm -ivh ./*.rpm +cd /root/rpmbuild/SPECS +exit 0 + +# build pg_probackup +yum-builddep -y pg_probackup.spec +rpmbuild -bs pg_probackup.spec +rpmbuild -ba pg_probackup.spec #2>&1 | tee -ai /app/out/build.log diff --git a/packaging/test/scripts/rpm_forks.sh b/packaging/test/scripts/rpm_forks.sh new file mode 100755 index 000000000..8596f6656 --- /dev/null +++ b/packaging/test/scripts/rpm_forks.sh @@ -0,0 +1,173 @@ +#!/usr/bin/env bash + +# Copyright Notice: +# © (C) Postgres Professional 2015-2016 http://www.postgrespro.ru/ +# Distributed under Apache License 2.0 +# Распространяется по лицензии Apache 2.0 + +set -xe +set -o pipefail + +# fix https://github.com/moby/moby/issues/23137 +ulimit -n 1024 + +PG_TOG=$(echo $PG_VERSION | sed 's|\.||g') + +if [ ${PBK_EDITION} == 'ent' ]; then + exit 0 +fi + +# yum upgrade -y || echo 'some packages in docker failed to upgrade' +# yum install -y sudo + +if [ ${DISTRIB} == 'rhel' ] && [ ${DISTRIB_VERSION} == '6' ]; then + exit 0; +elif [ ${DISTRIB} == 'oraclelinux' ] && [ ${DISTRIB_VERSION} == '6' ]; then + exit 0; +elif [ ${DISTRIB} == 'oraclelinux' ] && [ ${DISTRIB_VERSION} == '8' ]; then + yum install -y nginx +elif [ ${DISTRIB_VERSION} == '7' ]; then + yum install -y https://nginx.org/packages/rhel/7/x86_64/RPMS/nginx-1.8.1-1.el7.ngx.x86_64.rpm +elif [ ${DISTRIB} == 'oraclelinux' ] && [ ${DISTRIB_VERSION} == '6' ]; then + yum install -y https://nginx.org/packages/rhel/6/x86_64/RPMS/nginx-1.8.1-1.el6.ngx.x86_64.rpm +else + yum install epel-release -y + yum install -y nginx +fi + +if ! getent group nginx > /dev/null 2>&1 ; then + addgroup --system --quiet nginx +fi +if ! 
getent passwd nginx > /dev/null 2>&1 ; then + adduser --quiet \ + --system --disabled-login --ingroup nginx \ + --home /var/run/nginx/ --no-create-home \ + nginx +fi + +cat < /etc/nginx/nginx.conf +user nginx; +worker_processes 1; +error_log /var/log/nginx/error.log; +events { + worker_connections 1024; +} +http { + server { + listen 80 default; + root /app/www; + } +} +EOF +nginx -s reload || (pkill -9 nginx || nginx -c /etc/nginx/nginx.conf &) + +# if [ ${DISTRIB} == 'centos' ]; then + +# install old packages +yum install -y http://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-${DISTRIB}.noarch.rpm +sed -i "s/https/http/g" /etc/yum.repos.d/pg_probackup-forks.repo + +yum install -y ${PKG_NAME} +${PKG_NAME} --help +${PKG_NAME} --version + +if [ $PBK_EDITION == 'std' ] ; then + + # install POSTGRESQL + # rpm -ivh https://download.postgresql.org/pub/repos/yum/reporpms/EL-${DISTRIB_VERSION}-x86_64/pgdg-redhat-repo-latest.noarch.rpm + if [[ ${PG_VERSION} == '11' ]] || [[ ${PG_VERSION} == '12' ]]; then + rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm + else + rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm + fi + + if [[ ${PG_VERSION} == '9.6' ]]; then + yum install -y postgrespro${PG_TOG}-server.x86_64 + BINDIR="/usr/pgpro-${PG_VERSION}/bin" + else + yum install -y postgrespro-std-${PG_TOG}-server.x86_64 + BINDIR="/opt/pgpro/std-${PG_VERSION}/bin" + export LD_LIBRARY_PATH=/opt/pgpro/std-${PG_VERSION}/lib/ + fi + + export PGDATA=/tmp/data + + su postgres -c "${BINDIR}/initdb -k -D ${PGDATA}" + echo "wal_level=hot_standby" >> ${PGDATA}/postgresql.auto.conf + echo "archive_mode=on" >> ${PGDATA}/postgresql.auto.conf + echo "fsync=off" >> ${PGDATA}/postgresql.auto.conf + echo "archive_command='${PKG_NAME} archive-push -B /tmp/backup --instance=node --wal-file-path %p --wal-file-name %f'" >> ${PGDATA}/postgresql.auto.conf + su postgres -c "${BINDIR}/pg_ctl start -D ${PGDATA}" + sleep 5 + + su postgres -c "${PKG_NAME} init -B /tmp/backup" + su postgres -c "${PKG_NAME} add-instance --instance=node -B /tmp/backup -D ${PGDATA}" + su postgres -c "${PKG_NAME} backup --instance=node --compress -b full -B /tmp/backup -D ${PGDATA}" + su postgres -c "${PKG_NAME} show --instance=node -B /tmp/backup -D ${PGDATA} --archive" + + su postgres -c "${BINDIR}/pgbench --no-vacuum -i -s 5" + su postgres -c "${PKG_NAME} backup --instance=node -b page -B /tmp/backup -D ${PGDATA}" +fi + +# install new packages +echo "127.0.0.1 repo.postgrespro.ru" >> /etc/hosts + +# yum remove -y pg_probackup-repo +#yum install -y http://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-${DISTRIB}.noarch.rpm +#yum clean all -y + +sed -i "s/https/http/g" /etc/yum.repos.d/pg_probackup-forks.repo + +# yum update -y ${PKG_NAME} +yum install -y ${PKG_NAME} + +${PKG_NAME} --help +${PKG_NAME} --version + +if [ $PBK_EDITION == 'ent' ]; then + exit 0 +fi + +# su postgres -c "${BINDIR}/pgbench --no-vacuum -t 1000 -c 1" +su postgres -c "${BINDIR}/pgbench --no-vacuum -i -s 5" +su postgres -c "${PKG_NAME} backup --instance=node -b full -B /tmp/backup -D ${PGDATA}" +su postgres -c "${PKG_NAME} show --instance=node -B /tmp/backup -D ${PGDATA}" + +su postgres -c "${BINDIR}/pg_ctl stop -D ${PGDATA}" +rm -rf ${PGDATA} + +su postgres -c 
"${PKG_NAME} restore --instance=node -B /tmp/backup -D ${PGDATA}" +su postgres -c "${BINDIR}/pg_ctl start -w -D ${PGDATA}" + +sleep 5 + +echo "select count(*) from pgbench_accounts;" | su postgres -c "${BINDIR}/psql" || exit 1 + +#else +# echo "127.0.0.1 repo.postgrespro.ru" >> /etc/hosts +# rpm -ivh http://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-${DISTRIB}.noarch.rpm +# yum install -y ${PKG_NAME} +# ${PKG_NAME} --help +# su postgres -c "/usr/pgsql-${PG_VERSION}/bin/initdb -k -D ${PGDATA}" +# su postgres -c "${PKG_NAME} init -B /tmp/backup" +# su postgres -c "${PKG_NAME} add-instance --instance=node -B /tmp/backup -D ${PGDATA}" +# echo "wal_level=hot_standby" >> ${PGDATA}/postgresql.auto.conf +# echo "archive_mode=on" >> ${PGDATA}/postgresql.auto.conf +# echo "archive_command='${PKG_NAME} archive-push -B /tmp/backup --instance=node --wal-file-path %p --wal-file-name %f'" >> ${PGDATA}/postgresql.auto.conf +# su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pg_ctl start -D ${PGDATA}" +# sleep 5 +# su postgres -c "${PKG_NAME} backup --instance=node --compress -b full -B /tmp/backup -D ${PGDATA}" +# su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pgbench --no-vacuum -i -s 10" +# su postgres -c "${PKG_NAME} backup --instance=node -b page -B /tmp/backup -D ${PGDATA}" +# su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pgbench --no-vacuum -t 1000 -c 1" +# su postgres -c "${PKG_NAME} backup --instance=node -b page -B /tmp/backup -D ${PGDATA}" +# su postgres -c "${PKG_NAME} show --instance=node -B /tmp/backup -D ${PGDATA}" +# su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pg_ctl stop -D ${PGDATA}" +# rm -rf ${PGDATA} +# su postgres -c "${PKG_NAME} restore --instance=node -B /tmp/backup -D ${PGDATA}" +# su postgres -c "/usr/pgsql-${PG_VERSION}/bin/pg_ctl start -D ${PGDATA}" +# sleep 10 +# echo "select count(*) from pgbench_accounts;" | su postgres -c "/usr/pgsql-${PG_VERSION}/bin/psql" || exit 1 +#fi + +exit 0 diff --git a/packaging/test/scripts/suse.sh b/packaging/test/scripts/suse.sh new file mode 100755 index 000000000..ff630b479 --- /dev/null +++ b/packaging/test/scripts/suse.sh @@ -0,0 +1,128 @@ +#!/usr/bin/env bash + +# Copyright Notice: +# © (C) Postgres Professional 2015-2016 http://www.postgrespro.ru/ +# Distributed under Apache License 2.0 +# Распространяется по лицензии Apache 2.0 + +set -xe +set -o pipefail + +# fix https://github.com/moby/moby/issues/23137 +ulimit -n 1024 + +# currenctly we do not build std|ent packages for Suse +if [[ ${PBK_EDITION} != '' ]] ; then + exit 0 +fi + +PG_TOG=$(echo $PG_VERSION | sed 's|\.||g') + +if [ ${PG_TOG} == '13' ]; then # no packages for PG13 + exit 0 +fi + +if [ ${PG_TOG} == '11' ]; then # no packages for PG11 + exit 0 +fi + +if [ ${PG_TOG} == '95' ]; then # no packages for PG95 + exit 0 +fi + +zypper install -y nginx +if ! getent group nginx > /dev/null 2>&1 ; then + addgroup --system --quiet nginx +fi +if ! 
getent passwd nginx > /dev/null 2>&1 ; then + adduser --quiet \ + --system --disabled-login --ingroup nginx \ + --home /var/run/nginx/ --no-create-home \ + nginx +fi + +useradd postgres + +cat < /etc/nginx/nginx.conf +user nginx; +worker_processes 1; +error_log /var/log/nginx/error.log; +events { + worker_connections 1024; +} +http { + server { + listen 80 default; + root /app/www; + } +} +EOF +nginx -s reload || (pkill -9 nginx || nginx -c /etc/nginx/nginx.conf &) + +# install POSTGRESQL +zypper install -y postgresql${PG_TOG} postgresql${PG_TOG}-server postgresql${PG_TOG}-contrib +export PGDATA=/tmp/data + +# install old packages +zypper install --allow-unsigned-rpm -y http://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-${DISTRIB}.noarch.rpm +zypper --gpg-auto-import-keys install -y ${PKG_NAME} +${PKG_NAME} --help +${PKG_NAME} --version + +su postgres -c "/usr/lib/postgresql${PG_TOG}/bin/initdb -k -D ${PGDATA}" +echo "fsync=off" >> ${PGDATA}/postgresql.auto.conf +echo "wal_level=hot_standby" >> ${PGDATA}/postgresql.auto.conf +echo "archive_mode=on" >> ${PGDATA}/postgresql.auto.conf +echo "archive_command='${PKG_NAME} archive-push --no-sync -B /tmp/backup --instance=node --wal-file-path %p --wal-file-name %f'" >> ${PGDATA}/postgresql.auto.conf +su postgres -c "/usr/lib/postgresql${PG_TOG}/bin/pg_ctl start -D ${PGDATA}" +sleep 5 + +su postgres -c "${PKG_NAME} init -B /tmp/backup" +su postgres -c "${PKG_NAME} add-instance --instance=node -B /tmp/backup -D ${PGDATA}" +su postgres -c "${PKG_NAME} backup --instance=node --compress -b full -B /tmp/backup -D ${PGDATA} --no-sync" +su postgres -c "${PKG_NAME} show --instance=node -B /tmp/backup -D ${PGDATA} --archive" + +su postgres -c "/usr/lib/postgresql${PG_TOG}/bin/pgbench --no-vacuum -i -s 5" +su postgres -c "${PKG_NAME} backup --instance=node -b page -B /tmp/backup -D ${PGDATA} --no-sync" + +# install new packages +echo "127.0.0.1 repo.postgrespro.ru" >> /etc/hosts +zypper clean all -y + +sed -i "s/https/http/g" /etc/zypp/repos.d/pg_probackup.repo + +zypper update -y ${PKG_NAME} +${PKG_NAME} --help +${PKG_NAME} --version + +su postgres -c "/usr/lib/postgresql${PG_TOG}/bin/pgbench --no-vacuum -t 1000 -c 1" +su postgres -c "${PKG_NAME} backup --instance=node -b page -B /tmp/backup -D ${PGDATA} --no-sync" + +su postgres -c "/usr/lib/postgresql${PG_TOG}/bin/pgbench --no-vacuum -t 1000 -c 1" +su postgres -c "${PKG_NAME} backup --instance=node -b page -B /tmp/backup -D ${PGDATA} --no-sync" +su postgres -c "${PKG_NAME} show --instance=node -B /tmp/backup -D ${PGDATA}" + +su postgres -c "/usr/lib/postgresql${PG_TOG}/bin/pg_ctl stop -D ${PGDATA}" +rm -rf ${PGDATA} + +su postgres -c "${PKG_NAME} restore --instance=node -B /tmp/backup -D ${PGDATA} --no-sync" +su postgres -c "/usr/lib/postgresql${PG_TOG}/bin/pg_ctl start -w -D ${PGDATA}" + +sleep 5 + +echo "select count(*) from pgbench_accounts;" | su postgres -c "/usr/lib/postgresql${PG_TOG}/bin/psql" || exit 1 + +exit 0 + +# SRC PACKAGE +cd /mnt +yum install yum-utils rpm-build -y +yumdownloader --source ${PKG_NAME} +rpm -ivh ./*.rpm +cd /root/rpmbuild/SPECS +exit 0 + +# build pg_probackup +yum-builddep -y pg_probackup.spec +rpmbuild -bs pg_probackup.spec +rpmbuild -ba pg_probackup.spec #2>&1 | tee -ai /app/out/build.log diff --git a/packaging/test/scripts/suse_forks.sh b/packaging/test/scripts/suse_forks.sh new file mode 100644 index 000000000..b83f1ddd9 --- /dev/null +++ b/packaging/test/scripts/suse_forks.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -xe +set -o 
pipefail +exit 0 From 2e2a8b8dca14f6118c650b6eab8db5a53897e1d3 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 26 Sep 2021 15:40:00 +0300 Subject: [PATCH 201/525] [Issue #360] add test coverage --- tests/exclude.py | 64 ++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 62 insertions(+), 2 deletions(-) diff --git a/tests/exclude.py b/tests/exclude.py index 83743bf0b..b98a483d0 100644 --- a/tests/exclude.py +++ b/tests/exclude.py @@ -181,8 +181,7 @@ def test_exclude_unlogged_tables_1(self): self.backup_node( backup_dir, 'node', node, backup_type='delta', - options=['--stream'] - ) + options=['--stream']) pgdata = self.pgdata_content(node.data_dir) @@ -201,6 +200,67 @@ def test_exclude_unlogged_tables_1(self): # Clean after yourself self.del_test_dir(module_name, fname) + # @unittest.skip("skip") + def test_exclude_unlogged_tables_2(self): + """ + make node, create unlogged, take FULL, check + that unlogged was not backed up + """ + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + "shared_buffers": "10MB"}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + for backup_type in ['full', 'delta', 'page']: + + if backup_type == 'full': + node.safe_psql( + 'postgres', + 'create unlogged table test as select generate_series(0,20050000)::text') + else: + node.safe_psql( + 'postgres', + 'insert into test select generate_series(0,20050000)::text') + + rel_path = node.safe_psql( + 'postgres', + "select pg_relation_filepath('test')").decode('utf-8').rstrip() + + backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type=backup_type, options=['--stream']) + + filelist = self.get_backup_filelist( + backup_dir, 'node', backup_id) + + self.assertNotIn( + rel_path, filelist, + "Unlogged table was not excluded") + + self.assertNotIn( + rel_path + '.1', filelist, + "Unlogged table was not excluded") + + self.assertNotIn( + rel_path + '.2', filelist, + "Unlogged table was not excluded") + + self.assertNotIn( + rel_path + '.3', filelist, + "Unlogged table was not excluded") + + # Clean after yourself + self.del_test_dir(module_name, fname) + # @unittest.skip("skip") def test_exclude_log_dir(self): """ From 7f690abdf67d02c2c12ac3359cdbe6aed8a0ed59 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sun, 26 Sep 2021 15:46:02 +0300 Subject: [PATCH 202/525] [Issue #360] correctly exclude unlogged relations from backup --- src/dir.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/src/dir.c b/src/dir.c index c5c5b3297..00a4c4f82 100644 --- a/src/dir.c +++ b/src/dir.c @@ -730,21 +730,32 @@ dir_check_file(pgFile *file, bool backup_logs) if (fork_name) { /* Auxiliary fork of the relfile */ - if (strcmp(fork_name, "vm") == 0) + if (strcmp(fork_name, "_vm") == 0) file->forkName = vm; - else if (strcmp(fork_name, "fsm") == 0) + else if (strcmp(fork_name, "_fsm") == 0) file->forkName = fsm; - else if (strcmp(fork_name, "cfm") == 0) + else if (strcmp(fork_name, "_cfm") == 0) file->forkName = cfm; - else if (strcmp(fork_name, "ptrack") == 0) + else if (strcmp(fork_name, "_ptrack") == 0) file->forkName = ptrack; - else if (strcmp(fork_name, "init") == 0) + else if (strcmp(fork_name, "_init") == 0) file->forkName = init; + // extract relOid 
for certain forks + if (file->forkName == vm || + file->forkName == fsm || + file->forkName == init || + file->forkName == cfm) + { + // sanity + if (sscanf(file->name, "%u_*", &(file->relOid)) != 1) + file->relOid = 0; + } + /* Do not backup ptrack files */ if (file->forkName == ptrack) return CHECK_FALSE; From 6081c08f6128d18273fc10ad56796881178a6498 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sat, 2 Oct 2021 12:52:21 +0300 Subject: [PATCH 203/525] Fix link to latest windows installers --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 344b03fb3..e8d25c5a6 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,7 @@ For detailed release plans check [Milestones](https://github.com/postgrespro/pg_ ## Installation and Setup ### Windows Installation -Installers are available in release **assets**. [Latests](https://github.com/postgrespro/pg_probackup/releases/2.4.9). +Installers are available in release **assets**. [Latests](https://github.com/postgrespro/pg_probackup/releases/2.4.15). ### Linux Installation #### pg_probackup for vanilla PostgreSQL From 7feb7489053efe267d717f8efea51be4b0a6969e Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Wed, 13 Oct 2021 05:30:20 +0300 Subject: [PATCH 204/525] Prerelease test stabilization ptrack.PtrackTest.test_ptrack_threads backup.BackupTest.test_backup_with_least_privileges_role ptrack.PtrackTest.test_ptrack_without_full option.OptionTest.test_help_1 --- tests/backup.py | 2 +- tests/expected/option_help.out | 20 +++++++++++++++++--- tests/ptrack.py | 8 ++++++++ 3 files changed, 26 insertions(+), 4 deletions(-) diff --git a/tests/backup.py b/tests/backup.py index 558c62de3..3548fa56b 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -1959,7 +1959,7 @@ def test_backup_with_least_privileges_role(self): node.safe_psql( "backupdb", "GRANT EXECUTE ON FUNCTION ptrack.ptrack_get_pagemapset(pg_lsn) TO backup; " - "GRANT EXECUTE ON FUNCTION 'ptrack.ptrack_init_lsn()' TO backup; ") + "GRANT EXECUTE ON FUNCTION ptrack.ptrack_init_lsn() TO backup;") if ProbackupTest.enterprise: node.safe_psql( diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index c2b15e7ac..01384a893 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -42,7 +42,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. pg_probackup backup -B backup-path -b backup-mode --instance=instance_name [-D pgdata-path] [-C] - [--stream [-S slot-name]] [--temp-slot] + [--stream [-S slot-name] [--temp-slot]] [--backup-pg-log] [-j num-threads] [--progress] [--no-validate] [--skip-block-validation] [--external-dirs=external-directories-paths] @@ -53,7 +53,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--error-log-filename=error-log-filename] [--log-directory=log-directory] [--log-rotation-size=log-rotation-size] - [--log-rotation-age=log-rotation-age] + [--log-rotation-age=log-rotation-age] [--no-color] [--delete-expired] [--delete-wal] [--merge-expired] [--retention-redundancy=retention-redundancy] [--retention-window=retention-window] @@ -113,7 +113,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. 
pg_probackup show -B backup-path [--instance=instance_name [-i backup-id]] [--format=format] [--archive] - [--help] + [--no-color] [--help] pg_probackup delete -B backup-path --instance=instance_name [-j num-threads] [--progress] @@ -165,5 +165,19 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--ssh-options] [--help] + pg_probackup catchup -b catchup-mode + --source-pgdata=path_to_pgdata_on_remote_server + --destination-pgdata=path_to_local_dir + [--stream [-S slot-name] [--temp-slot | --perm-slot]] + [-j num-threads] + [-T OLDDIR=NEWDIR] + [--exclude-path=path_prefix] + [-d dbname] [-h host] [-p port] [-U username] + [-w --no-password] [-W --password] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--help] + Read the website for details. Report bugs to . diff --git a/tests/ptrack.py b/tests/ptrack.py index bcc8dc20a..a20be54b8 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -100,6 +100,10 @@ def test_ptrack_without_full(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + try: self.backup_node(backup_dir, 'node', node, backup_type="ptrack") # we should die here because exception is what we expect to happen @@ -136,6 +140,10 @@ def test_ptrack_threads(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + self.backup_node( backup_dir, 'node', node, backup_type="full", options=["-j", "4"]) From 396155e5bcc69a6ec21598012592a0d5cf31b2eb Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Wed, 13 Oct 2021 05:56:44 +0300 Subject: [PATCH 205/525] Issue 439 (#440) * [Issue #439] skip unsupported tests in 9.5 (tests with backups from replica and with pg_control_checkpoint() calling) --- tests/archive.py | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/tests/archive.py b/tests/archive.py index 0ade2d66a..4b07c1dbd 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -83,6 +83,12 @@ def test_pgpro434_2(self): pg_options={ 'checkpoint_timeout': '30s'} ) + + if self.get_version(node) < self.version_to_num('9.6.0'): + self.del_test_dir(module_name, fname) + return unittest.skip( + 'Skipped because pg_control_checkpoint() is not supported in PG 9.5') + self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -693,6 +699,11 @@ def test_replica_archive(self): 'checkpoint_timeout': '30s', 'max_wal_size': '32MB'}) + if self.get_version(master) < self.version_to_num('9.6.0'): + self.del_test_dir(module_name, fname) + return unittest.skip( + 'Skipped because backup from replica is not supported in PG 9.5') + self.init_pb(backup_dir) # ADD INSTANCE 'MASTER' self.add_instance(backup_dir, 'master', master) @@ -818,6 +829,12 @@ def test_master_and_replica_parallel_archiving(self): pg_options={ 'archive_timeout': '10s'} ) + + if self.get_version(master) < self.version_to_num('9.6.0'): + self.del_test_dir(module_name, fname) + return unittest.skip( + 'Skipped because backup from replica is not supported in PG 9.5') + replica = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'replica')) replica.cleanup() @@ -908,6 +925,11 @@ def test_basic_master_and_replica_concurrent_archiving(self): 'checkpoint_timeout': '30s', 'archive_timeout': '10s'}) + if self.get_version(master) < 
self.version_to_num('9.6.0'): + self.del_test_dir(module_name, fname) + return unittest.skip( + 'Skipped because backup from replica is not supported in PG 9.5') + replica = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'replica')) replica.cleanup() @@ -2009,6 +2031,11 @@ def test_archive_pg_receivexlog_partial_handling(self): set_replication=True, initdb_params=['--data-checksums']) + if self.get_version(node) < self.version_to_num('9.6.0'): + self.del_test_dir(module_name, fname) + return unittest.skip( + 'Skipped because backup from replica is not supported in PG 9.5') + self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2655,4 +2682,4 @@ def test_archive_empty_history_file(self): #t2 ---------------- # / #t1 -A-------- -# \ No newline at end of file +# From f7a81aa5fd07e6dc38192d1af6c896e4c7d61a9c Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Wed, 13 Oct 2021 13:48:53 +0300 Subject: [PATCH 206/525] [Issue #413] packaging: bump postgres versions, add 14, remove 9.5, some workarounds (caused by old docker images) --- Makefile | 2 +- packaging/Makefile.pkg | 42 +++++++++++----------- packaging/pkg/Makefile.alt | 43 ++++++++++++++-------- packaging/pkg/Makefile.centos | 28 +++++++++------ packaging/pkg/Makefile.debian | 56 ++++++++++++++++++----------- packaging/pkg/Makefile.oraclelinux | 43 ++++++++++++++-------- packaging/pkg/Makefile.rhel | 28 +++++++++------ packaging/pkg/Makefile.suse | 28 +++++++++------ packaging/pkg/Makefile.ubuntu | 57 +++++++++++++++++++----------- packaging/pkg/scripts/deb.sh | 6 ++-- packaging/pkg/scripts/rpm.sh | 19 ++++++---- 11 files changed, 220 insertions(+), 132 deletions(-) diff --git a/Makefile b/Makefile index 4e463bf7c..aca1df356 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ PROGRAM = pg_probackup WORKDIR ?= $(CURDIR) BUILDDIR = $(WORKDIR)/build/ -PBK_GIT_REPO = http://github.com/postgrespro/pg_probackup +PBK_GIT_REPO = https://github.com/postgrespro/pg_probackup # utils OBJS = src/utils/configuration.o src/utils/json.o src/utils/logger.o \ diff --git a/packaging/Makefile.pkg b/packaging/Makefile.pkg index bfe2043c3..fc92ae408 100644 --- a/packaging/Makefile.pkg +++ b/packaging/Makefile.pkg @@ -38,39 +38,39 @@ build/prepare: build/clean: build/prepare find $(BUILDDIR) -maxdepth 1 -type f -exec rm -f {} \; -build/all: build/debian build/ubuntu build/centos build/oraclelinux build/alt build/suse # build/rhel +build/all: build/debian build/ubuntu build/centos build/oraclelinux build/alt build/suse build/rhel @echo Packaging is done ### DEBIAN build/debian: build/debian_8 build/debian_9 build/debian_10 build/debian_11 @echo Debian: done -build/debian_8: build/debian_8_9.5 build/debian_8_9.6 build/debian_8_10 build/debian_8_11 build/debian_8_12 build/debian_8_13 +build/debian_8: build/debian_8_9.6 build/debian_8_10 build/debian_8_11 build/debian_8_12 build/debian_8_13 build/debian_8_14 @echo Debian 8: done -build/debian_9: build/debian_9_9.5 build/debian_9_9.6 build/debian_9_10 build/debian_9_11 build/debian_9_12 build/debian_9_13 +build/debian_9: build/debian_9_9.6 build/debian_9_10 build/debian_9_11 build/debian_9_12 build/debian_9_13 build/debian_9_14 @echo Debian 9: done -build/debian_10: build/debian_10_9.5 build/debian_10_9.6 build/debian_10_10 build/debian_10_11 build/debian_10_12 build/debian_10_13 +build/debian_10: build/debian_10_9.6 build/debian_10_10 build/debian_10_11 build/debian_10_12 build/debian_10_13 build/debian_10_14 @echo Debian 10: 
done -build/debian_11: build/debian_11_9.5 build/debian_11_9.6 build/debian_11_10 build/debian_11_11 build/debian_11_12 build/debian_11_13 +build/debian_11: build/debian_11_9.6 build/debian_11_10 build/debian_11_11 build/debian_11_12 build/debian_11_13 build/debian_11_14 @echo Debian 11: done ### UBUNTU build/ubuntu: build/ubuntu_14.04 build/ubuntu_16.04 build/ubuntu_18.04 build/ubuntu_20.04 @echo Ubuntu: done -build/ubuntu_14.04: build/ubuntu_14.04_9.5 build/ubuntu_14.04_9.6 build/ubuntu_14.04_10 build/ubuntu_14.04_11 build/ubuntu_14.04_12 build/ubuntu_14.04_13 +build/ubuntu_14.04: build/ubuntu_14.04_9.6 build/ubuntu_14.04_10 build/ubuntu_14.04_11 build/ubuntu_14.04_12 build/ubuntu_14.04_13 build/ubuntu_14.04_14 @echo Ubuntu 14.04: done -build/ubuntu_16.04: build/ubuntu_16.04_9.5 build/ubuntu_16.04_9.6 build/ubuntu_16.04_10 build/ubuntu_16.04_11 build/ubuntu_16.04_12 build/ubuntu_16.04_13 +build/ubuntu_16.04: build/ubuntu_16.04_9.6 build/ubuntu_16.04_10 build/ubuntu_16.04_11 build/ubuntu_16.04_12 build/ubuntu_16.04_13 build/ubuntu_16.04_14 @echo Ubuntu 16.04: done -build/ubuntu_18.04: build/ubuntu_18.04_9.5 build/ubuntu_18.04_9.6 build/ubuntu_18.04_10 build/ubuntu_18.04_11 build/ubuntu_18.04_12 build/ubuntu_18.04_13 +build/ubuntu_18.04: build/ubuntu_18.04_9.6 build/ubuntu_18.04_10 build/ubuntu_18.04_11 build/ubuntu_18.04_12 build/ubuntu_18.04_13 build/ubuntu_18.04_14 @echo Ubuntu 18.04: done -build/ubuntu_20.04: build/ubuntu_20.04_9.5 build/ubuntu_20.04_9.6 build/ubuntu_20.04_10 build/ubuntu_20.04_11 build/ubuntu_20.04_12 build/ubuntu_20.04_13 +build/ubuntu_20.04: build/ubuntu_20.04_9.6 build/ubuntu_20.04_10 build/ubuntu_20.04_11 build/ubuntu_20.04_12 build/ubuntu_20.04_13 build/ubuntu_20.04_14 @echo Ubuntu 20.04: done define build_deb @@ -92,33 +92,33 @@ include packaging/pkg/Makefile.ubuntu build/centos: build/centos_7 build/centos_8 #build/rpm_repo_package_centos @echo Centos: done -build/centos_7: build/centos_7_9.5 build/centos_7_9.6 build/centos_7_10 build/centos_7_11 build/centos_7_12 build/centos_7_13 +build/centos_7: build/centos_7_9.6 build/centos_7_10 build/centos_7_11 build/centos_7_12 build/centos_7_13 build/centos_7_14 @echo Centos 7: done -build/centos_8: build/centos_8_9.5 build/centos_8_9.6 build/centos_8_10 build/centos_8_11 build/centos_8_12 build/centos_8_13 +build/centos_8: build/centos_8_9.6 build/centos_8_10 build/centos_8_11 build/centos_8_12 build/centos_8_13 build/centos_8_14 @echo Centos 8: done # Oracle Linux build/oraclelinux: build/oraclelinux_6 build/oraclelinux_7 build/oraclelinux_8 #build/rpm_repo_package_oraclelinux @echo Oraclelinux: done -build/oraclelinux_6: build/oraclelinux_6_9.5 build/oraclelinux_6_9.6 build/oraclelinux_6_10 build/oraclelinux_6_11 build/oraclelinux_6_12 build/oraclelinux_6_13 +build/oraclelinux_6: build/oraclelinux_6_9.6 build/oraclelinux_6_10 build/oraclelinux_6_11 build/oraclelinux_6_12 build/oraclelinux_6_13 build/oraclelinux_6_14 @echo Oraclelinux 6: done -build/oraclelinux_7: build/oraclelinux_7_9.5 build/oraclelinux_7_9.6 build/oraclelinux_7_10 build/oraclelinux_7_11 build/oraclelinux_7_12 build/oraclelinux_7_13 +build/oraclelinux_7: build/oraclelinux_7_9.6 build/oraclelinux_7_10 build/oraclelinux_7_11 build/oraclelinux_7_12 build/oraclelinux_7_13 build/oraclelinux_7_14 @echo Oraclelinux 7: done -build/oraclelinux_8: build/oraclelinux_8_9.5 build/oraclelinux_8_9.6 build/oraclelinux_8_10 build/oraclelinux_8_11 build/oraclelinux_8_12 build/oraclelinux_8_13 +build/oraclelinux_8: build/oraclelinux_8_9.6 build/oraclelinux_8_10 
build/oraclelinux_8_11 build/oraclelinux_8_12 build/oraclelinux_8_13 build/oraclelinux_8_14 @echo Oraclelinux 8: done # RHEL build/rhel: build/rhel_7 build/rhel_8 #build/rpm_repo_package_rhel @echo Rhel: done -build/rhel_7: build/rhel_7_9.5 build/rhel_7_9.6 build/rhel_7_10 build/rhel_7_11 build/rhel_7_12 build/rhel_7_13 +build/rhel_7: build/rhel_7_9.6 build/rhel_7_10 build/rhel_7_11 build/rhel_7_12 build/rhel_7_13 build/rhel_7_14 @echo Rhel 7: done -build/rhel_8: build/rhel_8_9.5 build/rhel_8_9.6 build/rhel_8_10 build/rhel_8_11 build/rhel_8_12 build/rhel_8_13 +build/rhel_8: build/rhel_8_9.6 build/rhel_8_10 build/rhel_8_11 build/rhel_8_12 build/rhel_8_13 build/rhel_8_14 @echo Rhel 8: done @@ -143,13 +143,13 @@ include packaging/pkg/Makefile.oraclelinux build/alt: build/alt_7 build/alt_8 build/alt_9 @echo Alt Linux: done -build/alt_7: build/alt_7_9.5 build/alt_7_9.6 build/alt_7_10 build/alt_7_11 build/alt_7_12 build/alt_7_13 +build/alt_7: build/alt_7_9.6 build/alt_7_10 build/alt_7_11 build/alt_7_12 build/alt_7_13 build/alt_7_14 @echo Alt Linux 7: done -build/alt_8: build/alt_8_9.5 build/alt_8_9.6 build/alt_8_10 build/alt_8_11 build/alt_8_12 build/alt_8_13 +build/alt_8: build/alt_8_9.6 build/alt_8_10 build/alt_8_11 build/alt_8_12 build/alt_8_13 build/alt_8_14 @echo Alt Linux 8: done -build/alt_9: build/alt_9_9.5 build/alt_9_9.6 build/alt_9_10 build/alt_9_11 build/alt_9_12 build/alt_9_13 +build/alt_9: build/alt_9_9.6 build/alt_9_10 build/alt_9_11 build/alt_9_12 build/alt_9_13 build/alt_9_14 @echo Alt Linux 9: done define build_alt @@ -170,10 +170,10 @@ include packaging/pkg/Makefile.alt build/suse: build/suse_15.1 build/suse_15.2 @echo Suse: done -build/suse_15.1: build/suse_15.1_9.5 build/suse_15.1_9.6 build/suse_15.1_10 build/suse_15.1_11 build/suse_15.1_12 build/suse_15.1_13 +build/suse_15.1: build/suse_15.1_9.6 build/suse_15.1_10 build/suse_15.1_11 build/suse_15.1_12 build/suse_15.1_13 build/suse_15.1_14 @echo Rhel 15.1: done -build/suse_15.2: build/suse_15.2_9.5 build/suse_15.2_9.6 build/suse_15.2_10 build/suse_15.2_11 build/suse_15.2_12 build/suse_15.2_13 +build/suse_15.2: build/suse_15.2_9.6 build/suse_15.2_10 build/suse_15.2_11 build/suse_15.2_12 build/suse_15.2_13 build/suse_15.2_14 @echo Rhel 15.1: done define build_suse diff --git a/packaging/pkg/Makefile.alt b/packaging/pkg/Makefile.alt index e3fbae26e..919d3f58c 100644 --- a/packaging/pkg/Makefile.alt +++ b/packaging/pkg/Makefile.alt @@ -4,71 +4,84 @@ build/alt_7_9.5: touch build/alt_7_9.5 build/alt_7_9.6: - $(call build_alt,alt,7,,9.6,9.6.21) + $(call build_alt,alt,7,,9.6,9.6.23) touch build/alt_7_9.6 build/alt_7_10: - $(call build_alt,alt,7,,10,10.17) + $(call build_alt,alt,7,,10,10.18) touch build/alt_7_10 build/alt_7_11: - $(call build_alt,alt,7,,11,11.11) + $(call build_alt,alt,7,,11,11.13) touch build/alt_7_11 build/alt_7_12: - $(call build_alt,alt,7,,12,12.6) + $(call build_alt,alt,7,,12,12.8) touch build/alt_7_12 build/alt_7_13: - $(call build_alt,alt,7,,13,13.2) + $(call build_alt,alt,7,,13,13.4) touch build/alt_7_13 +build/alt_7_14: + $(call build_alt,alt,7,,14,14.0) + touch build/alt_7_14 + # ALT 8 build/alt_8_9.5: $(call build_alt,alt,8,,9.5,9.5.25) touch build/alt_8_9.5 build/alt_8_9.6: - $(call build_alt,alt,8,,9.6,9.6.21) + $(call build_alt,alt,8,,9.6,9.6.23) touch build/alt_8_9.6 build/alt_8_10: - $(call build_alt,alt,8,,10,10.17) + $(call build_alt,alt,8,,10,10.18) touch build/alt_8_10 build/alt_8_11: - $(call build_alt,alt,8,,11,11.11) + $(call build_alt,alt,8,,11,11.13) touch build/alt_8_11 build/alt_8_12: - 
$(call build_alt,alt,8,,12,12.6) + $(call build_alt,alt,8,,12,12.8) touch build/alt_8_12 build/alt_8_13: - $(call build_alt,alt,8,,13,13.2) + $(call build_alt,alt,8,,13,13.4) touch build/alt_8_13 +build/alt_8_14: + $(call build_alt,alt,8,,14,14.0) + touch build/alt_8_14 + # ALT 9 build/alt_9_9.5: $(call build_alt,alt,9,,9.5,9.5.25) touch build/alt_9_9.5 build/alt_9_9.6: - $(call build_alt,alt,9,,9.6,9.6.21) + $(call build_alt,alt,9,,9.6,9.6.23) touch build/alt_9_9.6 build/alt_9_10: - $(call build_alt,alt,9,,10,10.17) + $(call build_alt,alt,9,,10,10.18) touch build/alt_9_10 build/alt_9_11: - $(call build_alt,alt,9,,11,11.11) + $(call build_alt,alt,9,,11,11.13) touch build/alt_9_11 build/alt_9_12: - $(call build_alt,alt,9,,12,12.6) + $(call build_alt,alt,9,,12,12.8) touch build/alt_9_12 build/alt_9_13: - $(call build_alt,alt,9,,13,13.2) + $(call build_alt,alt,9,,13,13.4) touch build/alt_9_13 + +build/alt_9_14: + $(call build_alt,alt,9,,14,14.0) + touch build/alt_9_14 + diff --git a/packaging/pkg/Makefile.centos b/packaging/pkg/Makefile.centos index 9353b2cde..9542a5202 100644 --- a/packaging/pkg/Makefile.centos +++ b/packaging/pkg/Makefile.centos @@ -4,46 +4,54 @@ build/centos_7_9.5: touch build/centos_7_9.5 build/centos_7_9.6: - $(call build_rpm,centos,7,,9.6,9.6.21) + $(call build_rpm,centos,7,,9.6,9.6.23) touch build/centos_7_9.6 build/centos_7_10: - $(call build_rpm,centos,7,,10,10.16) + $(call build_rpm,centos,7,,10,10.18) touch build/centos_7_10 build/centos_7_11: - $(call build_rpm,centos,7,,11,11.11) + $(call build_rpm,centos,7,,11,11.13) touch build/centos_7_11 build/centos_7_12: - $(call build_rpm,centos,7,,12,12.6) + $(call build_rpm,centos,7,,12,12.8) touch build/centos_7_12 build/centos_7_13: - $(call build_rpm,centos,7,,13,13.2) + $(call build_rpm,centos,7,,13,13.4) touch build/centos_7_13 +build/centos_7_14: + $(call build_rpm,centos,7,,14,14.0) + touch build/centos_7_14 + # CENTOS 8 build/centos_8_9.5: $(call build_rpm,centos,8,,9.5,9.5.25) touch build/centos_8_9.5 build/centos_8_9.6: - $(call build_rpm,centos,8,,9.6,9.6.21) + $(call build_rpm,centos,8,,9.6,9.6.23) touch build/centos_8_9.6 build/centos_8_10: - $(call build_rpm,centos,8,,10,10.16) + $(call build_rpm,centos,8,,10,10.18) touch build/centos_8_10 build/centos_8_11: - $(call build_rpm,centos,8,,11,11.11) + $(call build_rpm,centos,8,,11,11.13) touch build/centos_8_11 build/centos_8_12: - $(call build_rpm,centos,8,,12,12.6) + $(call build_rpm,centos,8,,12,12.8) touch build/centos_8_12 build/centos_8_13: - $(call build_rpm,centos,8,,13,13.2) + $(call build_rpm,centos,8,,13,13.4) touch build/centos_8_13 + +build/centos_8_14: + $(call build_rpm,centos,8,,14,14.0) + touch build/centos_8_14 diff --git a/packaging/pkg/Makefile.debian b/packaging/pkg/Makefile.debian index 9625a14e9..7c82a412b 100644 --- a/packaging/pkg/Makefile.debian +++ b/packaging/pkg/Makefile.debian @@ -4,96 +4,112 @@ build/debian_8_9.5: touch build/debian_8_9.5 build/debian_8_9.6: - $(call build_deb,debian,8,jessie,9.6,9.6.21) + $(call build_deb,debian,8,jessie,9.6,9.6.23) touch build/debian_8_9.6 build/debian_8_10: - $(call build_deb,debian,8,jessie,10,10.16) + $(call build_deb,debian,8,jessie,10,10.18) touch build/debian_8_10 build/debian_8_11: - $(call build_deb,debian,8,jessie,11,11.11) + $(call build_deb,debian,8,jessie,11,11.13) touch build/debian_8_11 build/debian_8_12: - $(call build_deb,debian,8,jessie,12,12.6) + $(call build_deb,debian,8,jessie,12,12.8) touch build/debian_8_12 build/debian_8_13: - $(call build_deb,debian,8,jessie,13,13.2) + 
$(call build_deb,debian,8,jessie,13,13.4) touch build/debian_8_13 +build/debian_8_14: + $(call build_deb,debian,8,jessie,14,14.0) + touch build/debian_8_14 + # DEBIAN 9 build/debian_9_9.5: $(call build_deb,debian,9,stretch,9.5,9.5.25) touch build/debian_9_9.5 build/debian_9_9.6: - $(call build_deb,debian,9,stretch,9.6,9.6.21) + $(call build_deb,debian,9,stretch,9.6,9.6.23) touch build/debian_9_9.6 build/debian_9_10: - $(call build_deb,debian,9,stretch,10,10.16) + $(call build_deb,debian,9,stretch,10,10.18) touch build/debian_9_10 build/debian_9_11: - $(call build_deb,debian,9,stretch,11,11.11) + $(call build_deb,debian,9,stretch,11,11.13) touch build/debian_9_11 build/debian_9_12: - $(call build_deb,debian,9,stretch,12,12.6) + $(call build_deb,debian,9,stretch,12,12.8) touch build/debian_9_12 build/debian_9_13: - $(call build_deb,debian,9,stretch,13,13.2) + $(call build_deb,debian,9,stretch,13,13.4) touch build/debian_9_13 +build/debian_9_14: + $(call build_deb,debian,9,stretch,14,14.0) + touch build/debian_9_14 + # DEBIAN 10 build/debian_10_9.5: $(call build_deb,debian,10,buster,9.5,9.5.25) touch build/debian_10_9.5 build/debian_10_9.6: - $(call build_deb,debian,10,buster,9.6,9.6.21) + $(call build_deb,debian,10,buster,9.6,9.6.23) touch build/debian_10_9.6 build/debian_10_10: - $(call build_deb,debian,10,buster,10,10.16) + $(call build_deb,debian,10,buster,10,10.18) touch build/debian_10_10 build/debian_10_11: - $(call build_deb,debian,10,buster,11,11.11) + $(call build_deb,debian,10,buster,11,11.13) touch build/debian_10_11 build/debian_10_12: - $(call build_deb,debian,10,buster,12,12.6) + $(call build_deb,debian,10,buster,12,12.8) touch build/debian_10_12 build/debian_10_13: - $(call build_deb,debian,10,buster,13,13.2) + $(call build_deb,debian,10,buster,13,13.4) touch build/debian_10_13 +build/debian_10_14: + $(call build_deb,debian,10,buster,14,14.0) + touch build/debian_10_14 + # DEBIAN 11 build/debian_11_9.5: $(call build_deb,debian,11,bullseye,9.5,9.5.25) touch build/debian_11_9.5 build/debian_11_9.6: - $(call build_deb,debian,11,bullseye,9.6,9.6.21) + $(call build_deb,debian,11,bullseye,9.6,9.6.23) touch build/debian_11_9.6 build/debian_11_10: - $(call build_deb,debian,11,bullseye,10,10.16) + $(call build_deb,debian,11,bullseye,10,10.18) touch build/debian_11_10 build/debian_11_11: - $(call build_deb,debian,11,bullseye,11,11.11) + $(call build_deb,debian,11,bullseye,11,11.13) touch build/debian_11_11 build/debian_11_12: - $(call build_deb,debian,11,bullseye,12,12.6) + $(call build_deb,debian,11,bullseye,12,12.8) touch build/debian_11_12 build/debian_11_13: - $(call build_deb,debian,11,bullseye,13,13.2) + $(call build_deb,debian,11,bullseye,13,13.4) touch build/debian_11_13 + +build/debian_11_14: + $(call build_deb,debian,11,bullseye,14,14.0) + touch build/debian_11_14 diff --git a/packaging/pkg/Makefile.oraclelinux b/packaging/pkg/Makefile.oraclelinux index f4eada23f..3dbdbd424 100644 --- a/packaging/pkg/Makefile.oraclelinux +++ b/packaging/pkg/Makefile.oraclelinux @@ -4,71 +4,84 @@ build/oraclelinux_6_9.5: touch build/oraclelinux_6_9.5 build/oraclelinux_6_9.6: - $(call build_rpm,oraclelinux,6,,9.6,9.6.21) + $(call build_rpm,oraclelinux,6,,9.6,9.6.23) touch build/oraclelinux_6_9.6 build/oraclelinux_6_10: - $(call build_rpm,oraclelinux,6,,10,10.16) + $(call build_rpm,oraclelinux,6,,10,10.18) touch build/oraclelinux_6_10 build/oraclelinux_6_11: - $(call build_rpm,oraclelinux,6,,11,11.11) + $(call build_rpm,oraclelinux,6,,11,11.13) touch build/oraclelinux_6_11 build/oraclelinux_6_12: 
- $(call build_rpm,oraclelinux,6,,12,12.6) + $(call build_rpm,oraclelinux,6,,12,12.8) touch build/oraclelinux_6_12 build/oraclelinux_6_13: - $(call build_rpm,oraclelinux,6,,13,13.2) + $(call build_rpm,oraclelinux,6,,13,13.4) touch build/oraclelinux_6_13 +build/oraclelinux_6_14: + $(call build_rpm,oraclelinux,6,,14,14.0) + touch build/oraclelinux_6_14 + # ORACLE LINUX 7 build/oraclelinux_7_9.5: $(call build_rpm,oraclelinux,7,,9.5,9.5.25) touch build/oraclelinux_7_9.5 build/oraclelinux_7_9.6: - $(call build_rpm,oraclelinux,7,,9.6,9.6.21) + $(call build_rpm,oraclelinux,7,,9.6,9.6.23) touch build/oraclelinux_7_9.6 build/oraclelinux_7_10: - $(call build_rpm,oraclelinux,7,,10,10.16) + $(call build_rpm,oraclelinux,7,,10,10.18) touch build/oraclelinux_7_10 build/oraclelinux_7_11: - $(call build_rpm,oraclelinux,7,,11,11.11) + $(call build_rpm,oraclelinux,7,,11,11.13) touch build/oraclelinux_7_11 build/oraclelinux_7_12: - $(call build_rpm,oraclelinux,7,,12,12.6) + $(call build_rpm,oraclelinux,7,,12,12.8) touch build/oraclelinux_7_12 build/oraclelinux_7_13: - $(call build_rpm,oraclelinux,7,,13,13.2) + $(call build_rpm,oraclelinux,7,,13,13.4) touch build/oraclelinux_7_13 +build/oraclelinux_7_14: + $(call build_rpm,oraclelinux,7,,14,14.0) + touch build/oraclelinux_7_14 + # ORACLE LINUX 8 build/oraclelinux_8_9.5: $(call build_rpm,oraclelinux,8,,9.5,9.5.25) touch build/oraclelinux_8_9.5 build/oraclelinux_8_9.6: - $(call build_rpm,oraclelinux,8,,9.6,9.6.21) + $(call build_rpm,oraclelinux,8,,9.6,9.6.23) touch build/oraclelinux_8_9.6 build/oraclelinux_8_10: - $(call build_rpm,oraclelinux,8,,10,10.16) + $(call build_rpm,oraclelinux,8,,10,10.18) touch build/oraclelinux_8_10 build/oraclelinux_8_11: - $(call build_rpm,oraclelinux,8,,11,11.11) + $(call build_rpm,oraclelinux,8,,11,11.13) touch build/oraclelinux_8_11 build/oraclelinux_8_12: - $(call build_rpm,oraclelinux,8,,12,12.6) + $(call build_rpm,oraclelinux,8,,12,12.8) touch build/oraclelinux_8_12 build/oraclelinux_8_13: - $(call build_rpm,oraclelinux,8,,13,13.2) + $(call build_rpm,oraclelinux,8,,13,13.4) touch build/oraclelinux_8_13 + +build/oraclelinux_8_14: + $(call build_rpm,oraclelinux,8,,14,14.0) + touch build/oraclelinux_8_14 + diff --git a/packaging/pkg/Makefile.rhel b/packaging/pkg/Makefile.rhel index f266966cf..b604a990d 100644 --- a/packaging/pkg/Makefile.rhel +++ b/packaging/pkg/Makefile.rhel @@ -4,46 +4,54 @@ build/rhel_7_9.5: touch build/rhel_7_9.5 build/rhel_7_9.6: - $(call build_rpm,rhel,7,7Server,9.6,9.6.21) + $(call build_rpm,rhel,7,7Server,9.6,9.6.23) touch build/rhel_7_9.6 build/rhel_7_10: - $(call build_rpm,rhel,7,7Server,10,10.16) + $(call build_rpm,rhel,7,7Server,10,10.18) touch build/rhel_7_10 build/rhel_7_11: - $(call build_rpm,rhel,7,7Server,11,11.11) + $(call build_rpm,rhel,7,7Server,11,11.13) touch build/rhel_7_11 build/rhel_7_12: - $(call build_rpm,rhel,7,7Server,12,12.6) + $(call build_rpm,rhel,7,7Server,12,12.8) touch build/rhel_7_12 build/rhel_7_13: - $(call build_rpm,rhel,7,7Server,13,13.2) + $(call build_rpm,rhel,7,7Server,13,13.4) touch build/rhel_7_13 +build/rhel_7_14: + $(call build_rpm,rhel,7,7Server,14,14.0) + touch build/rhel_7_14 + # RHEL 8 build/rhel_8_9.5: $(call build_rpm,rhel,8,8Server,9.5,9.5.25) touch build/rhel_8_9.5 build/rhel_8_9.6: - $(call build_rpm,rhel,8,8Server,9.6,9.6.21) + $(call build_rpm,rhel,8,8Server,9.6,9.6.23) touch build/rhel_8_9.6 build/rhel_8_10: - $(call build_rpm,rhel,8,8Server,10,10.16) + $(call build_rpm,rhel,8,8Server,10,10.18) touch build/rhel_8_10 build/rhel_8_11: - $(call 
build_rpm,rhel,8,8Server,11,11.11) + $(call build_rpm,rhel,8,8Server,11,11.13) touch build/rhel_8_11 build/rhel_8_12: - $(call build_rpm,rhel,8,8Server,12,12.6) + $(call build_rpm,rhel,8,8Server,12,12.8) touch build/rhel_8_12 build/rhel_8_13: - $(call build_rpm,rhel,8,8Server,13,13.2) + $(call build_rpm,rhel,8,8Server,13,13.4) touch build/rhel_8_13 + +build/rhel_8_14: + $(call build_rpm,rhel,8,8Server,14,14.0) + touch build/rhel_8_14 diff --git a/packaging/pkg/Makefile.suse b/packaging/pkg/Makefile.suse index a9f1eaa36..5af22c5d0 100644 --- a/packaging/pkg/Makefile.suse +++ b/packaging/pkg/Makefile.suse @@ -4,46 +4,54 @@ build/suse_15.1_9.5: touch build/suse_15.1_9.5 build/suse_15.1_9.6: - $(call build_suse,suse,15.1,,9.6,9.6.21) + $(call build_suse,suse,15.1,,9.6,9.6.23) touch build/suse_15.1_9.6 build/suse_15.1_10: - $(call build_suse,suse,15.1,,10,10.16) + $(call build_suse,suse,15.1,,10,10.18) touch build/suse_15.1_10 build/suse_15.1_11: - $(call build_suse,suse,15.1,,11,11.11) + $(call build_suse,suse,15.1,,11,11.13) touch build/suse_15.1_11 build/suse_15.1_12: - $(call build_suse,suse,15.1,,12,12.6) + $(call build_suse,suse,15.1,,12,12.8) touch build/suse_15.1_12 build/suse_15.1_13: - $(call build_suse,suse,15.1,,13,13.2) + $(call build_suse,suse,15.1,,13,13.4) touch build/suse_15.1_13 +build/suse_15.1_14: + $(call build_suse,suse,15.1,,14,14.0) + touch build/suse_15.1_14 + # Suse 15.2 build/suse_15.2_9.5: $(call build_suse,suse,15.2,,9.5,9.5.25) touch build/suse_15.2_9.5 build/suse_15.2_9.6: - $(call build_suse,suse,15.2,,9.6,9.6.21) + $(call build_suse,suse,15.2,,9.6,9.6.23) touch build/suse_15.2_9.6 build/suse_15.2_10: - $(call build_suse,suse,15.2,,10,10.16) + $(call build_suse,suse,15.2,,10,10.18) touch build/suse_15.2_10 build/suse_15.2_11: - $(call build_suse,suse,15.2,,11,11.11) + $(call build_suse,suse,15.2,,11,11.13) touch build/suse_15.2_11 build/suse_15.2_12: - $(call build_suse,suse,15.2,,12,12.6) + $(call build_suse,suse,15.2,,12,12.8) touch build/suse_15.2_12 build/suse_15.2_13: - $(call build_suse,suse,15.2,,13,13.2) + $(call build_suse,suse,15.2,,13,13.4) touch build/suse_15.2_13 + +build/suse_15.2_14: + $(call build_suse,suse,15.2,,14,14.0) + touch build/suse_15.2_14 diff --git a/packaging/pkg/Makefile.ubuntu b/packaging/pkg/Makefile.ubuntu index 3f76de516..88803c64f 100644 --- a/packaging/pkg/Makefile.ubuntu +++ b/packaging/pkg/Makefile.ubuntu @@ -4,96 +4,113 @@ build/ubuntu_20.04_9.5: touch build/ubuntu_20.04_9.5 build/ubuntu_20.04_9.6: - $(call build_deb,ubuntu,20.04,focal,9.6,9.6.21) + $(call build_deb,ubuntu,20.04,focal,9.6,9.6.23) touch build/ubuntu_20.04_9.6 build/ubuntu_20.04_10: - $(call build_deb,ubuntu,20.04,focal,10,10.16) + $(call build_deb,ubuntu,20.04,focal,10,10.18) touch build/ubuntu_20.04_10 build/ubuntu_20.04_11: - $(call build_deb,ubuntu,20.04,focal,11,11.11) + $(call build_deb,ubuntu,20.04,focal,11,11.13) touch build/ubuntu_20.04_11 build/ubuntu_20.04_12: - $(call build_deb,ubuntu,20.04,focal,12,12.6) + $(call build_deb,ubuntu,20.04,focal,12,12.8) touch build/ubuntu_20.04_12 build/ubuntu_20.04_13: - $(call build_deb,ubuntu,20.04,focal,13,13.2) + $(call build_deb,ubuntu,20.04,focal,13,13.4) touch build/ubuntu_20.04_13 +build/ubuntu_20.04_14: + $(call build_deb,ubuntu,20.04,focal,14,14.0) + touch build/ubuntu_20.04_14 + # UBUNTU 18.04 build/ubuntu_18.04_9.5: $(call build_deb,ubuntu,18.04,bionic,9.5,9.5.25) touch build/ubuntu_18.04_9.5 build/ubuntu_18.04_9.6: - $(call build_deb,ubuntu,18.04,bionic,9.6,9.6.21) + $(call 
build_deb,ubuntu,18.04,bionic,9.6,9.6.23) touch build/ubuntu_18.04_9.6 build/ubuntu_18.04_10: - $(call build_deb,ubuntu,18.04,bionic,10,10.16) + $(call build_deb,ubuntu,18.04,bionic,10,10.18) touch build/ubuntu_18.04_10 build/ubuntu_18.04_11: - $(call build_deb,ubuntu,18.04,bionic,11,11.11) + $(call build_deb,ubuntu,18.04,bionic,11,11.13) touch build/ubuntu_18.04_11 build/ubuntu_18.04_12: - $(call build_deb,ubuntu,18.04,bionic,12,12.6) + $(call build_deb,ubuntu,18.04,bionic,12,12.8) touch build/ubuntu_18.04_12 build/ubuntu_18.04_13: - $(call build_deb,ubuntu,18.04,bionic,13,13.2) + $(call build_deb,ubuntu,18.04,bionic,13,13.4) touch build/ubuntu_18.04_13 +build/ubuntu_18.04_14: + $(call build_deb,ubuntu,18.04,bionic,14,14.0) + touch build/ubuntu_18.04_14 + # UBUNTU 16.04 build/ubuntu_16.04_9.5: $(call build_deb,ubuntu,16.04,xenial,9.5,9.5.25) touch build/ubuntu_16.04_9.5 build/ubuntu_16.04_9.6: - $(call build_deb,ubuntu,16.04,xenial,9.6,9.6.21) + $(call build_deb,ubuntu,16.04,xenial,9.6,9.6.23) touch build/ubuntu_16.04_9.6 build/ubuntu_16.04_10: - $(call build_deb,ubuntu,16.04,xenial,10,10.16) + $(call build_deb,ubuntu,16.04,xenial,10,10.18) touch build/ubuntu_16.04_10 build/ubuntu_16.04_11: - $(call build_deb,ubuntu,16.04,xenial,11,11.11) + $(call build_deb,ubuntu,16.04,xenial,11,11.13) touch build/ubuntu_16.04_11 build/ubuntu_16.04_12: - $(call build_deb,ubuntu,16.04,xenial,12,12.6) + $(call build_deb,ubuntu,16.04,xenial,12,12.8) touch build/ubuntu_16.04_12 build/ubuntu_16.04_13: - $(call build_deb,ubuntu,16.04,xenial,13,13.2) + $(call build_deb,ubuntu,16.04,xenial,13,13.4) touch build/ubuntu_16.04_13 +build/ubuntu_16.04_14: + $(call build_deb,ubuntu,16.04,xenial,14,14.0) + touch build/ubuntu_16.04_14 + + # UBUNTU 14.04 build/ubuntu_14.04_9.5: $(call build_deb,ubuntu,14.04,trusty,9.5,9.5.25) touch build/ubuntu_14.04_9.5 build/ubuntu_14.04_9.6: - $(call build_deb,ubuntu,14.04,trusty,9.6,9.6.21) + $(call build_deb,ubuntu,14.04,trusty,9.6,9.6.23) touch build/ubuntu_14.04_9.6 build/ubuntu_14.04_10: - $(call build_deb,ubuntu,14.04,trusty,10,10.16) + $(call build_deb,ubuntu,14.04,trusty,10,10.18) touch build/ubuntu_14.04_10 build/ubuntu_14.04_11: - $(call build_deb,ubuntu,14.04,trusty,11,11.11) + $(call build_deb,ubuntu,14.04,trusty,11,11.13) touch build/ubuntu_14.04_11 build/ubuntu_14.04_12: - $(call build_deb,ubuntu,14.04,trusty,12,12.6) + $(call build_deb,ubuntu,14.04,trusty,12,12.8) touch build/ubuntu_14.04_12 build/ubuntu_14.04_13: - $(call build_deb,ubuntu,14.04,trusty,13,13.2) + $(call build_deb,ubuntu,14.04,trusty,13,13.4) touch build/ubuntu_14.04_13 + +build/ubuntu_14.04_14: + $(call build_deb,ubuntu,14.04,trusty,14,14.0) + touch build/ubuntu_14.04_14 diff --git a/packaging/pkg/scripts/deb.sh b/packaging/pkg/scripts/deb.sh index 2fe2018b6..6e134635a 100755 --- a/packaging/pkg/scripts/deb.sh +++ b/packaging/pkg/scripts/deb.sh @@ -11,20 +11,20 @@ set -o pipefail # fix https://github.com/moby/moby/issues/23137 ulimit -n 1024 -# THere is no std/ent packages for PG 9.5 +# There is no std/ent packages for PG 9.5 if [[ ${PG_VERSION} == '9.5' ]] && [[ ${PBK_EDITION} != '' ]] ; then exit 0 fi # PACKAGES NEEDED -apt-get update -y && apt-get install -y git wget bzip2 devscripts equivs +apt-get --allow-releaseinfo-change update -y && apt-get install -y git wget bzip2 devscripts equivs # Prepare export DEBIAN_FRONTEND=noninteractive echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections if [ ${CODENAME} == 'jessie' ]; then -printf "deb 
http://archive.debian.org/debian/ jessie main\ndeb-src http://archive.debian.org/debian/ jessie main\ndeb http://security.debian.org jessie/updates main\ndeb-src http://security.debian.org jessie/updates main" > /etc/apt/sources.list + printf "deb http://archive.debian.org/debian/ jessie main\ndeb-src http://archive.debian.org/debian/ jessie main\ndeb http://security.debian.org jessie/updates main\ndeb-src http://security.debian.org jessie/updates main" > /etc/apt/sources.list fi apt-get -qq update -y diff --git a/packaging/pkg/scripts/rpm.sh b/packaging/pkg/scripts/rpm.sh index fc95bf7dd..ffd681b75 100755 --- a/packaging/pkg/scripts/rpm.sh +++ b/packaging/pkg/scripts/rpm.sh @@ -23,6 +23,11 @@ if [[ ${PG_VERSION} == '9.5' ]] && [[ ${PBK_EDITION} != '' ]] ; then exit 0 fi +if [ -f /etc/centos-release ] ; then + sed -i 's|^baseurl=http://|baseurl=https://|g' /etc/yum.repos.d/*.repo + yum update -y +fi + # PACKAGES NEEDED yum install -y git wget bzip2 rpm-build @@ -67,11 +72,11 @@ else cd /root/rpmbuild/SOURCES/pgpro PGPRO_TOC=$(echo ${PG_FULL_VERSION} | sed 's|\.|_|g') - if [[ ${PBK_EDITION} == 'std' ]] ; then - git checkout "PGPRO${PGPRO_TOC}_1" - else - git checkout "PGPROEE${PGPRO_TOC}_1" - fi + if [[ ${PBK_EDITION} == 'std' ]] ; then + git checkout "PGPRO${PGPRO_TOC}_1" + else + git checkout "PGPROEE${PGPRO_TOC}_1" + fi rm -rf .git cd /root/rpmbuild/SOURCES/ @@ -110,7 +115,7 @@ else sed -i "s/@PG_FULL_VERSION@/${PG_FULL_VERSION}/" pg_probackup-pgpro.spec if [ ${PG_VERSION} != '9.6' ]; then - sed -i "s|@PREFIX@|/opt/pgpro/${EDITION}-${PG_VERSION}|g" pg_probackup-pgpro.spec + sed -i "s|@PREFIX@|/opt/pgpro/${EDITION}-${PG_VERSION}|g" pg_probackup-pgpro.spec fi sed -i "s/@PKG_VERSION@/${PKG_VERSION}/" pg_probackup-repo-forks.spec @@ -145,4 +150,4 @@ else # write artefacts to out directory rm -rf /app/out/* cp -arv /root/rpmbuild/RPMS /app/out -fi +fi From bd81f7fc12df7b0f45c5ce9105495c74f7da582f Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Wed, 13 Oct 2021 21:18:03 +0300 Subject: [PATCH 207/525] =?UTF-8?q?[PGPRO-5673]=20add=20missing=20grants?= =?UTF-8?q?=20(caused=20by=20CVE-2018-1058=20fixes=20#415=20P=E2=80=A6=20(?= =?UTF-8?q?#441)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [PGPRO-5673] add missing grants (caused by CVE-2018-1058 fixes #415 PGPRO-5315) * tests.backup.BackupTest.test_missing_replication_permission_1: fix test for changed 14s output * tests.backup.BackupTest.test_missing_replication_permission: fix test for 9.5 * tests.checkdb.CheckdbTest.test_checkdb_with_least_privileges: remove grant for nonexistent (in 10) bt_index_check(regclass, bool) * tests.checkdb.CheckdbTest.test_checkdb_with_least_privileges: remove grant for nonexistent (in 9.5) pg_catalog.pg_control_system() * tests.checkdb.CheckdbTest.test_checkdb_with_least_privileges: remove grant for nonexistent (in amcheck_next) bt_index_check(regclass) * adapt tests/restore.py to Python-3.5 (used in travis tests) * skip issue_313 test --- doc/pgprobackup.xml | 4 ++++ tests/backup.py | 40 ++++++++++++++++++++++++---------------- tests/checkdb.py | 35 +++++++++++++++++++++++++++++++---- tests/ptrack.py | 6 ++++++ tests/restore.py | 18 +++++++++++++----- 5 files changed, 78 insertions(+), 25 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 740517313..6a634ea05 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -606,6 +606,7 @@ BEGIN; CREATE ROLE backup WITH LOGIN; GRANT USAGE ON SCHEMA pg_catalog TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; @@ -624,6 +625,7 @@ BEGIN; CREATE ROLE backup WITH LOGIN; GRANT USAGE ON SCHEMA pg_catalog TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; @@ -644,6 +646,7 @@ BEGIN; CREATE ROLE backup WITH LOGIN; GRANT USAGE ON SCHEMA pg_catalog TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; @@ -5531,6 +5534,7 @@ BEGIN; CREATE ROLE backup WITH LOGIN REPLICATION; GRANT USAGE ON SCHEMA pg_catalog TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; diff --git a/tests/backup.py b/tests/backup.py index 60e70cc28..a2a242534 100644 --- a/tests/backup.py +++ 
b/tests/backup.py @@ -2020,10 +2020,12 @@ def test_backup_with_least_privileges_role(self): "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " @@ -2053,10 +2055,12 @@ def test_backup_with_least_privileges_role(self): "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " @@ -2091,8 +2095,10 @@ def test_backup_with_least_privileges_role(self): "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " @@ -3249,10 +3255,7 @@ def test_missing_replication_permission(self): if ProbackupTest.enterprise: node.safe_psql( "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup") - - node.safe_psql( - "backupdb", + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") sleep(2) @@ -3270,9 +3273,11 @@ def test_missing_replication_permission(self): "\n Output: {0} \n CMD: {1}".format( repr(self.output), self.cmd)) except ProbackupException as e: - self.assertIn( - "FATAL: must be superuser or replication role to start walsender", + # 9.5: ERROR: must be superuser or replication role to run a backup + # >=9.6: FATAL: must be superuser or replication role to start 
walsender + self.assertRegex( e.message, + "ERROR: must be superuser or replication role to run a backup|FATAL: must be superuser or replication role to start walsender", "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) @@ -3330,7 +3335,8 @@ def test_missing_replication_permission_1(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) # PG 9.6 elif self.get_version(node) > 90600 and self.get_version(node) < 100000: node.safe_psql( @@ -3353,7 +3359,8 @@ def test_missing_replication_permission_1(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) # >= 10 else: node.safe_psql( @@ -3381,10 +3388,7 @@ def test_missing_replication_permission_1(self): if ProbackupTest.enterprise: node.safe_psql( "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup") - - node.safe_psql( - "backupdb", + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") replica.promote() @@ -3398,10 +3402,14 @@ def test_missing_replication_permission_1(self): self.assertIn( 'WARNING: Valid full backup on current timeline 2 is not found, trying to look up on previous timelines', output) - - self.assertIn( - 'WARNING: could not connect to database backupdb: FATAL: must be superuser or replication role to start walsender', - output) + + # Messages before 14 + # 'WARNING: could not connect to database backupdb: FATAL: must be superuser or replication role to start walsender' + # Messages for >=14 + # 'WARNING: could not connect to database backupdb: connection to server on socket "/tmp/.s.PGSQL.30983" failed: FATAL: must be superuser or replication role to start walsender' + self.assertRegex( + output, + r'WARNING: could not connect to database backupdb: (connection to server on socket "/tmp/.s.PGSQL.\d+" failed: ){0,1}FATAL: must be superuser or replication role to start walsender') # Clean after yourself self.del_test_dir(module_name, fname) diff --git a/tests/checkdb.py b/tests/checkdb.py index aecd4bde1..fcc40b2bf 100644 --- a/tests/checkdb.py +++ b/tests/checkdb.py @@ -562,15 +562,14 @@ def test_checkdb_with_least_privileges(self): 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' 'GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_is_in_recovery() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' # amcheck-next function ) # PG 9.6 elif self.get_version(node) > 90600 and self.get_version(node) < 100000: @@ -588,6 +587,7 @@ def test_checkdb_with_least_privileges(self): 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' @@ -598,7 +598,33 @@ def test_checkdb_with_least_privileges(self): # 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' ) - # >= 10 + # PG 10 + elif self.get_version(node) > 100000 and self.get_version(node) < 110000: + node.safe_psql( + 'backupdb', + 'CREATE ROLE backup WITH LOGIN; ' + 'GRANT CONNECT ON DATABASE backupdb to backup; ' + 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' + 'GRANT USAGE ON SCHEMA public TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup;' + ) + # >= 11 else: node.safe_psql( 'backupdb', @@ -614,6 +640,7 @@ def test_checkdb_with_least_privileges(self): 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' diff --git a/tests/ptrack.py b/tests/ptrack.py index aa0bbadc1..7a1090a81 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -402,10 +402,12 @@ def test_ptrack_unprivileged(self): "GRANT USAGE ON SCHEMA pg_catalog TO backup; " "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " "GRANT SELECT ON TABLE 
pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " @@ -434,10 +436,12 @@ def test_ptrack_unprivileged(self): "GRANT USAGE ON SCHEMA pg_catalog TO backup; " "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " @@ -470,8 +474,10 @@ def test_ptrack_unprivileged(self): "GRANT USAGE ON SCHEMA pg_catalog TO backup; " "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " diff --git a/tests/restore.py b/tests/restore.py index d0353d05f..47419e5a9 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -4,7 +4,7 @@ import subprocess import sys from time import sleep -from datetime import datetime, timedelta +from datetime import datetime, timedelta, timezone import hashlib import shutil import json @@ -2140,7 +2140,8 @@ def test_restore_target_new_options(self): target_name = 'savepoint' - target_time = datetime.now().astimezone().strftime("%Y-%m-%d %H:%M:%S %z") + # in python-3.6+ it can be ...now()..astimezone()... 
+ target_time = datetime.utcnow().replace(tzinfo=timezone.utc).astimezone().strftime("%Y-%m-%d %H:%M:%S %z") with node.connect("postgres") as con: res = con.execute( "INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") @@ -2503,7 +2504,7 @@ def test_partial_restore_exclude(self): db_list_raw = node.safe_psql( 'postgres', 'SELECT to_json(a) ' - 'FROM (SELECT oid, datname FROM pg_database) a').rstrip() + 'FROM (SELECT oid, datname FROM pg_database) a').decode('utf-8').rstrip() db_list_splitted = db_list_raw.splitlines() @@ -2742,7 +2743,7 @@ def test_partial_restore_include(self): db_list_raw = node.safe_psql( 'postgres', 'SELECT to_json(a) ' - 'FROM (SELECT oid, datname FROM pg_database) a').rstrip() + 'FROM (SELECT oid, datname FROM pg_database) a').decode('utf-8').rstrip() db_list_splitted = db_list_raw.splitlines() @@ -3222,10 +3223,12 @@ def test_missing_database_map(self): "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " @@ -3255,10 +3258,12 @@ def test_missing_database_map(self): "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " @@ -3292,8 +3297,10 @@ def test_missing_database_map(self): "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, 
boolean) TO backup; " @@ -3868,7 +3875,8 @@ def test_concurrent_restore(self): # Clean after yourself self.del_test_dir(module_name, fname) - # @unittest.skip("skip") + # skip this test until https://github.com/postgrespro/pg_probackup/pull/399 + @unittest.skip("skip") def test_restore_issue_313(self): """ Check that partially restored PostgreSQL instance cannot be started From 76acd88e1555b0f9ab360ef5021bfa074a5f01d1 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Thu, 14 Oct 2021 14:40:13 +0300 Subject: [PATCH 208/525] Stabilize tests.catchup.CatchupTest.test_tli_ptrack_catchup --- tests/catchup.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/catchup.py b/tests/catchup.py index 45d999629..79ebdec9f 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -357,6 +357,11 @@ def test_tli_ptrack_catchup(self): self.set_replica(dst_pg, src_pg) # fake replication src_pg.slow_start(replica = True) src_pg.promote() + + src_pg.safe_psql("postgres", "CHECKPOINT") # force postgres to update tli in 'SELECT timeline_id FROM pg_catalog.pg_control_checkpoint()' + src_tli = src_pg.safe_psql("postgres", "SELECT timeline_id FROM pg_catalog.pg_control_checkpoint()").decode('utf-8').rstrip() + self.assertEqual(src_tli, "2", "Postgres didn't update TLI after promote") + src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer") src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") From de497aad52697ad3746bd2471153aefca1e86b87 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Thu, 14 Oct 2021 15:11:08 +0300 Subject: [PATCH 209/525] Stabilize tests.ptrack.PtrackTest.test_horizon_lsn_ptrack --- tests/helpers/ptrack_helpers.py | 7 +++++++ tests/ptrack.py | 14 ++++---------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 90cfb7be0..6db6fa04d 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1602,6 +1602,13 @@ def get_version(self, node): return self.version_to_num( testgres.get_pg_config()['VERSION'].split(" ")[1]) + def get_ptrack_version(self, node): + version = node.safe_psql( + "postgres", + "SELECT extversion " + "FROM pg_catalog.pg_extension WHERE extname = 'ptrack'").decode('utf-8').rstrip() + return self.version_to_num(version) + def get_bin_path(self, binary): return testgres.get_bin_path(binary) diff --git a/tests/ptrack.py b/tests/ptrack.py index a20be54b8..3dfbea0a0 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -4465,16 +4465,10 @@ def test_horizon_lsn_ptrack(self): "postgres", "CREATE EXTENSION ptrack") - # TODO: ptrack version must be 2.1 - ptrack_version = node.safe_psql( - "postgres", - "SELECT extversion " - "FROM pg_catalog.pg_extension WHERE extname = 'ptrack'").decode('utf-8').rstrip() - - self.assertEqual( - ptrack_version, - "2.1", - "You need ptrack 2.1 for this test") + self.assertGreaterEqual( + self.get_ptrack_version(node), + self.version_to_num("2.1"), + "You need ptrack >=2.1 for this test") # set map_size to a minimal value self.set_auto_conf(node, {'ptrack.map_size': '1'}) From da5eb961c4e4c37ea82a4a018abd41050ba5ca49 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Thu, 14 Oct 2021 16:06:10 +0300 Subject: [PATCH 210/525] Stabilize tests.backup.BackupTest.test_backup_modes_archive --- tests/backup.py | 8 ++++---- tests/helpers/ptrack_helpers.py | 6 ++++++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/tests/backup.py b/tests/backup.py index 3548fa56b..d59445337 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -57,16 +57,16 @@ def test_backup_modes_archive(self): backup_dir, 'node', node, backup_type="page") show_backup_1 = self.show_pb(backup_dir, 'node')[1] - self.assertEqual(show_backup['status'], "OK") - self.assertEqual(show_backup['backup-mode'], "PAGE") + self.assertEqual(show_backup_1['status'], "OK") + self.assertEqual(show_backup_1['backup-mode'], "PAGE") # delta backup mode delta_backup_id = self.backup_node( backup_dir, 'node', node, backup_type="delta") show_backup_2 = self.show_pb(backup_dir, 'node')[2] - self.assertEqual(show_backup['status'], "OK") - self.assertEqual(show_backup['backup-mode'], "DELTA") + self.assertEqual(show_backup_2['status'], "OK") + self.assertEqual(show_backup_2['backup-mode'], "DELTA") # Check parent backup self.assertEqual( diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 6db6fa04d..38977f108 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -980,6 +980,12 @@ def backup_node( if not old_binary: cmd_list += ['--no-sync'] + if self.verbose: + cmd_list += [ + '--log-level-file=VERBOSE', + '--log-directory={0}'.format(node.logs_dir) + ] + return self.run_pb(cmd_list + options, asynchronous, gdb, old_binary, return_id, env=env) def checkdb_node( From 21abadfff731f95c720c9dd217d8eb0a8cd880ce Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Thu, 14 Oct 2021 16:23:49 +0300 Subject: [PATCH 211/525] Fix broken in f26c95964 tests.config.ConfigTest.test_corrupt_backup_content --- src/catalog.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/catalog.c b/src/catalog.c index 9775968b8..a4af1d2a3 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1144,7 +1144,9 @@ get_backup_filelist(pgBackup *backup, bool strict) { elog(WARNING, "Invalid CRC of backup control file '%s': %u. Expected: %u", backup_filelist_path, content_crc, backup->content_crc); - return NULL; + parray_free(files); + files = NULL; + } /* redundant sanity? */ From 80e88588c7c798b6bca8b284bd3d1c09a5828993 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Thu, 14 Oct 2021 16:52:25 +0300 Subject: [PATCH 212/525] Fix broken in da5eb96 tests.validate.ValidateTest.test_basic_validate_nullified_heap_page_backup --- tests/helpers/ptrack_helpers.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 38977f108..6db6fa04d 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -980,12 +980,6 @@ def backup_node( if not old_binary: cmd_list += ['--no-sync'] - if self.verbose: - cmd_list += [ - '--log-level-file=VERBOSE', - '--log-directory={0}'.format(node.logs_dir) - ] - return self.run_pb(cmd_list + options, asynchronous, gdb, old_binary, return_id, env=env) def checkdb_node( From 01db7adecaf72c06f73c679133fdf1c682140b8d Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Thu, 14 Oct 2021 17:27:57 +0300 Subject: [PATCH 213/525] test_ptrack_vacuum_full name was duplicated, rename one of them --- tests/ptrack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ptrack.py b/tests/ptrack.py index 3dfbea0a0..5282649ce 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -3615,7 +3615,7 @@ def test_ptrack_vacuum_bits_visibility(self): # @unittest.skip("skip") # @unittest.expectedFailure - def test_ptrack_vacuum_full(self): + def test_ptrack_vacuum_full_2(self): node = self.make_simple_node( base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, From c83a8d4ab97f6e57054c31ec5836d03c65522b1c Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Thu, 14 Oct 2021 18:10:48 +0300 Subject: [PATCH 214/525] Stabilize tests.ptrack.PtrackTest.test_ptrack_vacuum_full_2 --- tests/ptrack.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/ptrack.py b/tests/ptrack.py index 5282649ce..71a1ddfd9 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -3619,7 +3619,8 @@ def test_ptrack_vacuum_full_2(self): node = self.make_simple_node( base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, - ptrack_enable=True) + ptrack_enable=True, + pg_options={ 'wal_log_hints': 'on' }) backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') self.init_pb(backup_dir) From bd79fbbbbb2d0867e9ffe9b9fbf50da2e6d1ee3a Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Thu, 14 Oct 2021 18:37:05 +0300 Subject: [PATCH 215/525] test_ptrack_vacuum_truncate name was duplicated, rename one of them --- tests/ptrack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ptrack.py b/tests/ptrack.py index 71a1ddfd9..e482d8f41 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -3794,7 +3794,7 @@ def test_ptrack_vacuum_full_replica(self): # @unittest.skip("skip") # @unittest.expectedFailure - def test_ptrack_vacuum_truncate(self): + def test_ptrack_vacuum_truncate_2(self): node = self.make_simple_node( base_dir=os.path.join(module_name, self.fname, 'node'), set_replication=True, From 0545dd4a9257702944efd18b55a31fa8cfd940e7 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Fri, 15 Oct 2021 12:53:07 +0300 Subject: [PATCH 216/525] Running tests.ptrack.PtrackTest.test_horizon_lsn_ptrack now depends on PGPROBACKUPBIN_OLD --- tests/ptrack.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/ptrack.py b/tests/ptrack.py index e482d8f41..e8c291a79 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -4441,11 +4441,14 @@ def test_corrupt_ptrack_map(self): # Clean after yourself self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") + # @unittest.skip("skip") def test_horizon_lsn_ptrack(self): """ https://github.com/postgrespro/pg_probackup/pull/386 """ + if not self.probackup_old_path: + self.skipTest("You must specify PGPROBACKUPBIN_OLD" + " for run this test") self.assertLessEqual( self.version_to_num(self.old_probackup_version), self.version_to_num('2.4.15'), From d9ba5d05ea0dba342b88ab1fdc139edc2fb8aafe Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Sat, 16 Oct 2021 18:25:43 +0300 Subject: [PATCH 217/525] Change README for upcoming 2.5.2 release --- README.md | 70 +++++++++++++++++++++++++++---------------------------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/README.md b/README.md index b7e170cf5..95581ecfc 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ `pg_probackup` is a utility to manage backup and recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure. The utility is compatible with: -* PostgreSQL 9.5, 9.6, 10, 11, 12, 13, 14; +* PostgreSQL 9.6, 10, 11, 12, 13, 14; As compared to other backup solutions, `pg_probackup` offers the following benefits that can help you implement different backup strategies and deal with large amounts of data: * Incremental backup: page-level incremental backup allows you to save disk space, speed up backup and restore. With three different incremental modes, you can plan the backup strategy in accordance with your data flow. @@ -60,7 +60,7 @@ Documentation can be found at [github](https://postgrespro.github.io/pg_probacku * Stable version state can be found under the respective [release tag](https://github.com/postgrespro/pg_probackup/releases). * `master` branch contains minor fixes that are planned to the nearest minor release. -* Upcoming major release is developed in a release branch i.e. `release_2_5`. +* Upcoming major release is developed in a release branch i.e. `release_2_6`. For detailed release plans check [Milestones](https://github.com/postgrespro/pg_probackup/milestones) @@ -74,57 +74,57 @@ Installers are available in release **assets**. 
[Latests](https://github.com/pos #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{13,12,11,10,9.6,9.5} -sudo apt-get install pg-probackup-{13,12,11,10,9.6,9.5}-dbg +sudo apt-get install pg-probackup-{14,13,12,11,10,9.6} +sudo apt-get install pg-probackup-{14,13,12,11,10,9.6}-dbg #DEB-SRC Packages sudo sh -c 'echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\ /etc/apt/sources.list.d/pg_probackup.list' && sudo apt-get update -sudo apt-get source pg-probackup-{13,12,11,10,9.6,9.5} +sudo apt-get source pg-probackup-{14,13,12,11,10,9.6} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm -yum install pg_probackup-{13,12,11,10,9.6,9.5} -yum install pg_probackup-{13,12,11,10,9.6,9.5}-debuginfo +yum install pg_probackup-{14,13,12,11,10,9.6} +yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-rhel.noarch.rpm -yum install pg_probackup-{13,12,11,10,9.6,9.5} -yum install pg_probackup-{13,12,11,10,9.6,9.5}-debuginfo +yum install pg_probackup-{14,13,12,11,10,9.6} +yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo #RPM Oracle Linux Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-oraclelinux.noarch.rpm -yum install pg_probackup-{13,12,11,10,9.6,9.5} -yum install pg_probackup-{13,12,11,10,9.6,9.5}-debuginfo +yum install pg_probackup-{14,13,12,11,10,9.6} +yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo #SRPM Centos|RHEL|OracleLinux Packages -yumdownloader --source pg_probackup-{13,12,11,10,9.6,9.5} +yumdownloader --source pg_probackup-{14,13,12,11,10,9.6} #RPM SUSE|SLES Packages zypper install --allow-unsigned-rpm -y https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-suse.noarch.rpm -zypper --gpg-auto-import-keys install -y pg_probackup-{13,12,11,10,9.6,9.5} -zypper install pg_probackup-{13,12,11,10,9.6,9.5}-debuginfo +zypper --gpg-auto-import-keys install -y pg_probackup-{14,13,12,11,10,9.6} +zypper install pg_probackup-{14,13,12,11,10,9.6}-debuginfo #SRPM SUSE|SLES Packages -zypper si pg_probackup-{13,12,11,10,9.6,9.5} +zypper si pg_probackup-{14,13,12,11,10,9.6} #RPM ALT Linux 7 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p7 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{13,12,11,10,9.6,9.5} -sudo apt-get install pg_probackup-{13,12,11,10,9.6,9.5}-debuginfo +sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} +sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo #RPM ALT Linux 8 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p8 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{13,12,11,10,9.6,9.5} -sudo apt-get install pg_probackup-{13,12,11,10,9.6,9.5}-debuginfo +sudo apt-get 
install pg_probackup-{14,13,12,11,10,9.6} +sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p9 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{13,12,11,10,9.6,9.5} -sudo apt-get install pg_probackup-{13,12,11,10,9.6,9.5}-debuginfo +sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} +sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo ``` #### pg_probackup for PostgresPro Standard and Enterprise @@ -132,40 +132,40 @@ sudo apt-get install pg_probackup-{13,12,11,10,9.6,9.5}-debuginfo #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup-forks.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{std,ent}-{12,11,10,9.6} -sudo apt-get install pg-probackup-{std,ent}-{12,11,10,9.6}-dbg +sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10,9.6} +sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10,9.6}-dbg #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-centos.noarch.rpm -yum install pg_probackup-{std,ent}-{12,11,10,9.6} -yum install pg_probackup-{std,ent}-{12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} +yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-rhel.noarch.rpm -yum install pg_probackup-{std,ent}-{12,11,10,9.6} -yum install pg_probackup-{std,ent}-{12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} +yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo #RPM Oracle Linux Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-oraclelinux.noarch.rpm -yum install pg_probackup-{std,ent}-{12,11,10,9.6} -yum install pg_probackup-{std,ent}-{12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} +yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo #RPM ALT Linux 7 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p7 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo #RPM ALT Linux 8 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p8 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm 
https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p9 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' && sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo ``` Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.github.io/pg_probackup/#pbk-install-and-setup). From eb7eb165810fe5fab0ad208168043dcd697eebad Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Sat, 16 Oct 2021 18:31:14 +0300 Subject: [PATCH 218/525] PTRACK now provides a separate patch for version 14 (see 3d6ccc6 ptrack commit), update travis testing configuration to use this standalone patch --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 873dd8f20..70a906e57 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,8 +26,8 @@ notifications: # Default MODE is basic, i.e. all tests with PG_PROBACKUP_TEST_BASIC=ON env: - - PG_VERSION=15 PG_BRANCH=master PTRACK_PATCH_PG_VERSION=13 - - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_VERSION=13 + - PG_VERSION=15 PG_BRANCH=master PTRACK_PATCH_PG_VERSION=master + - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_VERSION=14 - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_VERSION=12 - PG_VERSION=11 PG_BRANCH=REL_11_STABLE PTRACK_PATCH_PG_VERSION=11 From e22cb930341d8fb83ce7d919e8bbb712ddb6ee85 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Sat, 16 Oct 2021 23:08:04 +0300 Subject: [PATCH 219/525] Follow-up to 7ca590c6c, fix tests.backup.BackupTest.test_backup_modes_archive --- tests/backup.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/backup.py b/tests/backup.py index d59445337..a68dd48d3 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -21,8 +21,7 @@ def test_backup_modes_archive(self): fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - ptrack_enable=True) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) From 034c597cf8e4ebdd765495aeabaa9da5ffdb40d6 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Sat, 16 Oct 2021 23:11:49 +0300 Subject: [PATCH 220/525] Follow-up to 7ca590c6c, fix tests.backup.BackupTest.test_incremental_backup_without_full --- tests/backup.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/backup.py b/tests/backup.py index a68dd48d3..3c5f81c91 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -112,8 +112,7 @@ def test_incremental_backup_without_full(self): fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums'], - ptrack_enable=True) + initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) From 807df12d3dc816927673eeedfcd3d0f846d0ac42 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Sat, 16 Oct 2021 23:30:27 +0300 Subject: [PATCH 221/525] =?UTF-8?q?travis:=20more=20universal=20indication?= =?UTF-8?q?=20of=20the=20name=20of=20the=20ptra=D1=81k=20patch?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .travis.yml | 35 ++++++++++++++++++----------------- travis/Dockerfile.in | 2 +- travis/make_dockerfile.sh | 8 ++++---- travis/run_tests.sh | 4 ++-- 4 files changed, 25 insertions(+), 24 deletions(-) diff --git a/.travis.yml b/.travis.yml index 70a906e57..4e86b2e29 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,30 +26,31 @@ notifications: # Default MODE is basic, i.e. all tests with PG_PROBACKUP_TEST_BASIC=ON env: - - PG_VERSION=15 PG_BRANCH=master PTRACK_PATCH_PG_VERSION=master - - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_VERSION=14 - - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_VERSION=12 - - PG_VERSION=11 PG_BRANCH=REL_11_STABLE PTRACK_PATCH_PG_VERSION=11 + - PG_VERSION=15 PG_BRANCH=master PTRACK_PATCH_PG_BRANCH=master + - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE + - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE + - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_BRANCH=REL_12_STABLE + - PG_VERSION=11 PG_BRANCH=REL_11_STABLE PTRACK_PATCH_PG_BRANCH=REL_11_STABLE - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=off MODE=archive -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 MODE=backup -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 MODE=catchup -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=off MODE=compression -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=off MODE=delta -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=off MODE=locking -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 MODE=merge -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=off MODE=page -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 MODE=ptrack -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 MODE=replica -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=off MODE=retention -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 MODE=restore +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=archive +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=compression +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=delta +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=locking +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=merge +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=page +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=ptrack +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=replica +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=retention +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE 
PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=restore jobs: allow_failures: - if: env(PG_BRANCH) = master + - if: env(PG_BRANCH) = 9.5 # - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) # Only run CI for master branch commits to limit our travis usage diff --git a/travis/Dockerfile.in b/travis/Dockerfile.in index e6bbedb61..a67663d3b 100644 --- a/travis/Dockerfile.in +++ b/travis/Dockerfile.in @@ -10,7 +10,7 @@ RUN python3 -m pip install virtualenv # Environment ENV PG_MAJOR=${PG_VERSION} PG_BRANCH=${PG_BRANCH} -ENV PTRACK_PATCH_PG_VERSION=${PTRACK_PATCH_PG_VERSION} +ENV PTRACK_PATCH_PG_BRANCH=${PTRACK_PATCH_PG_BRANCH} ENV PGPROBACKUP_GDB=${PGPROBACKUP_GDB} ENV LANG=C.UTF-8 PGHOME=/pg/testdir/pgbin diff --git a/travis/make_dockerfile.sh b/travis/make_dockerfile.sh index fc2742cdb..119125ced 100755 --- a/travis/make_dockerfile.sh +++ b/travis/make_dockerfile.sh @@ -14,8 +14,8 @@ if [ -z ${MODE+x} ]; then MODE=basic fi -if [ -z ${PTRACK_PATCH_PG_VERSION+x} ]; then - PTRACK_PATCH_PG_VERSION=off +if [ -z ${PTRACK_PATCH_PG_BRANCH+x} ]; then + PTRACK_PATCH_PG_BRANCH=off fi if [ -z ${PGPROBACKUP_GDB+x} ]; then @@ -25,13 +25,13 @@ fi echo PG_VERSION=${PG_VERSION} echo PG_BRANCH=${PG_BRANCH} echo MODE=${MODE} -echo PTRACK_PATCH_PG_VERSION=${PTRACK_PATCH_PG_VERSION} +echo PTRACK_PATCH_PG_BRANCH=${PTRACK_PATCH_PG_BRANCH} echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB} sed \ -e 's/${PG_VERSION}/'${PG_VERSION}/g \ -e 's/${PG_BRANCH}/'${PG_BRANCH}/g \ -e 's/${MODE}/'${MODE}/g \ - -e 's/${PTRACK_PATCH_PG_VERSION}/'${PTRACK_PATCH_PG_VERSION}/g \ + -e 's/${PTRACK_PATCH_PG_BRANCH}/'${PTRACK_PATCH_PG_BRANCH}/g \ -e 's/${PGPROBACKUP_GDB}/'${PGPROBACKUP_GDB}/g \ Dockerfile.in > Dockerfile diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 4a64fed80..44815407e 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -33,7 +33,7 @@ echo "############### Getting Postgres sources:" git clone https://github.com/postgres/postgres.git -b $PG_BRANCH --depth=1 # Clone ptrack -if [ "$PTRACK_PATCH_PG_VERSION" != "off" ]; then +if [ "$PTRACK_PATCH_PG_BRANCH" != "off" ]; then git clone https://github.com/postgrespro/ptrack.git -b master --depth=1 export PG_PROBACKUP_PTRACK=on else @@ -45,7 +45,7 @@ fi echo "############### Compiling Postgres:" cd postgres # Go to postgres dir if [ "$PG_PROBACKUP_PTRACK" = "on" ]; then - git apply -3 ../ptrack/patches/REL_${PTRACK_PATCH_PG_VERSION}_STABLE-ptrack-core.diff + git apply -3 ../ptrack/patches/${PTRACK_PATCH_PG_BRANCH}-ptrack-core.diff fi CFLAGS="-O0" ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests make -s -j$(nproc) install From 9ba98ad2ef7437a5c68e3f78695d512fba9d86e4 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Sat, 16 Oct 2021 23:33:16 +0300 Subject: [PATCH 222/525] travis: make 9.5 test allowed to fail --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 4e86b2e29..876289e82 100644 --- a/.travis.yml +++ b/.travis.yml @@ -50,7 +50,7 @@ env: jobs: allow_failures: - if: env(PG_BRANCH) = master - - if: env(PG_BRANCH) = 9.5 + - if: env(PG_BRANCH) = REL9_5_STABLE # - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) # Only run CI for master branch commits to limit our travis usage From 0c3aff742991589e2fd6b439eb84dd07af129a4b Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Sun, 17 Oct 2021 00:07:05 +0300 Subject: [PATCH 223/525] Fix tests.pgpro2068.BugTest.test_minrecpoint_on_replica on 9.6 --- tests/pgpro2068.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/pgpro2068.py b/tests/pgpro2068.py index fc0cb50bd..a80d317d4 100644 --- a/tests/pgpro2068.py +++ b/tests/pgpro2068.py @@ -144,7 +144,7 @@ def test_minrecpoint_on_replica(self): DO $$ relations = plpy.execute("select class.oid from pg_class class WHERE class.relkind IN ('r', 'i', 't', 'm') and class.relpersistence = 'p'") -current_xlog_lsn = plpy.execute("SELECT min_recovery_end_lsn as lsn FROM pg_control_recovery()")[0]['lsn'] +current_xlog_lsn = plpy.execute("SELECT min_recovery_end_location as lsn FROM pg_control_recovery()")[0]['lsn'] plpy.notice('CURRENT LSN: {0}'.format(current_xlog_lsn)) found_corruption = False for relation in relations: @@ -158,7 +158,7 @@ def test_minrecpoint_on_replica(self): found_corruption = True if found_corruption: plpy.error('Found Corruption') -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; ''' else: script = ''' From fc752c89026e9f692b728b8d8c3454a25d14c8fa Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Mon, 18 Oct 2021 05:51:12 +0300 Subject: [PATCH 224/525] Stabilize tests.checkdb.CheckdbTest.test_checkdb_with_least_privileges: accounting differences in amcheck versions in various editions of PG-10 (bd81f7f follow-up) --- tests/checkdb.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tests/checkdb.py b/tests/checkdb.py index fcc40b2bf..044c057f6 100644 --- a/tests/checkdb.py +++ b/tests/checkdb.py @@ -621,9 +621,18 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup;' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup;' ) + if ProbackupTest.enterprise: + # amcheck-1.1 + node.safe_psql( + 'backupdb', + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup') + else: + # amcheck-1.0 + node.safe_psql( + 'backupdb', + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup') # >= 11 else: node.safe_psql( From 49caaa3b570bd620101e96c5833210d9c3598a6e Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Mon, 18 Oct 2021 06:00:41 +0300 Subject: [PATCH 225/525] [skip travis] README.md update: fix info about supported versions of PGPRO with ptrack --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 95581ecfc..86d192ecb 100644 --- a/README.md +++ b/README.md @@ -42,8 +42,8 @@ Regardless of the chosen backup type, all backups taken with `pg_probackup` supp `PTRACK` backup support provided via following options: * vanilla PostgreSQL 11, 12, 13, 14 with [ptrack extension](https://github.com/postgrespro/ptrack) -* Postgres Pro Standard 9.6, 10, 11, 12, 13 -* Postgres Pro Enterprise 9.6, 10, 11, 12, 13 +* Postgres Pro Standard 11, 12, 13 +* Postgres Pro Enterprise 11, 12, 13 ## Limitations From 3cd69fb9038b51f029676a72b8a8092d45216cfa Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Mon, 18 Oct 2021 17:20:35 +0300 Subject: [PATCH 226/525] relaxation of the requirement for calling the old pg_catalog.ptrack_version() function --- src/ptrack.c | 10 +++++++--- tests/helpers/ptrack_helpers.py | 9 ++------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/src/ptrack.c b/src/ptrack.c index 3f395b286..ebcba1dd4 100644 --- a/src/ptrack.c +++ b/src/ptrack.c @@ -79,13 +79,17 @@ get_ptrack_version(PGconn *backup_conn, PGNodeInfo *nodeInfo) return; } - res_db = pgut_execute(backup_conn, + /* + * it's ok not to have permission to call this old function in PGPRO-11 version (ok_error = true) + * see deprication notice https://postgrespro.com/docs/postgrespro/11/release-pro-11-9-1 + */ + res_db = pgut_execute_extended(backup_conn, "SELECT pg_catalog.ptrack_version()", - 0, NULL); + 0, NULL, true, true); if (PQntuples(res_db) == 0) { - /* TODO: Something went wrong, should we error out here? */ PQclear(res_db); + elog(WARNING, "Can't call pg_catalog.ptrack_version(), it is assumed that there is no ptrack extension installed."); return; } ptrack_version_str = PQgetvalue(res_db, 0, 0); diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 6db6fa04d..5c0ce19bc 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -312,16 +312,11 @@ def __init__(self, *args, **kwargs): self.ptrack = False if 'PG_PROBACKUP_PTRACK' in self.test_env: if self.test_env['PG_PROBACKUP_PTRACK'] == 'ON': - self.ptrack = True + if self.pg_config_version >= self.version_to_num('11.0'): + self.ptrack = True os.environ["PGAPPNAME"] = "pg_probackup" - if self.ptrack: - self.assertGreaterEqual( - self.pg_config_version, - self.version_to_num('11.0'), - "ptrack testing require PostgreSQL >= 11") - @property def pg_config_version(self): return self.version_to_num( From a4308f07742a6b252ad56a81b1c13c41b220633e Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Mon, 18 Oct 2021 17:40:43 +0300 Subject: [PATCH 227/525] Fix tests.backup.BackupTest.test_missing_replication_permission_1: make it compatible with PGPROBACKUP_SSH_REMOTE=ON on PG14 (follow bd81f7fc1) --- tests/backup.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/backup.py b/tests/backup.py index b444c0fd9..b14f5fe98 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -3234,9 +3234,11 @@ def test_missing_replication_permission_1(self): # 'WARNING: could not connect to database backupdb: FATAL: must be superuser or replication role to start walsender' # Messages for >=14 # 'WARNING: could not connect to database backupdb: connection to server on socket "/tmp/.s.PGSQL.30983" failed: FATAL: must be superuser or replication role to start walsender' + # 'WARNING: could not connect to database backupdb: connection to server at "localhost" (127.0.0.1), port 29732 failed: FATAL: must be superuser or replication role to start walsender' self.assertRegex( output, - r'WARNING: could not connect to database backupdb: (connection to server on socket "/tmp/.s.PGSQL.\d+" failed: ){0,1}FATAL: must be superuser or replication role to start walsender') + r'WARNING: could not connect to database backupdb: (connection to server (on socket "/tmp/.s.PGSQL.\d+"|at "localhost" \(127.0.0.1\), port \d+) failed: ){0,1}' + 'FATAL: must be superuser or replication role to start walsender') # Clean after yourself self.del_test_dir(module_name, fname) From fd4b75ababb4152bfe53a8c1c2d9081304198915 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Mon, 18 Oct 2021 21:18:34 +0300 Subject: [PATCH 228/525] Adapt tests.replica.ReplicaTest.test_archive_replica_not_null_offset to pgpro enterprise edition --- tests/replica.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tests/replica.py b/tests/replica.py index fe33e9ac2..8fb89c222 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -980,15 +980,17 @@ def test_archive_replica_not_null_offset(self): "\n Output: {0} \n CMD: {1}".format( repr(self.output), self.cmd)) except ProbackupException as e: - self.assertIn( - 'LOG: Looking for LSN 0/4000060 in segment: 000000010000000000000004', + # vanilla -- 0/4000060 + # pgproee -- 0/4000078 + self.assertRegex( e.message, + r'LOG: Looking for LSN (0/4000060|0/4000078) in segment: 000000010000000000000004', "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - self.assertIn( - 'INFO: Wait for LSN 0/4000060 in archived WAL segment', + self.assertRegex( e.message, + r'INFO: Wait for LSN (0/4000060|0/4000078) in archived WAL segment', "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) From 73496c412496965bc18307cf7e93a6bf4fc1245a Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Thu, 14 Oct 2021 22:25:38 +0300 Subject: [PATCH 229/525] Fix broken tests.validate.ValidateTest.test_validate_instance_with_several_corrupt_backups_interrupt +fix small typo introduced in 02a3665 --- src/catalog.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 3ba17e9fd..54709f9c5 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -32,6 +32,8 @@ static int grab_excl_lock_file(const char *backup_dir, const char *backup_id, bo static int grab_shared_lock_file(pgBackup *backup); static int wait_shared_owners(pgBackup *backup); + +static void unlink_lock_atexit(bool fatal, void *userdata); static void unlock_backup(const char *backup_dir, const char *backup_id, bool exclusive); static void release_excl_lock_file(const char *backup_dir); static void release_shared_lock_file(const char *backup_dir); @@ -83,8 +85,8 @@ timelineInfoFree(void *tliInfo) } /* Iterate over locked backups and unlock them */ -static void -unlink_lock_atexit(void) +void +unlink_lock_atexit(bool unused_fatal, void *unused_userdata) { int i; @@ -94,7 +96,7 @@ unlink_lock_atexit(void) for (i = 0; i < parray_num(locks); i++) { LockInfo *lock = (LockInfo *) parray_get(locks, i); - unlock_backup(lock->backup_dir, lock->backup_dir, lock->exclusive); + unlock_backup(lock->backup_dir, lock->backup_id, lock->exclusive); } parray_walk(locks, pg_free); @@ -267,7 +269,7 @@ lock_backup(pgBackup *backup, bool strict, bool exclusive) */ if (!backup_lock_exit_hook_registered) { - atexit(unlink_lock_atexit); + pgut_atexit_push(unlink_lock_atexit, NULL); backup_lock_exit_hook_registered = true; } From 66dd4b26e45696d8f0ffe07e20b19f23eede2453 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Sun, 17 Oct 2021 19:37:04 +0300 Subject: [PATCH 230/525] [PGPRO-5705] remove snapfs (found in commits 9bf541b85, 8b8337047, 5c247d0ff) --- src/dir.c | 7 ------ tests/__init__.py | 3 +-- tests/snapfs.py | 61 ----------------------------------------------- 3 files changed, 1 insertion(+), 70 deletions(-) delete mode 100644 tests/snapfs.py diff --git a/src/dir.c b/src/dir.c index 00a4c4f82..eea7395c3 100644 --- a/src/dir.c +++ b/src/dir.c @@ -762,13 +762,6 @@ dir_check_file(pgFile *file, bool backup_logs) } else { - /* - * snapfs files: - * RELFILENODE.BLOCKNO.snapmap.SNAPID - * RELFILENODE.BLOCKNO.snap.SNAPID - */ - if (strstr(file->name, "snap") != NULL) - return true; len = strlen(file->name); /* reloid.cfm */ diff --git a/tests/__init__.py b/tests/__init__.py index 3a297c45e..732fdb734 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -5,7 +5,7 @@ backup, delete, delta, restore, validate, \ retention, pgpro560, pgpro589, pgpro2068, false_positive, replica, \ compression, page, ptrack, archive, exclude, cfs_backup, cfs_restore, \ - cfs_validate_backup, auth_test, time_stamp, snapfs, logging, \ + cfs_validate_backup, auth_test, time_stamp, logging, \ locking, remote, external, config, checkdb, set_backup, incr_restore, \ CVE_2018_1058 @@ -53,7 +53,6 @@ def load_tests(loader, tests, pattern): suite.addTests(loader.loadTestsFromModule(retention)) suite.addTests(loader.loadTestsFromModule(set_backup)) suite.addTests(loader.loadTestsFromModule(show)) - suite.addTests(loader.loadTestsFromModule(snapfs)) suite.addTests(loader.loadTestsFromModule(time_stamp)) suite.addTests(loader.loadTestsFromModule(validate)) suite.addTests(loader.loadTestsFromModule(CVE_2018_1058)) diff --git a/tests/snapfs.py b/tests/snapfs.py deleted file mode 100644 index 991741952..000000000 --- a/tests/snapfs.py +++ /dev/null @@ -1,61 +0,0 @@ -import unittest -import os -from time import sleep -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException - - -module_name = 'snapfs' - - -class SnapFSTest(ProbackupTest, unittest.TestCase): - - # @unittest.expectedFailure - def test_snapfs_simple(self): - """standart backup modes with ARCHIVE WAL method""" - if not self.enterprise: - self.skipTest('This test must be run on enterprise') - fname = self.id().split('.')[3] - node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - 'postgres', - 'select pg_make_snapshot()') - - node.pgbench_init(scale=10) - - pgbench = node.pgbench(options=['-T', '50', '-c', '2', '--no-vacuum']) - pgbench.wait() - - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - node.safe_psql( - 'postgres', - 'select pg_remove_snapshot(1)') - - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - pgdata = self.pgdata_content(node.data_dir) - - node.cleanup() - - self.restore_node( - backup_dir, 'node', - node, options=["-j", "4"]) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # Clean after yourself - self.del_test_dir(module_name, fname) From 196a70bd3214749c3377b5ed10feb68914249d3e Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Wed, 20 Oct 2021 20:37:44 +0300 Subject: [PATCH 231/525] Fixes for ptrack tests (test_ptrack_vacuum, test_ptrack_vacuum_bits_frozen, test_ptrack_vacuum_bits_visibility): this is workaround for spgist metadata update bug (PGPRO-5707) --- tests/helpers/ptrack_helpers.py | 62 +++++++++++++++++++++++---------- tests/ptrack.py | 10 ++++-- 2 files changed, 51 insertions(+), 21 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 5c0ce19bc..1b54d3165 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1714,8 +1714,30 @@ def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): return directory_dict - def compare_pgdata(self, original_pgdata, restored_pgdata): - """ return dict with directory content. DO IT BEFORE RECOVERY""" + def get_known_bugs_comparision_exclusion_dict(self, node): + """ get dict of known datafiles difference, that can be used in compare_pgdata() """ + comparision_exclusion_dict = dict() + + # bug in spgist metapage update (PGPRO-5707) + spgist_filelist = node.safe_psql( + "postgres", + "SELECT pg_catalog.pg_relation_filepath(pg_class.oid) " + "FROM pg_am, pg_class " + "WHERE pg_am.amname = 'spgist' " + "AND pg_class.relam = pg_am.oid" + ).decode('utf-8').rstrip().splitlines() + for filename in spgist_filelist: + comparision_exclusion_dict[filename] = set([0]) + + return comparision_exclusion_dict + + + def compare_pgdata(self, original_pgdata, restored_pgdata, exclusion_dict = dict()): + """ + return dict with directory content. DO IT BEFORE RECOVERY + exclusion_dict is used for exclude files (and it block_no) from comparision + it is a dict with relative filenames as keys and set of block numbers as values + """ fail = False error_message = 'Restored PGDATA is not equal to original!\n' @@ -1777,16 +1799,17 @@ def compare_pgdata(self, original_pgdata, restored_pgdata): original_pgdata['files'][file]['md5'] != restored_pgdata['files'][file]['md5'] ): - fail = True - error_message += ( - '\nFile Checksumm mismatch.\n' - 'File_old: {0}\nChecksumm_old: {1}\n' - 'File_new: {2}\nChecksumm_new: {3}\n').format( - os.path.join(original_pgdata['pgdata'], file), - original_pgdata['files'][file]['md5'], - os.path.join(restored_pgdata['pgdata'], file), - restored_pgdata['files'][file]['md5'] - ) + if file not in exclusion_dict: + fail = True + error_message += ( + '\nFile Checksum mismatch.\n' + 'File_old: {0}\nChecksum_old: {1}\n' + 'File_new: {2}\nChecksum_new: {3}\n').format( + os.path.join(original_pgdata['pgdata'], file), + original_pgdata['files'][file]['md5'], + os.path.join(restored_pgdata['pgdata'], file), + restored_pgdata['files'][file]['md5'] + ) if original_pgdata['files'][file]['is_datafile']: for page in original_pgdata['files'][file]['md5_per_page']: @@ -1802,13 +1825,16 @@ def compare_pgdata(self, original_pgdata, restored_pgdata): ) continue - if original_pgdata['files'][file][ - 'md5_per_page'][page] != restored_pgdata[ - 'files'][file]['md5_per_page'][page]: + if not (file in exclusion_dict and page in exclusion_dict[file]): + if ( + original_pgdata['files'][file]['md5_per_page'][page] != + restored_pgdata['files'][file]['md5_per_page'][page] + ): + fail = True error_message += ( - '\n Page checksumm mismatch: {0}\n ' - ' PAGE Checksumm_old: {1}\n ' - ' PAGE Checksumm_new: {2}\n ' + '\n Page checksum mismatch: {0}\n ' + ' PAGE Checksum_old: {1}\n ' + ' PAGE Checksum_new: {2}\n ' ' File: {3}\n' ).format( page, diff --git a/tests/ptrack.py b/tests/ptrack.py index 
93b9e3cce..a3109da48 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -3210,6 +3210,8 @@ def test_ptrack_vacuum(self): idx_ptrack[i]['type'], idx_ptrack[i]['column'])) + comparision_exclusion = self.get_known_bugs_comparision_exclusion_dict(node) + node.safe_psql('postgres', 'vacuum t_heap') node.safe_psql('postgres', 'checkpoint') @@ -3253,7 +3255,7 @@ def test_ptrack_vacuum(self): self.restore_node(backup_dir, 'node', node) pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) + self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion) # Clean after yourself self.del_test_dir(module_name, self.fname) @@ -3403,6 +3405,7 @@ def test_ptrack_vacuum_bits_frozen(self): idx_ptrack[i]['type'], idx_ptrack[i]['column'])) + comparision_exclusion = self.get_known_bugs_comparision_exclusion_dict(node) node.safe_psql('postgres', 'checkpoint') self.backup_node( @@ -3438,7 +3441,7 @@ def test_ptrack_vacuum_bits_frozen(self): self.restore_node(backup_dir, 'node', node) pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) + self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion) # Clean after yourself self.del_test_dir(module_name, self.fname) @@ -3579,6 +3582,7 @@ def test_ptrack_vacuum_bits_visibility(self): i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column'])) + comparision_exclusion = self.get_known_bugs_comparision_exclusion_dict(node) node.safe_psql('postgres', 'checkpoint') self.backup_node( @@ -3614,7 +3618,7 @@ def test_ptrack_vacuum_bits_visibility(self): self.restore_node(backup_dir, 'node', node) pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) + self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion) # Clean after yourself self.del_test_dir(module_name, self.fname) From 474a9561ee0273f13d7d3f9e8990ce5d1902dad7 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Thu, 21 Oct 2021 13:52:23 +0300 Subject: [PATCH 232/525] [ci skip] README.md: fix astra linux version list --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 7b66d8556..060883a28 100644 --- a/README.md +++ b/README.md @@ -85,7 +85,7 @@ sudo apt-get source pg-probackup-{14,13,12,11,10,9.6} #DEB Astra Linix Orel sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{13,12,11,10,9.6,9.5}{-dbg,} +sudo apt-get install pg-probackup-{14,13,12,11,10,9.6}{-dbg,} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm From f3341dab4a825197864a553ac1d7eda44a83f1b6 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Thu, 21 Oct 2021 16:03:46 +0300 Subject: [PATCH 233/525] [PGPRO-5750] fix windows compilation problem with PG-14 this is caused by upstream commit https://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=bed90759fcbcd72d4d06969eebab81e47326f9a2 Reported by Victor Wagner and fixed by Victor Spirin --- src/utils/file.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/utils/file.c b/src/utils/file.c index f86e605cb..810b4b394 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1,8 +1,10 @@ #include #include -#include #include "pg_probackup.h" +/* sys/stat.h must be included after pg_probackup.h (see problems with compilation for windows described in PGPRO-5750) */ +#include + #include "file.h" #include "storage/checksum.h" From d57b5fd6edc9929f4a35379b0aabef16eeebed52 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Thu, 21 Oct 2021 17:01:14 +0300 Subject: [PATCH 234/525] Version 2.5.2 --- src/pg_probackup.h | 4 ++-- tests/expected/option_version.out | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index dfa7051a3..6a1feb014 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -3,7 +3,7 @@ * pg_probackup.h: Backup/Recovery manager for PostgreSQL. * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2018, Postgres Professional + * Portions Copyright (c) 2015-2021, Postgres Professional * *------------------------------------------------------------------------- */ @@ -338,7 +338,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.1" +#define PROGRAM_VERSION "2.5.2" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20501 diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index 36e5d4c7a..e9d8c0955 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.5.1 \ No newline at end of file +pg_probackup 2.5.2 \ No newline at end of file From c2e4f00932444492524ce7770880c837eb882c6f Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Fri, 22 Oct 2021 00:54:41 +0300 Subject: [PATCH 235/525] [ci skip] packaging: small fix in packaging instruction --- packaging/Readme.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/Readme.md b/packaging/Readme.md index c6cbf16b5..749b9fc06 100644 --- a/packaging/Readme.md +++ b/packaging/Readme.md @@ -9,7 +9,7 @@ make pkg To build binaries for PostgresPro Standart or Enterprise, a pgpro.tar.bz2 with latest git tree must be preset in `packaging/tarballs` directory: ``` -cd packaging/tarballs +cd packaging/pkg/tarballs git clone pgpro_repo pgpro tar -cjSf pgpro.tar.bz2 pgpro ``` From 36e6d0f95dde383383e7d8f8c374c2c4b2bee61a Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Fri, 22 Oct 2021 14:56:20 +0300 Subject: [PATCH 236/525] fix rhel dockerfiles for std|ent packaging --- packaging/Dockerfiles/Dockerfile-rhel_7 | 2 ++ packaging/Dockerfiles/Dockerfile-rhel_8 | 2 ++ 2 files changed, 4 insertions(+) diff --git a/packaging/Dockerfiles/Dockerfile-rhel_7 b/packaging/Dockerfiles/Dockerfile-rhel_7 index 322c44b59..f64819e13 100644 --- a/packaging/Dockerfiles/Dockerfile-rhel_7 +++ b/packaging/Dockerfiles/Dockerfile-rhel_7 @@ -5,3 +5,5 @@ RUN yum install -y tar wget yum-utils RUN yum install -y gcc make perl libicu-devel glibc-devel RUN yum install -y git RUN yum upgrade -y +RUN yum install -y http://mirror.centos.org/centos/7/os/x86_64/Packages/bison-3.0.4-2.el7.x86_64.rpm +RUN yum install -y http://mirror.centos.org/centos/7/os/x86_64/Packages/flex-2.5.37-6.el7.x86_64.rpm diff --git a/packaging/Dockerfiles/Dockerfile-rhel_8 b/packaging/Dockerfiles/Dockerfile-rhel_8 index c8e1e225e..82385785b 100644 --- a/packaging/Dockerfiles/Dockerfile-rhel_8 +++ b/packaging/Dockerfiles/Dockerfile-rhel_8 @@ -3,3 +3,5 @@ RUN yum install -y tar wget rpm-build yum-utils RUN yum install -y gcc make perl libicu-devel glibc-devel RUN yum install -y git RUN yum upgrade -y +RUN yum install -y http://mirror.centos.org/centos/8/AppStream/x86_64/os/Packages/bison-3.0.4-10.el8.x86_64.rpm +RUN yum install -y http://mirror.centos.org/centos/8/AppStream/x86_64/os/Packages/flex-2.6.1-9.el8.x86_64.rpm From 5b6ca624170e1b7955c293ce0173a812b6402d80 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Tue, 26 Oct 2021 00:06:23 +0300 Subject: [PATCH 237/525] [ci skip] packaging "tuning" --- packaging/Makefile.repo | 1 + packaging/Makefile.test | 14 ++++++++++++- .../rpmbuild/SPECS/pg_probackup-pgpro.spec | 2 +- .../SPECS/pg_probackup.alt.forks.spec | 2 +- packaging/test/Makefile.debian | 21 +++++++++++++++++++ packaging/test/scripts/deb.sh | 3 ++- packaging/test/scripts/rpm.sh | 1 + packaging/test/scripts/suse_forks.sh | 0 8 files changed, 40 insertions(+), 4 deletions(-) mode change 100644 => 100755 packaging/test/scripts/suse_forks.sh diff --git a/packaging/Makefile.repo b/packaging/Makefile.repo index 986c827e9..8186bfd80 100644 --- a/packaging/Makefile.repo +++ b/packaging/Makefile.repo @@ -113,6 +113,7 @@ build/repo_suse_15.2: repo_finish: # cd build/data/www/$(PBK_PKG_REPO)/ cd $(BUILDDIR)/data/www/$(PBK_PKG_REPO)/rpm && sudo ln -nsf $(PBK_VERSION) latest + # following line only for vanilla cd $(BUILDDIR)/data/www/$(PBK_PKG_REPO)/srpm && sudo ln -nsf $(PBK_VERSION) latest # sudo ln -rfs build/data/www/$(PBK_PKG_REPO)/rpm/${PBK_VERSION} build/data/www/$(PBK_PKG_REPO)/rpm/latest diff --git a/packaging/Makefile.test b/packaging/Makefile.test index fbb415c46..21e850ccd 100644 --- a/packaging/Makefile.test +++ b/packaging/Makefile.test @@ -13,7 +13,7 @@ build/test_all: build/test_debian build/test_ubuntu build/test_centos build/test @echo Package testing is done ### DEBIAN -build/test_debian: build/test_debian_9 build/test_debian_10 build/test_debian_11 +build/test_debian: build/test_debian_9 build/test_debian_10 #build/test_debian_11 @echo Debian: done build/test_debian_9: build/test_debian_9_9.6 build/test_debian_9_10 build/test_debian_9_11 build/test_debian_9_12 build/test_debian_9_13 @@ -58,9 +58,13 @@ build/test_centos: build/test_centos_7 build/test_centos_8 @echo Centos: done build/test_centos_7: build/test_centos_7_9.6 build/test_centos_7_10 build/test_centos_7_11 build/test_centos_7_12 build/test_centos_7_13 +# pgpro +#build/test_centos_7: build/test_centos_7_9.6 build/test_centos_7_10 build/test_centos_7_11 build/test_centos_7_12 @echo Centos 7: done build/test_centos_8: build/test_centos_8_9.6 build/test_centos_8_10 build/test_centos_8_11 build/test_centos_8_12 build/test_centos_8_13 +# pgpro +#build/test_centos_8: build/test_centos_8_10 build/test_centos_8_11 build/test_centos_8_12 @echo Centos 8: done # Oracle Linux @@ -68,9 +72,13 @@ build/test_oraclelinux: build/test_oraclelinux_7 build/test_oraclelinux_8 @echo Oraclelinux: done build/test_oraclelinux_7: build/test_oraclelinux_7_9.6 build/test_oraclelinux_7_10 build/test_oraclelinux_7_11 build/test_oraclelinux_7_12 build/test_oraclelinux_7_13 +# pgpro +#build/test_oraclelinux_7: build/test_oraclelinux_7_9.6 build/test_oraclelinux_7_10 build/test_oraclelinux_7_11 build/test_oraclelinux_7_12 @echo Oraclelinux 7: done build/test_oraclelinux_8: build/test_oraclelinux_8_9.6 build/test_oraclelinux_8_10 build/test_oraclelinux_8_11 build/test_oraclelinux_8_12 build/test_oraclelinux_8_13 +# pgpro +#build/test_oraclelinux_8: build/test_oraclelinux_8_10 build/test_oraclelinux_8_11 build/test_oraclelinux_8_12 @echo Oraclelinux 8: done # RHEL @@ -78,9 +86,13 @@ build/test_rhel: build/test_rhel_7 build/test_rhel_8 @echo Rhel: done build/test_rhel_7: build/test_rhel_7_9.5 build/test_rhel_7_9.6 build/test_rhel_7_10 build/test_rhel_7_11 build/test_rhel_7_12 build/test_rhel_7_13 +# pgpro +#build/test_rhel_7: build/test_rhel_7_9.5 build/test_rhel_7_9.6 build/test_rhel_7_10 build/test_rhel_7_11 build/test_rhel_7_12 @echo Rhel 
7: done build/test_rhel_8: build/test_rhel_8_9.5 build/test_rhel_8_9.6 build/test_rhel_8_10 build/test_rhel_8_11 build/test_rhel_8_12 build/test_rhel_8_13 +# pgpro +#build/test_rhel_8: build/test_rhel_8_9.5 build/test_rhel_8_9.6 build/test_rhel_8_10 build/test_rhel_8_11 build/test_rhel_8_12 @echo Rhel 8: done define test_rpm diff --git a/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-pgpro.spec b/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-pgpro.spec index d5811171d..8955b3fa7 100644 --- a/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-pgpro.spec +++ b/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-pgpro.spec @@ -42,7 +42,7 @@ mkdir %{_topdir}/SOURCES/pg_probackup-%{version} cd %{_topdir}/BUILD/postgrespro-%{edition}-%{pgsql_full} %if "%{pgsql_major}" == "9.6" -./configure --enable-debug +./configure --enable-debug --without-readline %else ./configure --enable-debug --without-readline --prefix=%{prefix} %endif diff --git a/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup.alt.forks.spec b/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup.alt.forks.spec index cbfd61a0f..cbb57e42a 100644 --- a/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup.alt.forks.spec +++ b/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup.alt.forks.spec @@ -43,7 +43,7 @@ cd %{_topdir}/BUILD/postgrespro-%{edition}-%{pgsql_full} %if "%{pgsql_major}" == "9.6" ./configure --enable-debug %else -./configure --enable-debug --prefix=%{prefix} +./configure --enable-debug --disable-online-upgrade --prefix=%{prefix} %endif make -C 'src/common' make -C 'src/port' diff --git a/packaging/test/Makefile.debian b/packaging/test/Makefile.debian index f540f9205..084741069 100644 --- a/packaging/test/Makefile.debian +++ b/packaging/test/Makefile.debian @@ -39,3 +39,24 @@ build/test_debian_10_12: build/test_debian_10_13: $(call test_deb,debian,10,buster,13,13.2) touch build/test_debian_10_13 + +# DEBIAN 11 +build/test_debian_11_9.6: + $(call test_deb,debian,11,bullseye,9.6,9.6.21) + touch build/test_debian_11_9.6 + +build/test_debian_11_10: + $(call test_deb,debian,11,bullseye,10,10.16) + touch build/test_debian_11_10 + +build/test_debian_11_11: + $(call test_deb,debian,11,bullseye,11,11.11) + touch build/test_debian_11_11 + +build/test_debian_11_12: + $(call test_deb,debian,11,bullseye,12,12.6) + touch build/test_debian_11_12 + +build/test_debian_11_13: + $(call test_deb,debian,11,bullseye,13,13.2) + touch build/test_debian_11_13 diff --git a/packaging/test/scripts/deb.sh b/packaging/test/scripts/deb.sh index 76e3bb043..d7b957192 100755 --- a/packaging/test/scripts/deb.sh +++ b/packaging/test/scripts/deb.sh @@ -17,6 +17,7 @@ PG_TOG=$(echo $PG_VERSION | sed 's|\.||g') export DEBIAN_FRONTEND=noninteractive echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections +#apt-get -qq --allow-releaseinfo-change update apt-get -qq update apt-get -qq install -y wget nginx gnupg lsb-release #apt-get -qq install -y libterm-readline-gnu-perl dialog gnupg procps @@ -45,7 +46,7 @@ nginx -s reload || (pkill -9 nginx || nginx -c /etc/nginx/nginx.conf &) # install POSTGRESQL #if [ ${CODENAME} == 'precise' ] && [ ${PG_VERSION} != '10' ] && [ ${PG_VERSION} != '11' ]; then sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' - wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - + wget --no-check-certificate -O - 
https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - apt-get update -y apt-get install -y postgresql-${PG_VERSION} #fi diff --git a/packaging/test/scripts/rpm.sh b/packaging/test/scripts/rpm.sh index 3f24cc7e5..320d459f6 100755 --- a/packaging/test/scripts/rpm.sh +++ b/packaging/test/scripts/rpm.sh @@ -13,6 +13,7 @@ ulimit -n 1024 PG_TOG=$(echo $PG_VERSION | sed 's|\.||g') +yum update -y # yum upgrade -y || echo 'some packages in docker failed to upgrade' # yum install -y sudo if [ ${DISTRIB} == 'rhel' ] && [ ${PG_TOG} == '13' ]; then # no packages for PG13 on PGDG diff --git a/packaging/test/scripts/suse_forks.sh b/packaging/test/scripts/suse_forks.sh old mode 100644 new mode 100755 From b87ca18bfc1094e356247ebf6329f2b2059987a7 Mon Sep 17 00:00:00 2001 From: dlepikhova <43872363+dlepikhova@users.noreply.github.com> Date: Mon, 22 Nov 2021 12:41:49 +0500 Subject: [PATCH 238/525] [Issue #265][PGPRO-5421] archive-push backward compatibility (#437) Restore the --wal-file-path option of the archive-push command (it was ignored since a196073) Co-authored-by: Mikhail A. Kulagin Co-authored-by: Elena Indrupskaya --- doc/pgprobackup.xml | 7 +- src/archive.c | 53 ++++--------- src/backup.c | 2 +- src/catchup.c | 6 +- src/help.c | 6 +- src/init.c | 2 +- src/pg_probackup.c | 98 +++++++++++++++++++++++- src/pg_probackup.h | 4 +- src/restore.c | 2 +- src/util.c | 6 +- src/utils/file.c | 27 +++++++ src/utils/file.h | 1 + src/utils/pgut.c | 16 ++++ src/utils/pgut.h | 1 + tests/archive.py | 127 ++++++++++++++++++++++++++++++++ tests/expected/option_help.out | 1 + tests/helpers/ptrack_helpers.py | 65 ++++++++-------- 17 files changed, 338 insertions(+), 86 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index a347e7b43..76ec2cd76 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -131,6 +131,7 @@ doc/src/sgml/pgprobackup.sgml backup_dir instance_name + wal_file_path wal_file_name option @@ -5367,7 +5368,9 @@ pg_probackup catchup -b catchup_mode Provides the path to the WAL file in archive_command and restore_command. Use the %p - variable as the value for this option for correct processing. + variable as the value for this option or explicitly specify the path to a file + outside of the data directory. If you skip this option, the path + specified in pg_probackup.conf will be used. @@ -5380,6 +5383,8 @@ pg_probackup catchup -b catchup_mode archive_command and restore_command. Use the %f variable as the value for this option for correct processing. + If the value of is a path + outside of the data directory, explicitly specify the filename. diff --git a/src/archive.c b/src/archive.c index 7bb8c1c03..0f32d9345 100644 --- a/src/archive.c +++ b/src/archive.c @@ -3,7 +3,7 @@ * archive.c: - pg_probackup specific archive commands for archive backups. 
* * - * Portions Copyright (c) 2018-2019, Postgres Professional + * Portions Copyright (c) 2018-2021, Postgres Professional * *------------------------------------------------------------------------- */ @@ -113,15 +113,13 @@ static parray *setup_push_filelist(const char *archive_status_dir, * Where archlog_path is $BACKUP_PATH/wal/instance_name */ void -do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *wal_file_path, +do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *pg_xlog_dir, char *wal_file_name, int batch_size, bool overwrite, bool no_sync, bool no_ready_rename) { uint64 i; - char current_dir[MAXPGPATH]; - char pg_xlog_dir[MAXPGPATH]; - char archive_status_dir[MAXPGPATH]; - uint64 system_id; + /* usually instance pgdata/pg_wal/archive_status, empty if no_ready_rename or batch_size == 1 */ + char archive_status_dir[MAXPGPATH] = ""; bool is_compress = false; /* arrays with meta info for multi threaded backup */ @@ -141,31 +139,8 @@ do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *wa parray *batch_files = NULL; int n_threads; - if (wal_file_name == NULL) - elog(ERROR, "Required parameter is not specified: --wal-file-name %%f"); - - if (!getcwd(current_dir, sizeof(current_dir))) - elog(ERROR, "getcwd() error"); - - /* verify that archive-push --instance parameter is valid */ - system_id = get_system_identifier(current_dir, FIO_DB_HOST); - - if (instance->pgdata == NULL) - elog(ERROR, "Cannot read pg_probackup.conf for this instance"); - - if (system_id != instance->system_identifier) - elog(ERROR, "Refuse to push WAL segment %s into archive. Instance parameters mismatch." - "Instance '%s' should have SYSTEM_ID = " UINT64_FORMAT " instead of " UINT64_FORMAT, - wal_file_name, instanceState->instance_name, instance->system_identifier, system_id); - - if (instance->compress_alg == PGLZ_COMPRESS) - elog(ERROR, "Cannot use pglz for WAL compression"); - - join_path_components(pg_xlog_dir, current_dir, XLOGDIR); - join_path_components(archive_status_dir, pg_xlog_dir, "archive_status"); - - /* Create 'archlog_path' directory. Do nothing if it already exists. */ - //fio_mkdir(instanceState->instance_wal_subdir_path, DIR_PERMISSION, FIO_BACKUP_HOST); + if (!no_ready_rename || batch_size > 1) + join_path_components(archive_status_dir, pg_xlog_dir, "archive_status"); #ifdef HAVE_LIBZ if (instance->compress_alg == ZLIB_COMPRESS) @@ -204,12 +179,13 @@ do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *wa { int rc; WALSegno *xlogfile = (WALSegno *) parray_get(batch_files, i); + bool first_wal = strcmp(xlogfile->name, wal_file_name) == 0; - rc = push_file(xlogfile, archive_status_dir, + rc = push_file(xlogfile, first_wal ? NULL : archive_status_dir, pg_xlog_dir, instanceState->instance_wal_subdir_path, overwrite, no_sync, instance->archive_timeout, - no_ready_rename || (strcmp(xlogfile->name, wal_file_name) == 0) ? true : false, + no_ready_rename || first_wal, is_compress && IsXLogFileName(xlogfile->name) ? true : false, instance->compress_level); if (rc == 0) @@ -233,7 +209,7 @@ do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *wa arg->first_filename = wal_file_name; arg->archive_dir = instanceState->instance_wal_subdir_path; arg->pg_xlog_dir = pg_xlog_dir; - arg->archive_status_dir = archive_status_dir; + arg->archive_status_dir = (!no_ready_rename || batch_size > 1) ? 
archive_status_dir : NULL; arg->overwrite = overwrite; arg->compress = is_compress; arg->no_sync = no_sync; @@ -276,7 +252,7 @@ do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *wa /* Note, that we are leaking memory here, * because pushing into archive is a very - * time-sensetive operation, so we skip freeing stuff. + * time-sensitive operation, so we skip freeing stuff. */ push_done: @@ -356,9 +332,6 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir, int compress_level) { int rc; - char wal_file_dummy[MAXPGPATH]; - - join_path_components(wal_file_dummy, archive_status_dir, xlogfile->name); elog(LOG, "pushing file \"%s\"", xlogfile->name); @@ -375,11 +348,13 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir, #endif /* take '--no-ready-rename' flag into account */ - if (!no_ready_rename) + if (!no_ready_rename && archive_status_dir != NULL) { + char wal_file_dummy[MAXPGPATH]; char wal_file_ready[MAXPGPATH]; char wal_file_done[MAXPGPATH]; + join_path_components(wal_file_dummy, archive_status_dir, xlogfile->name); snprintf(wal_file_ready, MAXPGPATH, "%s.%s", wal_file_dummy, "ready"); snprintf(wal_file_done, MAXPGPATH, "%s.%s", wal_file_dummy, "done"); diff --git a/src/backup.c b/src/backup.c index 1d08c3828..c575865c4 100644 --- a/src/backup.c +++ b/src/backup.c @@ -943,7 +943,7 @@ check_system_identifiers(PGconn *conn, const char *pgdata) uint64 system_id_conn; uint64 system_id_pgdata; - system_id_pgdata = get_system_identifier(pgdata, FIO_DB_HOST); + system_id_pgdata = get_system_identifier(pgdata, FIO_DB_HOST, false); system_id_conn = get_remote_system_identifier(conn); /* for checkdb check only system_id_pgdata and system_id_conn */ diff --git a/src/catchup.c b/src/catchup.c index 5a0c8e45a..f9145a395 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -48,7 +48,7 @@ catchup_init_state(PGNodeInfo *source_node_info, const char *source_pgdata, cons /* Get WAL segments size and system ID of source PG instance */ instance_config.xlog_seg_size = get_xlog_seg_size(source_pgdata); - instance_config.system_identifier = get_system_identifier(source_pgdata, FIO_DB_HOST); + instance_config.system_identifier = get_system_identifier(source_pgdata, FIO_DB_HOST, false); current.start_time = time(NULL); strlcpy(current.program_version, PROGRAM_VERSION, sizeof(current.program_version)); @@ -163,7 +163,7 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, uint64 source_conn_id, source_id, dest_id; source_conn_id = get_remote_system_identifier(source_conn); - source_id = get_system_identifier(source_pgdata, FIO_DB_HOST); /* same as instance_config.system_identifier */ + source_id = get_system_identifier(source_pgdata, FIO_DB_HOST, false); /* same as instance_config.system_identifier */ if (source_conn_id != source_id) elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu", @@ -171,7 +171,7 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, if (current.backup_mode != BACKUP_MODE_FULL) { - dest_id = get_system_identifier(dest_pgdata, FIO_LOCAL_HOST); + dest_id = get_system_identifier(dest_pgdata, FIO_LOCAL_HOST, false); if (source_conn_id != dest_id) elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu", source_conn_id, dest_pgdata, dest_id); diff --git a/src/help.c b/src/help.c index 1515359e4..a6530fc0e 100644 --- a/src/help.c +++ b/src/help.c @@ -227,6 +227,7 @@ help_pg_probackup(void) printf(_("\n %s 
archive-push -B backup-path --instance=instance_name\n"), PROGRAM_NAME); printf(_(" --wal-file-name=wal-file-name\n")); + printf(_(" [--wal-file-path=wal-file-path]\n")); printf(_(" [-j num-threads] [--batch-size=batch_size]\n")); printf(_(" [--archive-timeout=timeout]\n")); printf(_(" [--no-ready-rename] [--no-sync]\n")); @@ -937,6 +938,7 @@ help_archive_push(void) { printf(_("\n%s archive-push -B backup-path --instance=instance_name\n"), PROGRAM_NAME); printf(_(" --wal-file-name=wal-file-name\n")); + printf(_(" [--wal-file-path=wal-file-path]\n")); printf(_(" [-j num-threads] [--batch-size=batch_size]\n")); printf(_(" [--archive-timeout=timeout]\n")); printf(_(" [--no-ready-rename] [--no-sync]\n")); @@ -951,6 +953,8 @@ help_archive_push(void) printf(_(" --instance=instance_name name of the instance to delete\n")); printf(_(" --wal-file-name=wal-file-name\n")); printf(_(" name of the file to copy into WAL archive\n")); + printf(_(" --wal-file-path=wal-file-path\n")); + printf(_(" relative destination path of the WAL archive\n")); printf(_(" -j, --threads=NUM number of parallel threads\n")); printf(_(" --batch-size=NUM number of files to be copied\n")); printf(_(" --archive-timeout=timeout wait timeout before discarding stale temp file(default: 5min)\n")); @@ -981,8 +985,8 @@ static void help_archive_get(void) { printf(_("\n%s archive-get -B backup-path --instance=instance_name\n"), PROGRAM_NAME); - printf(_(" --wal-file-path=wal-file-path\n")); printf(_(" --wal-file-name=wal-file-name\n")); + printf(_(" [--wal-file-path=wal-file-path]\n")); printf(_(" [-j num-threads] [--batch-size=batch_size]\n")); printf(_(" [--no-validate-wal]\n")); printf(_(" [--remote-proto] [--remote-host]\n")); diff --git a/src/init.c b/src/init.c index a4911cb5c..8773016b5 100644 --- a/src/init.c +++ b/src/init.c @@ -57,7 +57,7 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) "(-D, --pgdata)"); /* Read system_identifier from PGDATA */ - instance->system_identifier = get_system_identifier(instance->pgdata, FIO_DB_HOST); + instance->system_identifier = get_system_identifier(instance->pgdata, FIO_DB_HOST, false); /* Starting from PostgreSQL 11 read WAL segment size from PGDATA */ instance->xlog_seg_size = get_xlog_seg_size(instance->pgdata); diff --git a/src/pg_probackup.c b/src/pg_probackup.c index d629d838d..49e226ace 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -35,7 +35,7 @@ * which includes info about pgdata directory and connection. 
* * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2021, Postgres Professional * *------------------------------------------------------------------------- */ @@ -151,6 +151,7 @@ static char *wal_file_path; static char *wal_file_name; static bool file_overwrite = false; static bool no_ready_rename = false; +static char archive_push_xlog_dir[MAXPGPATH] = ""; /* archive get options */ static char *prefetch_dir; @@ -788,7 +789,7 @@ main(int argc, char *argv[]) current.stream = stream_wal = true; if (instance_config.external_dir_str) elog(ERROR, "external directories not supported fom \"%s\" command", get_subcmd_name(backup_subcmd)); - // TODO проверить instance_config.conn_opt + // TODO check instance_config.conn_opt } /* sanity */ @@ -796,6 +797,97 @@ main(int argc, char *argv[]) elog(ERROR, "You cannot specify \"--no-validate\" option with the \"%s\" command", get_subcmd_name(backup_subcmd)); + if (backup_subcmd == ARCHIVE_PUSH_CMD) + { + /* Check archive-push parameters and construct archive_push_xlog_dir + * + * There are 4 cases: + * 1. no --wal-file-path specified -- use cwd, ./PG_XLOG_DIR for wal files + * (and ./PG_XLOG_DIR/archive_status for .done files inside do_archive_push()) + * in this case we can use batches and threads + * 2. --wal-file-path is specified and it is the same dir as stored in pg_probackup.conf (instance_config.pgdata) + * in this case we can use this path, as well as batches and thread + * 3. --wal-file-path is specified and it isn't same dir as stored in pg_probackup.conf but control file present with correct system_id + * in this case we can use this path, as well as batches and thread + * (replica for example, see test_archive_push_sanity) + * 4. --wal-file-path is specified and it is different from instance_config.pgdata and no control file found + * disable optimizations and work with user specified path + */ + bool check_system_id = true; + uint64 system_id; + char current_dir[MAXPGPATH]; + + if (wal_file_name == NULL) + elog(ERROR, "Required parameter is not specified: --wal-file-name %%f"); + + if (instance_config.pgdata == NULL) + elog(ERROR, "Cannot read pg_probackup.conf for this instance"); + + /* TODO may be remove in preference of checking inside compress_init()? */ + if (instance_config.compress_alg == PGLZ_COMPRESS) + elog(ERROR, "Cannot use pglz for WAL compression"); + + if (!getcwd(current_dir, sizeof(current_dir))) + elog(ERROR, "getcwd() error"); + + if (wal_file_path == NULL) + { + /* 1st case */ + system_id = get_system_identifier(current_dir, FIO_DB_HOST, false); + join_path_components(archive_push_xlog_dir, current_dir, XLOGDIR); + } + else + { + /* + * Usually we get something like + * wal_file_path = "pg_wal/0000000100000000000000A1" + * wal_file_name = "0000000100000000000000A1" + * instance_config.pgdata = "/pgdata/.../node/data" + * We need to strip wal_file_name from wal_file_path, add XLOGDIR to instance_config.pgdata + * and compare this directories. 
+ * Note, that pg_wal can be symlink (see test_waldir_outside_pgdata_archiving) + */ + char *stripped_wal_file_path = pgut_str_strip_trailing_filename(wal_file_path, wal_file_name); + join_path_components(archive_push_xlog_dir, instance_config.pgdata, XLOGDIR); + if (fio_is_same_file(stripped_wal_file_path, archive_push_xlog_dir, true, FIO_DB_HOST)) + { + /* 2nd case */ + system_id = get_system_identifier(instance_config.pgdata, FIO_DB_HOST, false); + /* archive_push_xlog_dir already have right value */ + } + else + { + if (strlen(stripped_wal_file_path) < MAXPGPATH) + strncpy(archive_push_xlog_dir, stripped_wal_file_path, MAXPGPATH); + else + elog(ERROR, "Value specified to --wal_file_path is too long"); + + system_id = get_system_identifier(current_dir, FIO_DB_HOST, true); + /* 3rd case if control file present -- i.e. system_id != 0 */ + + if (system_id == 0) + { + /* 4th case */ + check_system_id = false; + + if (batch_size > 1 || num_threads > 1 || !no_ready_rename) + { + elog(WARNING, "Supplied --wal_file_path is outside pgdata, force safe values for options: --batch-size=1 -j 1 --no-ready-rename"); + batch_size = 1; + num_threads = 1; + no_ready_rename = true; + } + } + } + pfree(stripped_wal_file_path); + } + + if (check_system_id && system_id != instance_config.system_identifier) + elog(ERROR, "Refuse to push WAL segment %s into archive. Instance parameters mismatch." + "Instance '%s' should have SYSTEM_ID = " UINT64_FORMAT " instead of " UINT64_FORMAT, + wal_file_name, instanceState->instance_name, instance_config.system_identifier, system_id); + } + #if PG_VERSION_NUM >= 100000 if (temp_slot && perm_slot) elog(ERROR, "You cannot specify \"--perm-slot\" option with the \"--temp-slot\" option"); @@ -819,7 +911,7 @@ main(int argc, char *argv[]) switch (backup_subcmd) { case ARCHIVE_PUSH_CMD: - do_archive_push(instanceState, &instance_config, wal_file_path, wal_file_name, + do_archive_push(instanceState, &instance_config, archive_push_xlog_dir, wal_file_name, batch_size, file_overwrite, no_sync, no_ready_rename); break; case ARCHIVE_GET_CMD: diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 6a1feb014..a51794d98 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -889,7 +889,7 @@ extern int do_init(CatalogState *catalogState); extern int do_add_instance(InstanceState *instanceState, InstanceConfig *instance); /* in archive.c */ -extern void do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *wal_file_path, +extern void do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *pg_xlog_dir, char *wal_file_name, int batch_size, bool overwrite, bool no_sync, bool no_ready_rename); extern void do_archive_get(InstanceState *instanceState, InstanceConfig *instance, const char *prefetch_dir_arg, char *wal_file_path, @@ -1153,7 +1153,7 @@ extern XLogRecPtr get_next_record_lsn(const char *archivedir, XLogSegNo segno, T extern TimeLineID get_current_timeline(PGconn *conn); extern TimeLineID get_current_timeline_from_control(const char *pgdata_path, fio_location location, bool safe); extern XLogRecPtr get_checkpoint_location(PGconn *conn); -extern uint64 get_system_identifier(const char *pgdata_path, fio_location location); +extern uint64 get_system_identifier(const char *pgdata_path, fio_location location, bool safe); extern uint64 get_remote_system_identifier(PGconn *conn); extern uint32 get_data_checksum_version(bool safe); extern pg_crc32c get_pgcontrol_checksum(const char *pgdata_path); diff --git a/src/restore.c b/src/restore.c 
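The four-case comment above is the heart of the fix: before archive-push can keep batching, extra threads and .ready renaming enabled, it has to decide whether the directory supplied via --wal-file-path is the instance's own WAL directory. As a rough illustration only (not code from the patch), the standalone sketch below replays that decision for POSIX systems and PostgreSQL 10+, where XLOGDIR is pg_wal. same_dir() and strip_trailing_filename() are simplified stand-ins for the patch's fio_is_same_file() and pgut_str_strip_trailing_filename(), and the pg_control probe done through get_system_identifier(..., safe) is left out.

/*
 * Illustrative sketch only -- not part of the patch. Function names here
 * (same_dir, strip_trailing_filename, resolve logic in main) are invented
 * for the example; the real implementation lives in pg_probackup.c,
 * fio_is_same_file() and pgut_str_strip_trailing_filename().
 */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>

/* POSIX-only identity check: same device and inode => same directory. */
static int
same_dir(const char *a, const char *b)
{
	struct stat sa, sb;

	if (stat(a, &sa) != 0 || stat(b, &sb) != 0)
		return 0;
	return sa.st_dev == sb.st_dev && sa.st_ino == sb.st_ino;
}

/* Strip a trailing wal_file_name from wal_file_path, if it is there. */
static void
strip_trailing_filename(char *dst, size_t dstlen,
						const char *filepath, const char *filename)
{
	size_t	fp = strlen(filepath);
	size_t	fn = strlen(filename);

	if (fp >= fn && strcmp(filepath + fp - fn, filename) == 0)
		fp -= fn;
	snprintf(dst, dstlen, "%.*s", (int) fp, filepath);
}

int
main(int argc, char **argv)
{
	char	stripped[4096];
	char	pg_wal[4096];

	if (argc != 4)
	{
		fprintf(stderr, "usage: %s wal-file-path wal-file-name pgdata\n", argv[0]);
		return 1;
	}

	strip_trailing_filename(stripped, sizeof(stripped), argv[1], argv[2]);
	snprintf(pg_wal, sizeof(pg_wal), "%s/pg_wal", argv[3]);

	if (same_dir(stripped, pg_wal))
		printf("case 2: \"%s\" is the instance pg_wal; batching, threads and "
			   ".ready renaming stay enabled\n", stripped);
	else
		printf("case 3 or 4: \"%s\" is outside pgdata; probe pg_control there "
			   "before forcing --batch-size=1 -j 1 --no-ready-rename\n", stripped);
	return 0;
}

In the patch itself the POSIX branch of fio_is_same_file() performs the same st_dev/st_ino comparison, while the Windows branch falls back to comparing absolute path strings, which is why pg_wal symlinks (test_waldir_outside_pgdata_archiving) still resolve to the expected directory.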
index 005984aed..47e3b0344 100644 --- a/src/restore.c +++ b/src/restore.c @@ -2186,7 +2186,7 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, */ elog(INFO, "Trying to read pg_control file in destination directory"); - system_id_pgdata = get_system_identifier(pgdata, FIO_DB_HOST); + system_id_pgdata = get_system_identifier(pgdata, FIO_DB_HOST, false); if (system_id_pgdata == instance_config.system_identifier) system_id_match = true; diff --git a/src/util.c b/src/util.c index f39b31d45..fb33fd046 100644 --- a/src/util.c +++ b/src/util.c @@ -247,15 +247,15 @@ get_checkpoint_location(PGconn *conn) } uint64 -get_system_identifier(const char *pgdata_path, fio_location location) +get_system_identifier(const char *pgdata_path, fio_location location, bool safe) { ControlFileData ControlFile; char *buffer; size_t size; /* First fetch file... */ - buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, location); - if (buffer == NULL) + buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, safe, location); + if (safe && buffer == NULL) return 0; digestControlFile(&ControlFile, buffer, size); pg_free(buffer); diff --git a/src/utils/file.c b/src/utils/file.c index 810b4b394..7d1df554b 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1141,6 +1141,33 @@ fio_stat(char const* path, struct stat* st, bool follow_symlink, fio_location lo } } +/* + * Compare, that filename1 and filename2 is the same file + * in windows compare only filenames + */ +bool +fio_is_same_file(char const* filename1, char const* filename2, bool follow_symlink, fio_location location) +{ +#ifndef WIN32 + struct stat stat1, stat2; + + if (fio_stat(filename1, &stat1, follow_symlink, location) < 0) + elog(ERROR, "Can't stat file \"%s\": %s", filename1, strerror(errno)); + + if (fio_stat(filename2, &stat2, follow_symlink, location) < 0) + elog(ERROR, "Can't stat file \"%s\": %s", filename2, strerror(errno)); + + return stat1.st_ino == stat2.st_ino && stat1.st_dev == stat2.st_dev; +#else + char *abs_name1 = make_absolute_path(filename1); + char *abs_name2 = make_absolute_path(filename2); + bool result = strcmp(abs_name1, abs_name2) == 0; + free(abs_name2); + free(abs_name1); + return result; +#endif +} + /* * Read value of a symbolic link * this is a wrapper about readlink() syscall diff --git a/src/utils/file.h b/src/utils/file.h index edb5ea0f9..a554b4ab0 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -129,6 +129,7 @@ extern int fio_mkdir(char const* path, int mode, fio_location location); extern int fio_chmod(char const* path, int mode, fio_location location); extern int fio_access(char const* path, int mode, fio_location location); extern int fio_stat(char const* path, struct stat* st, bool follow_symlinks, fio_location location); +extern bool fio_is_same_file(char const* filename1, char const* filename2, bool follow_symlink, fio_location location); extern ssize_t fio_readlink(const char *path, char *value, size_t valsiz, fio_location location); extern DIR* fio_opendir(char const* path, fio_location location); extern struct dirent * fio_readdir(DIR *dirp); diff --git a/src/utils/pgut.c b/src/utils/pgut.c index 52599848d..2cf0ccbe7 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -977,6 +977,22 @@ pgut_strndup(const char *str, size_t n) return ret; } +/* + * Allocates new string, that contains part of filepath string minus trailing filename string + * If trailing filename string not found, returns copy of filepath. + * Result must be free by caller. 
+ */ +char * +pgut_str_strip_trailing_filename(const char *filepath, const char *filename) +{ + size_t fp_len = strlen(filepath); + size_t fn_len = strlen(filename); + if (strncmp(filepath + fp_len - fn_len, filename, fn_len) == 0) + return pgut_strndup(filepath, fp_len - fn_len); + else + return pgut_strndup(filepath, fp_len); +} + FILE * pgut_fopen(const char *path, const char *mode, bool missing_ok) { diff --git a/src/utils/pgut.h b/src/utils/pgut.h index a1d7b5a93..fa0efe816 100644 --- a/src/utils/pgut.h +++ b/src/utils/pgut.h @@ -63,6 +63,7 @@ extern void *pgut_malloc0(size_t size); extern void *pgut_realloc(void *p, size_t size); extern char *pgut_strdup(const char *str); extern char *pgut_strndup(const char *str, size_t n); +extern char *pgut_str_strip_trailing_filename(const char *filepath, const char *filename); #define pgut_new(type) ((type *) pgut_malloc(sizeof(type))) #define pgut_new0(type) ((type *) pgut_malloc0(sizeof(type))) diff --git a/tests/archive.py b/tests/archive.py index 5157e8b89..22b9d8693 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -1828,6 +1828,133 @@ def test_archive_options_1(self): self.del_test_dir(module_name, fname) + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_undefined_wal_file_path(self): + """ + check that archive-push works correct with undefined + --wal-file-path + """ + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + if os.name == 'posix': + archive_command = '\"{0}\" archive-push -B \"{1}\" --instance \"{2}\" --wal-file-name=%f'.format( + self.probackup_path, backup_dir, 'node') + elif os.name == 'nt': + archive_command = '\"{0}\" archive-push -B \"{1}\" --instance \"{2}\" --wal-file-name=%f'.format( + self.probackup_path, backup_dir, 'node').replace("\\","\\\\") + else: + self.assertTrue(False, 'Unexpected os family') + + self.set_auto_conf( + node, + {'archive_command': archive_command}) + + node.slow_start() + node.safe_psql( + "postgres", + "create table t_heap as select i" + " as id from generate_series(0, 10) i") + self.switch_wal_segment(node) + + # check + self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], '000000010000000000000001') + + self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_intermediate_archiving(self): + """ + check that archive-push works correct with --wal-file-path setting by user + """ + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums']) + + node_pg_options = {} + if node.major_version >= 13: + node_pg_options['wal_keep_size'] = '0MB' + else: + node_pg_options['wal_keep_segments'] = '0' + self.set_auto_conf(node, node_pg_options) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + + wal_dir = os.path.join(self.tmp_path, module_name, fname, 'intermediate_dir') + shutil.rmtree(wal_dir, ignore_errors=True) + os.makedirs(wal_dir) + if os.name == 'posix': + self.set_archiving(backup_dir, 'node', node, custom_archive_command='cp -v %p {0}/%f'.format(wal_dir)) + elif os.name == 'nt': 
+ self.set_archiving(backup_dir, 'node', node, custom_archive_command='copy /Y "%p" "{0}\\\\%f"'.format(wal_dir.replace("\\","\\\\"))) + else: + self.assertTrue(False, 'Unexpected os family') + + node.slow_start() + node.safe_psql( + "postgres", + "create table t_heap as select i" + " as id from generate_series(0, 10) i") + self.switch_wal_segment(node) + + wal_segment = '000000010000000000000001' + + self.run_pb(["archive-push", "-B", backup_dir, + "--instance=node", "-D", node.data_dir, + "--wal-file-path", "{0}/{1}".format(wal_dir, wal_segment), "--wal-file-name", wal_segment]) + + self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], wal_segment) + + self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_waldir_outside_pgdata_archiving(self): + """ + check that archive-push works correct with symlinked waldir + """ + if self.pg_config_version < self.version_to_num('10.0'): + return unittest.skip( + 'Skipped because waldir outside pgdata is supported since PG 10') + + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + external_wal_dir = os.path.join(self.tmp_path, module_name, fname, 'ext_wal_dir') + shutil.rmtree(external_wal_dir, ignore_errors=True) + + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums', '--waldir={0}'.format(external_wal_dir)]) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + + node.slow_start() + node.safe_psql( + "postgres", + "create table t_heap as select i" + " as id from generate_series(0, 10) i") + self.switch_wal_segment(node) + + # check + self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], '000000010000000000000001') + + self.del_test_dir(module_name, fname) + # @unittest.skip("skip") # @unittest.expectedFailure def test_hexadecimal_timeline(self): diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index 01384a893..dd3c4e865 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -144,6 +144,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. 
pg_probackup archive-push -B backup-path --instance=instance_name --wal-file-name=wal-file-name + [--wal-file-path=wal-file-path] [-j num-threads] [--batch-size=batch_size] [--archive-timeout=timeout] [--no-ready-rename] [--no-sync] diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 1b54d3165..3b14b7170 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1296,7 +1296,8 @@ def get_recovery_conf(self, node): def set_archiving( self, backup_dir, instance, node, replica=False, overwrite=False, compress=True, old_binary=False, - log_level=False, archive_timeout=False): + log_level=False, archive_timeout=False, + custom_archive_command=None): # parse postgresql.auto.conf options = {} @@ -1306,45 +1307,47 @@ def set_archiving( else: options['archive_mode'] = 'on' - if os.name == 'posix': - options['archive_command'] = '"{0}" archive-push -B {1} --instance={2} '.format( - self.probackup_path, backup_dir, instance) - - elif os.name == 'nt': - options['archive_command'] = '"{0}" archive-push -B {1} --instance={2} '.format( - self.probackup_path.replace("\\","\\\\"), - backup_dir.replace("\\","\\\\"), instance) + if custom_archive_command is None: + if os.name == 'posix': + options['archive_command'] = '"{0}" archive-push -B {1} --instance={2} '.format( + self.probackup_path, backup_dir, instance) - # don`t forget to kill old_binary after remote ssh release - if self.remote and not old_binary: - options['archive_command'] += '--remote-proto=ssh ' - options['archive_command'] += '--remote-host=localhost ' + elif os.name == 'nt': + options['archive_command'] = '"{0}" archive-push -B {1} --instance={2} '.format( + self.probackup_path.replace("\\","\\\\"), + backup_dir.replace("\\","\\\\"), instance) - if self.archive_compress and compress: - options['archive_command'] += '--compress ' + # don`t forget to kill old_binary after remote ssh release + if self.remote and not old_binary: + options['archive_command'] += '--remote-proto=ssh ' + options['archive_command'] += '--remote-host=localhost ' - if overwrite: - options['archive_command'] += '--overwrite ' + if self.archive_compress and compress: + options['archive_command'] += '--compress ' - options['archive_command'] += '--log-level-console=VERBOSE ' - options['archive_command'] += '-j 5 ' - options['archive_command'] += '--batch-size 10 ' - options['archive_command'] += '--no-sync ' + if overwrite: + options['archive_command'] += '--overwrite ' - if archive_timeout: - options['archive_command'] += '--archive-timeout={0} '.format( - archive_timeout) + options['archive_command'] += '--log-level-console=VERBOSE ' + options['archive_command'] += '-j 5 ' + options['archive_command'] += '--batch-size 10 ' + options['archive_command'] += '--no-sync ' - if os.name == 'posix': - options['archive_command'] += '--wal-file-path=%p --wal-file-name=%f' + if archive_timeout: + options['archive_command'] += '--archive-timeout={0} '.format( + archive_timeout) - elif os.name == 'nt': - options['archive_command'] += '--wal-file-path="%p" --wal-file-name="%f"' + if os.name == 'posix': + options['archive_command'] += '--wal-file-path=%p --wal-file-name=%f' - if log_level: - options['archive_command'] += ' --log-level-console={0}'.format(log_level) - options['archive_command'] += ' --log-level-file={0} '.format(log_level) + elif os.name == 'nt': + options['archive_command'] += '--wal-file-path="%p" --wal-file-name="%f"' + if log_level: + options['archive_command'] += ' 
--log-level-console={0}'.format(log_level) + options['archive_command'] += ' --log-level-file={0} '.format(log_level) + else: # custom_archive_command is not None + options['archive_command'] = custom_archive_command self.set_auto_conf(node, options) From 758a32f09202adcc3312c37a51d3c8b9d55934d3 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Mon, 22 Nov 2021 13:16:48 +0300 Subject: [PATCH 239/525] Version 2.5.3 --- src/pg_probackup.h | 2 +- tests/expected/option_version.out | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index a51794d98..b828343dc 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -338,7 +338,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.2" +#define PROGRAM_VERSION "2.5.3" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20501 diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index e9d8c0955..8b212ac1f 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.5.2 \ No newline at end of file +pg_probackup 2.5.3 \ No newline at end of file From 64ff0bbf7442b02c499c0b47607cc82a115e08b1 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Fri, 24 Dec 2021 13:18:11 +0300 Subject: [PATCH 240/525] [Issue #459][PGPRO-6034] Fix catchup (delta and ptrack) data corruption (#460) * [Issue #459][PGPRO-6034] Fix catchup (delta and ptrack) data corruption --- src/catchup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/catchup.c b/src/catchup.c index f9145a395..78a1e5265 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -921,7 +921,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, char fullpath[MAXPGPATH]; join_path_components(fullpath, dest_pgdata, file->rel_path); - fio_delete(file->mode, fullpath, FIO_DB_HOST); + fio_delete(file->mode, fullpath, FIO_LOCAL_HOST); elog(VERBOSE, "Deleted file \"%s\"", fullpath); /* shrink dest pgdata list */ From ad932d8a2e26b9ca42d280a9e47ab5d957023599 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Fri, 24 Dec 2021 13:26:46 +0300 Subject: [PATCH 241/525] [PGPRO-6037] fix catchup timeline history checking (#462) * [PGPRO-6037] fix catchup timeline history checking --- src/catchup.c | 19 ++++++++++++++----- src/restore.c | 4 ++++ src/stream.c | 2 ++ tests/catchup.py | 32 ++++++++++++++++++++++++++------ 4 files changed, 46 insertions(+), 11 deletions(-) diff --git a/src/catchup.c b/src/catchup.c index 78a1e5265..1b8f8084d 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -203,6 +203,8 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, /* fill dest_redo.lsn and dest_redo.tli */ get_redo(dest_pgdata, FIO_LOCAL_HOST, &dest_redo); + elog(VERBOSE, "source.tli = %X, dest_redo.lsn = %X/%X, dest_redo.tli = %X", + current.tli, (uint32) (dest_redo.lsn >> 32), (uint32) dest_redo.lsn, dest_redo.tli); if (current.tli != 1) { @@ -285,11 +287,12 @@ catchup_check_tablespaces_existance_in_tbsmapping(PGconn *conn) static parray* catchup_get_tli_history(ConnectionOptions *conn_opt, TimeLineID tli) { - PGresult *res; - PGconn *conn; - char *history; - char query[128]; - parray *result = NULL; + PGresult *res; + PGconn *conn; + char *history; + char query[128]; + parray *result = NULL; + TimeLineHistoryEntry *entry = NULL; snprintf(query, sizeof(query), "TIMELINE_HISTORY %u", tli); @@ -336,6 +339,12 @@ catchup_get_tli_history(ConnectionOptions *conn_opt, TimeLineID tli) pg_free(history); PQclear(res); + /* append last timeline entry (as read_timeline_history() do) */ + entry = pgut_new(TimeLineHistoryEntry); + entry->tli = tli; + entry->end = InvalidXLogRecPtr; + parray_insert(result, 0, entry); + return result; } diff --git a/src/restore.c b/src/restore.c index 47e3b0344..d8d808a4e 100644 --- a/src/restore.c +++ b/src/restore.c @@ -1821,11 +1821,15 @@ satisfy_timeline(const parray *timelines, TimeLineID tli, XLogRecPtr lsn) { int i; + elog(VERBOSE, "satisfy_timeline() checking: tli = %X, lsn = %X/%X", + tli, (uint32) (lsn >> 32), (uint32) lsn); for (i = 0; i < parray_num(timelines); i++) { TimeLineHistoryEntry *timeline; timeline = (TimeLineHistoryEntry *) parray_get(timelines, i); + elog(VERBOSE, "satisfy_timeline() check %i entry: timeline->tli = %X, timeline->end = %X/%X", + i, timeline->tli, (uint32) (timeline->end >> 32), (uint32) timeline->end); if (tli == timeline->tli && (XLogRecPtrIsInvalid(timeline->end) || lsn <= timeline->end)) diff --git a/src/stream.c b/src/stream.c index a53077391..1ee8dee37 100644 --- a/src/stream.c +++ b/src/stream.c @@ -615,6 +615,8 @@ parse_tli_history_buffer(char *history, TimeLineID tli) if (!result) result = parray_new(); parray_append(result, entry); + elog(VERBOSE, "parse_tli_history_buffer() found entry: tli = %X, end = %X/%X", + tli, switchpoint_hi, switchpoint_lo); /* we ignore the remainder of each line */ } diff --git a/tests/catchup.py b/tests/catchup.py index 79ebdec9f..8441deaaf 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -292,7 +292,7 @@ def test_tli_delta_catchup(self): src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer") src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") - # do catchup + # do catchup (src_tli = 2, dst_tli = 1) self.catchup_node( backup_mode = 'DELTA', source_pgdata = src_pg.data_dir, @@ -310,15 +310,25 @@ def test_tli_delta_catchup(self): dst_options = {} dst_options['port'] = str(dst_pg.port) self.set_auto_conf(dst_pg, dst_options) - dst_pg.slow_start() + 
self.set_replica(master = src_pg, replica = dst_pg) + dst_pg.slow_start(replica = True) # 2nd check: run verification query dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') + dst_pg.stop() + + # do catchup (src_tli = 2, dst_tli = 2) + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + # Cleanup src_pg.stop() - dst_pg.stop() self.del_test_dir(module_name, self.fname) def test_tli_ptrack_catchup(self): @@ -365,7 +375,7 @@ def test_tli_ptrack_catchup(self): src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer") src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") - # do catchup + # do catchup (src_tli = 2, dst_tli = 1) self.catchup_node( backup_mode = 'PTRACK', source_pgdata = src_pg.data_dir, @@ -383,15 +393,25 @@ def test_tli_ptrack_catchup(self): dst_options = {} dst_options['port'] = str(dst_pg.port) self.set_auto_conf(dst_pg, dst_options) - dst_pg.slow_start() + self.set_replica(master = src_pg, replica = dst_pg) + dst_pg.slow_start(replica = True) # 2nd check: run verification query dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') + dst_pg.stop() + + # do catchup (src_tli = 2, dst_tli = 2) + self.catchup_node( + backup_mode = 'PTRACK', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + # Cleanup src_pg.stop() - dst_pg.stop() self.del_test_dir(module_name, self.fname) ######################################### From f4c0ac3bf13a5896c1327d05377862c33951fb90 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Fri, 24 Dec 2021 13:49:09 +0300 Subject: [PATCH 242/525] Version 2.5.4 --- src/pg_probackup.h | 2 +- tests/expected/option_version.out | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index b828343dc..b202b6152 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -338,7 +338,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.3" +#define PROGRAM_VERSION "2.5.4" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20501 diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index 8b212ac1f..a69cee03d 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.5.3 \ No newline at end of file +pg_probackup 2.5.4 \ No newline at end of file From 58a5805b59dfcc6bf950a0bd3a502fda1a0d8ed8 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Sat, 25 Dec 2021 18:07:00 +0300 Subject: [PATCH 243/525] [ci skip] packaging/Readme.md fix --- packaging/Readme.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/Readme.md b/packaging/Readme.md index 749b9fc06..6fe38f277 100644 --- a/packaging/Readme.md +++ b/packaging/Readme.md @@ -7,7 +7,7 @@ export PBK_EDITION=std|ent make pkg ``` -To build binaries for PostgresPro Standart or Enterprise, a pgpro.tar.bz2 with latest git tree must be preset in `packaging/tarballs` directory: +To build binaries for PostgresPro Standart or Enterprise, a pgpro.tar.bz2 with latest git tree must be preset in `packaging/pkg/tarballs` directory: ``` cd packaging/pkg/tarballs git clone pgpro_repo pgpro From a454bd7d63e1329b2c46db6a71aa263ac7621cc6 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Sun, 26 Dec 2021 21:24:15 +0300 Subject: [PATCH 244/525] [ci skip] improve packaging: * adding testing of packages for PG-14 * updating postgres versions * adding reprepro config and rpmmacros to git * fixing forgotten rhel repo signing and package testing * adding alt-8 package testing * removing debian-8, ubuntu-14.04 and ubuntu-16.04 packaging * s/PGPRO Standart/PGPRO Standard/g --- packaging/Makefile.pkg | 24 +- packaging/Makefile.repo | 18 +- packaging/Makefile.test | 59 ++--- packaging/Readme.md | 6 +- packaging/pkg/Makefile.alt | 36 +-- packaging/pkg/Makefile.centos | 24 +- packaging/pkg/Makefile.debian | 65 ++--- packaging/pkg/Makefile.oraclelinux | 36 +-- packaging/pkg/Makefile.rhel | 24 +- packaging/pkg/Makefile.suse | 24 +- packaging/pkg/Makefile.ubuntu | 82 +----- packaging/pkg/scripts/alt.sh | 13 +- packaging/pkg/scripts/rpm.sh | 12 +- .../rpmbuild/SOURCES/pg_probackup-forks.repo | 2 +- .../SPECS/pg_probackup-repo-forks.spec | 2 +- packaging/repo/reprepro-conf/changelog.script | 246 ++++++++++++++++++ packaging/repo/reprepro-conf/distributions | 179 +++++++++++++ packaging/repo/rpm-conf/rpmmacros | 5 + packaging/repo/scripts/deb.sh | 4 +- packaging/repo/scripts/rpm.sh | 9 +- packaging/repo/scripts/suse.sh | 2 +- packaging/test/Makefile.alt | 40 ++- packaging/test/Makefile.centos | 28 +- packaging/test/Makefile.debian | 42 +-- packaging/test/Makefile.oraclelinux | 28 +- packaging/test/Makefile.rhel | 28 +- packaging/test/Makefile.suse | 28 +- packaging/test/Makefile.ubuntu | 49 ++-- packaging/test/scripts/deb.sh | 10 +- packaging/test/scripts/deb_forks.sh | 6 +- packaging/test/scripts/rpm.sh | 22 +- packaging/test/scripts/rpm_forks.sh | 7 +- 32 files changed, 771 insertions(+), 389 deletions(-) create mode 100755 packaging/repo/reprepro-conf/changelog.script create mode 100644 packaging/repo/reprepro-conf/distributions create mode 100644 packaging/repo/rpm-conf/rpmmacros diff --git a/packaging/Makefile.pkg b/packaging/Makefile.pkg index fc92ae408..e17243614 100644 --- a/packaging/Makefile.pkg +++ b/packaging/Makefile.pkg @@ -1,6 +1,6 @@ ifeq ($(PBK_EDITION),std) PBK_PKG_REPO = pg_probackup-forks - PBK_EDITION_FULL = Standart + PBK_EDITION_FULL = Standard PKG_NAME_SUFFIX = std- else ifeq ($(PBK_EDITION),ent) PBK_PKG_REPO = pg_probackup-forks @@ -42,12 +42,9 @@ build/all: build/debian build/ubuntu build/centos build/oraclelinux build/alt bu @echo Packaging is done ### DEBIAN -build/debian: build/debian_8 build/debian_9 build/debian_10 build/debian_11 +build/debian: build/debian_9 build/debian_10 build/debian_11 @echo Debian: done -build/debian_8: build/debian_8_9.6 build/debian_8_10 build/debian_8_11 build/debian_8_12 build/debian_8_13 build/debian_8_14 - 
@echo Debian 8: done - build/debian_9: build/debian_9_9.6 build/debian_9_10 build/debian_9_11 build/debian_9_12 build/debian_9_13 build/debian_9_14 @echo Debian 9: done @@ -58,15 +55,9 @@ build/debian_11: build/debian_11_9.6 build/debian_11_10 build/debian_11_11 build @echo Debian 11: done ### UBUNTU -build/ubuntu: build/ubuntu_14.04 build/ubuntu_16.04 build/ubuntu_18.04 build/ubuntu_20.04 +build/ubuntu: build/ubuntu_18.04 build/ubuntu_20.04 @echo Ubuntu: done -build/ubuntu_14.04: build/ubuntu_14.04_9.6 build/ubuntu_14.04_10 build/ubuntu_14.04_11 build/ubuntu_14.04_12 build/ubuntu_14.04_13 build/ubuntu_14.04_14 - @echo Ubuntu 14.04: done - -build/ubuntu_16.04: build/ubuntu_16.04_9.6 build/ubuntu_16.04_10 build/ubuntu_16.04_11 build/ubuntu_16.04_12 build/ubuntu_16.04_13 build/ubuntu_16.04_14 - @echo Ubuntu 16.04: done - build/ubuntu_18.04: build/ubuntu_18.04_9.6 build/ubuntu_18.04_10 build/ubuntu_18.04_11 build/ubuntu_18.04_12 build/ubuntu_18.04_13 build/ubuntu_18.04_14 @echo Ubuntu 18.04: done @@ -95,7 +86,8 @@ build/centos: build/centos_7 build/centos_8 #build/rpm_repo_package_centos build/centos_7: build/centos_7_9.6 build/centos_7_10 build/centos_7_11 build/centos_7_12 build/centos_7_13 build/centos_7_14 @echo Centos 7: done -build/centos_8: build/centos_8_9.6 build/centos_8_10 build/centos_8_11 build/centos_8_12 build/centos_8_13 build/centos_8_14 +# pgpro-9.6@centos-8 doesn't exist +build/centos_8: build/centos_8_10 build/centos_8_11 build/centos_8_12 build/centos_8_13 build/centos_8_14 #build/centos_8_9.6 @echo Centos 8: done # Oracle Linux @@ -108,7 +100,8 @@ build/oraclelinux_6: build/oraclelinux_6_9.6 build/oraclelinux_6_10 build/oracle build/oraclelinux_7: build/oraclelinux_7_9.6 build/oraclelinux_7_10 build/oraclelinux_7_11 build/oraclelinux_7_12 build/oraclelinux_7_13 build/oraclelinux_7_14 @echo Oraclelinux 7: done -build/oraclelinux_8: build/oraclelinux_8_9.6 build/oraclelinux_8_10 build/oraclelinux_8_11 build/oraclelinux_8_12 build/oraclelinux_8_13 build/oraclelinux_8_14 +# pgpro-9.6@oraclelinux-8 doesn't exist +build/oraclelinux_8: build/oraclelinux_8_10 build/oraclelinux_8_11 build/oraclelinux_8_12 build/oraclelinux_8_13 build/oraclelinux_8_14 #build/oraclelinux_8_9.6 @echo Oraclelinux 8: done # RHEL @@ -170,7 +163,8 @@ include packaging/pkg/Makefile.alt build/suse: build/suse_15.1 build/suse_15.2 @echo Suse: done -build/suse_15.1: build/suse_15.1_9.6 build/suse_15.1_10 build/suse_15.1_11 build/suse_15.1_12 build/suse_15.1_13 build/suse_15.1_14 +# there is no PG-14 in suse-15.1 repositories (test fails) +build/suse_15.1: build/suse_15.1_9.6 build/suse_15.1_10 build/suse_15.1_11 build/suse_15.1_12 build/suse_15.1_13 @echo Rhel 15.1: done build/suse_15.2: build/suse_15.2_9.6 build/suse_15.2_10 build/suse_15.2_11 build/suse_15.2_12 build/suse_15.2_13 build/suse_15.2_14 diff --git a/packaging/Makefile.repo b/packaging/Makefile.repo index 8186bfd80..10fb27137 100644 --- a/packaging/Makefile.repo +++ b/packaging/Makefile.repo @@ -1,15 +1,11 @@ #### REPO BUILD #### -repo: check_env repo/debian repo/ubuntu repo/centos repo/oraclelinux repo/alt repo/suse repo_finish #repo/rhel +repo: check_env repo/debian repo/ubuntu repo/centos repo/oraclelinux repo/rhel repo/alt repo/suse repo_finish @echo Build repo for all platform: done # Debian -repo/debian: build/repo_debian_8 build/repo_debian_9 build/repo_debian_10 build/repo_debian_11 +repo/debian: build/repo_debian_9 build/repo_debian_10 build/repo_debian_11 @echo Build repo for debian platforms: done -build/repo_debian_8: - $(call 
build_repo_deb,debian,8,jessie) - touch build/repo_debian_8 - build/repo_debian_9: $(call build_repo_deb,debian,9,stretch) touch build/repo_debian_9 @@ -23,17 +19,9 @@ build/repo_debian_11: touch build/repo_debian_11 # Ubuntu -repo/ubuntu: build/repo_ubuntu_14.04 build/repo_ubuntu_16.04 build/repo_ubuntu_18.04 build/repo_ubuntu_20.04 +repo/ubuntu: build/repo_ubuntu_18.04 build/repo_ubuntu_20.04 @echo Build repo for ubuntu platforms: done -build/repo_ubuntu_14.04: - $(call build_repo_deb,ubuntu,14.04,trusty) - touch build/repo_ubuntu_14.04 - -build/repo_ubuntu_16.04: - $(call build_repo_deb,ubuntu,16.04,xenial) - touch build/repo_ubuntu_16.04 - build/repo_ubuntu_18.04: $(call build_repo_deb,ubuntu,18.04,bionic) touch build/repo_ubuntu_18.04 diff --git a/packaging/Makefile.test b/packaging/Makefile.test index 21e850ccd..f5e004f01 100644 --- a/packaging/Makefile.test +++ b/packaging/Makefile.test @@ -9,33 +9,30 @@ endif test: build/test_all @echo Test for all platform: done -build/test_all: build/test_debian build/test_ubuntu build/test_centos build/test_oraclelinux build/test_alt build/test_suse # build/test_rhel +build/test_all: build/test_debian build/test_ubuntu build/test_centos build/test_oraclelinux build/test_alt build/test_suse #build/test_rhel @echo Package testing is done ### DEBIAN -build/test_debian: build/test_debian_9 build/test_debian_10 #build/test_debian_11 +build/test_debian: build/test_debian_9 build/test_debian_10 build/test_debian_11 @echo Debian: done -build/test_debian_9: build/test_debian_9_9.6 build/test_debian_9_10 build/test_debian_9_11 build/test_debian_9_12 build/test_debian_9_13 +build/test_debian_9: build/test_debian_9_9.6 build/test_debian_9_10 build/test_debian_9_11 build/test_debian_9_12 build/test_debian_9_13 build/test_debian_9_14 @echo Debian 9: done -build/test_debian_10: build/test_debian_10_9.6 build/test_debian_10_10 build/test_debian_10_11 build/test_debian_10_12 build/test_debian_10_13 +build/test_debian_10: build/test_debian_10_9.6 build/test_debian_10_10 build/test_debian_10_11 build/test_debian_10_12 build/test_debian_10_13 build/test_debian_10_14 @echo Debian 10: done -build/test_debian_11: build/test_debian_11_9.6 build/test_debian_11_10 build/test_debian_11_11 build/test_debian_11_12 build/test_debian_11_13 +build/test_debian_11: build/test_debian_11_9.6 build/test_debian_11_10 build/test_debian_11_11 build/test_debian_11_12 build/test_debian_11_13 build/test_debian_11_14 @echo Debian 11: done ### UBUNTU -build/test_ubuntu: build/test_ubuntu_16.04 build/test_ubuntu_18.04 build/test_ubuntu_20.04 +build/test_ubuntu: build/test_ubuntu_18.04 build/test_ubuntu_20.04 @echo Ubuntu: done -build/test_ubuntu_16.04: build/test_ubuntu_16.04_9.6 build/test_ubuntu_16.04_10 build/test_ubuntu_16.04_11 build/test_ubuntu_16.04_12 build/test_ubuntu_16.04_13 - @echo Ubuntu 16.04: done - -build/test_ubuntu_18.04: build/test_ubuntu_18.04_9.6 build/test_ubuntu_18.04_10 build/test_ubuntu_18.04_11 build/test_ubuntu_18.04_12 build/test_ubuntu_18.04_13 +build/test_ubuntu_18.04: build/test_ubuntu_18.04_9.6 build/test_ubuntu_18.04_10 build/test_ubuntu_18.04_11 build/test_ubuntu_18.04_12 build/test_ubuntu_18.04_13 build/test_ubuntu_18.04_14 @echo Ubuntu 18.04: done -build/test_ubuntu_20.04: build/test_ubuntu_20.04_9.6 build/test_ubuntu_20.04_10 build/test_ubuntu_20.04_11 build/test_ubuntu_20.04_12 build/test_ubuntu_20.04_13 +build/test_ubuntu_20.04: build/test_ubuntu_20.04_9.6 build/test_ubuntu_20.04_10 build/test_ubuntu_20.04_11 build/test_ubuntu_20.04_12 
build/test_ubuntu_20.04_13 build/test_ubuntu_20.04_14 @echo Ubuntu 20.04: done define test_deb @@ -57,42 +54,32 @@ include packaging/test/Makefile.ubuntu build/test_centos: build/test_centos_7 build/test_centos_8 @echo Centos: done -build/test_centos_7: build/test_centos_7_9.6 build/test_centos_7_10 build/test_centos_7_11 build/test_centos_7_12 build/test_centos_7_13 -# pgpro -#build/test_centos_7: build/test_centos_7_9.6 build/test_centos_7_10 build/test_centos_7_11 build/test_centos_7_12 +build/test_centos_7: build/test_centos_7_9.6 build/test_centos_7_10 build/test_centos_7_11 build/test_centos_7_12 build/test_centos_7_13 #build/test_centos_7_14 @echo Centos 7: done -build/test_centos_8: build/test_centos_8_9.6 build/test_centos_8_10 build/test_centos_8_11 build/test_centos_8_12 build/test_centos_8_13 -# pgpro -#build/test_centos_8: build/test_centos_8_10 build/test_centos_8_11 build/test_centos_8_12 +# pgpro-9.6@centos-8 doesn't exist +build/test_centos_8: build/test_centos_8_10 build/test_centos_8_11 build/test_centos_8_12 build/test_centos_8_13 #build/test_centos_8_14 build/test_centos_8_9.6 @echo Centos 8: done # Oracle Linux build/test_oraclelinux: build/test_oraclelinux_7 build/test_oraclelinux_8 @echo Oraclelinux: done -build/test_oraclelinux_7: build/test_oraclelinux_7_9.6 build/test_oraclelinux_7_10 build/test_oraclelinux_7_11 build/test_oraclelinux_7_12 build/test_oraclelinux_7_13 -# pgpro -#build/test_oraclelinux_7: build/test_oraclelinux_7_9.6 build/test_oraclelinux_7_10 build/test_oraclelinux_7_11 build/test_oraclelinux_7_12 +build/test_oraclelinux_7: build/test_oraclelinux_7_9.6 build/test_oraclelinux_7_10 build/test_oraclelinux_7_11 build/test_oraclelinux_7_12 build/test_oraclelinux_7_13 #build/test_oraclelinux_7_14 @echo Oraclelinux 7: done -build/test_oraclelinux_8: build/test_oraclelinux_8_9.6 build/test_oraclelinux_8_10 build/test_oraclelinux_8_11 build/test_oraclelinux_8_12 build/test_oraclelinux_8_13 -# pgpro -#build/test_oraclelinux_8: build/test_oraclelinux_8_10 build/test_oraclelinux_8_11 build/test_oraclelinux_8_12 +# pgpro-9.6@oraclelinux-8 doesn't exist +build/test_oraclelinux_8: build/test_oraclelinux_8_10 build/test_oraclelinux_8_11 build/test_oraclelinux_8_12 build/test_oraclelinux_8_13 #build/test_oraclelinux_8_14 build/test_oraclelinux_8_9.6 @echo Oraclelinux 8: done # RHEL -build/test_rhel: build/test_rhel_7 build/test_rhel_8 +build/test_rhel: build/test_rhel_7 #build/test_rhel_8 @echo Rhel: done -build/test_rhel_7: build/test_rhel_7_9.5 build/test_rhel_7_9.6 build/test_rhel_7_10 build/test_rhel_7_11 build/test_rhel_7_12 build/test_rhel_7_13 -# pgpro -#build/test_rhel_7: build/test_rhel_7_9.5 build/test_rhel_7_9.6 build/test_rhel_7_10 build/test_rhel_7_11 build/test_rhel_7_12 +build/test_rhel_7: build/test_rhel_7_9.6 build/test_rhel_7_10 build/test_rhel_7_11 build/test_rhel_7_12 build/test_rhel_7_13 #build/test_rhel_7_14 @echo Rhel 7: done -build/test_rhel_8: build/test_rhel_8_9.5 build/test_rhel_8_9.6 build/test_rhel_8_10 build/test_rhel_8_11 build/test_rhel_8_12 build/test_rhel_8_13 -# pgpro -#build/test_rhel_8: build/test_rhel_8_9.5 build/test_rhel_8_9.6 build/test_rhel_8_10 build/test_rhel_8_11 build/test_rhel_8_12 +build/test_rhel_8: build/test_rhel_8_9.6 build/test_rhel_8_10 build/test_rhel_8_11 build/test_rhel_8_12 build/test_rhel_8_13 build/test_rhel_8_14 @echo Rhel 8: done define test_rpm @@ -112,10 +99,16 @@ include packaging/test/Makefile.rhel include packaging/test/Makefile.oraclelinux # Alt Linux -build/test_alt: build/test_alt_9 
+build/test_alt: build/test_alt_8 build/test_alt_9 @echo Alt Linux: done -build/test_alt_9: build/test_alt_9_9.6 build/test_alt_9_10 build/test_alt_9_11 build/test_alt_9_12 build/test_alt_9_13 +# nginx@alt7 fall with 'nginx: [alert] sysctl(KERN_RTSIGMAX) failed (1: Operation not permitted)' +# within docker on modern host linux kernels (this nginx build require Linux between 2.2.19 and 2.6.17) + +build/test_alt_8: build/test_alt_8_9.6 build/test_alt_8_10 build/test_alt_8_11 build/test_alt_8_12 build/test_alt_8_13 build/test_alt_8_14 + @echo Alt Linux 8: done + +build/test_alt_9: build/test_alt_9_9.6 build/test_alt_9_10 build/test_alt_9_11 build/test_alt_9_12 build/test_alt_9_13 build/test_alt_9_14 @echo Alt Linux 9: done define test_alt @@ -139,7 +132,7 @@ build/test_suse: build/test_suse_15.1 build/test_suse_15.2 build/test_suse_15.1: build/test_suse_15.1_9.6 build/test_suse_15.1_10 build/test_suse_15.1_11 build/test_suse_15.1_12 build/test_suse_15.1_13 @echo Rhel 15.1: done -build/test_suse_15.2: build/test_suse_15.2_9.6 build/test_suse_15.2_10 build/test_suse_15.2_11 build/test_suse_15.2_12 build/test_suse_15.2_13 +build/test_suse_15.2: build/test_suse_15.2_9.6 build/test_suse_15.2_10 build/test_suse_15.2_11 build/test_suse_15.2_12 build/test_suse_15.2_13 build/test_suse_15.2_14 @echo Rhel 15.1: done define test_suse diff --git a/packaging/Readme.md b/packaging/Readme.md index 6fe38f277..f4437d838 100644 --- a/packaging/Readme.md +++ b/packaging/Readme.md @@ -4,10 +4,10 @@ export PBK_VERSION=2.4.17 export PBK_HASH=57f871accce2604 export PBK_RELEASE=1 export PBK_EDITION=std|ent -make pkg +make --keep-going pkg ``` -To build binaries for PostgresPro Standart or Enterprise, a pgpro.tar.bz2 with latest git tree must be preset in `packaging/pkg/tarballs` directory: +To build binaries for PostgresPro Standard or Enterprise, a pgpro.tar.bz2 with latest git tree must be preset in `packaging/pkg/tarballs` directory: ``` cd packaging/pkg/tarballs git clone pgpro_repo pgpro @@ -19,3 +19,5 @@ Repo must be build using 1 thread (due to debian bullshit): ``` make repo -j1 ``` + + diff --git a/packaging/pkg/Makefile.alt b/packaging/pkg/Makefile.alt index 919d3f58c..28eabf53f 100644 --- a/packaging/pkg/Makefile.alt +++ b/packaging/pkg/Makefile.alt @@ -4,27 +4,27 @@ build/alt_7_9.5: touch build/alt_7_9.5 build/alt_7_9.6: - $(call build_alt,alt,7,,9.6,9.6.23) + $(call build_alt,alt,7,,9.6,9.6.24) touch build/alt_7_9.6 build/alt_7_10: - $(call build_alt,alt,7,,10,10.18) + $(call build_alt,alt,7,,10,10.19) touch build/alt_7_10 build/alt_7_11: - $(call build_alt,alt,7,,11,11.13) + $(call build_alt,alt,7,,11,11.14) touch build/alt_7_11 build/alt_7_12: - $(call build_alt,alt,7,,12,12.8) + $(call build_alt,alt,7,,12,12.9) touch build/alt_7_12 build/alt_7_13: - $(call build_alt,alt,7,,13,13.4) + $(call build_alt,alt,7,,13,13.5) touch build/alt_7_13 build/alt_7_14: - $(call build_alt,alt,7,,14,14.0) + $(call build_alt,alt,7,,14,14.1) touch build/alt_7_14 # ALT 8 @@ -33,27 +33,27 @@ build/alt_8_9.5: touch build/alt_8_9.5 build/alt_8_9.6: - $(call build_alt,alt,8,,9.6,9.6.23) + $(call build_alt,alt,8,,9.6,9.6.24) touch build/alt_8_9.6 build/alt_8_10: - $(call build_alt,alt,8,,10,10.18) + $(call build_alt,alt,8,,10,10.19) touch build/alt_8_10 build/alt_8_11: - $(call build_alt,alt,8,,11,11.13) + $(call build_alt,alt,8,,11,11.14) touch build/alt_8_11 build/alt_8_12: - $(call build_alt,alt,8,,12,12.8) + $(call build_alt,alt,8,,12,12.9) touch build/alt_8_12 build/alt_8_13: - $(call build_alt,alt,8,,13,13.4) + $(call 
build_alt,alt,8,,13,13.5) touch build/alt_8_13 build/alt_8_14: - $(call build_alt,alt,8,,14,14.0) + $(call build_alt,alt,8,,14,14.1) touch build/alt_8_14 # ALT 9 @@ -62,26 +62,26 @@ build/alt_9_9.5: touch build/alt_9_9.5 build/alt_9_9.6: - $(call build_alt,alt,9,,9.6,9.6.23) + $(call build_alt,alt,9,,9.6,9.6.24) touch build/alt_9_9.6 build/alt_9_10: - $(call build_alt,alt,9,,10,10.18) + $(call build_alt,alt,9,,10,10.19) touch build/alt_9_10 build/alt_9_11: - $(call build_alt,alt,9,,11,11.13) + $(call build_alt,alt,9,,11,11.14) touch build/alt_9_11 build/alt_9_12: - $(call build_alt,alt,9,,12,12.8) + $(call build_alt,alt,9,,12,12.9) touch build/alt_9_12 build/alt_9_13: - $(call build_alt,alt,9,,13,13.4) + $(call build_alt,alt,9,,13,13.5) touch build/alt_9_13 build/alt_9_14: - $(call build_alt,alt,9,,14,14.0) + $(call build_alt,alt,9,,14,14.1) touch build/alt_9_14 diff --git a/packaging/pkg/Makefile.centos b/packaging/pkg/Makefile.centos index 9542a5202..fb537d0a6 100644 --- a/packaging/pkg/Makefile.centos +++ b/packaging/pkg/Makefile.centos @@ -4,27 +4,27 @@ build/centos_7_9.5: touch build/centos_7_9.5 build/centos_7_9.6: - $(call build_rpm,centos,7,,9.6,9.6.23) + $(call build_rpm,centos,7,,9.6,9.6.24) touch build/centos_7_9.6 build/centos_7_10: - $(call build_rpm,centos,7,,10,10.18) + $(call build_rpm,centos,7,,10,10.19) touch build/centos_7_10 build/centos_7_11: - $(call build_rpm,centos,7,,11,11.13) + $(call build_rpm,centos,7,,11,11.14) touch build/centos_7_11 build/centos_7_12: - $(call build_rpm,centos,7,,12,12.8) + $(call build_rpm,centos,7,,12,12.9) touch build/centos_7_12 build/centos_7_13: - $(call build_rpm,centos,7,,13,13.4) + $(call build_rpm,centos,7,,13,13.5) touch build/centos_7_13 build/centos_7_14: - $(call build_rpm,centos,7,,14,14.0) + $(call build_rpm,centos,7,,14,14.1) touch build/centos_7_14 # CENTOS 8 @@ -33,25 +33,25 @@ build/centos_8_9.5: touch build/centos_8_9.5 build/centos_8_9.6: - $(call build_rpm,centos,8,,9.6,9.6.23) + $(call build_rpm,centos,8,,9.6,9.6.24) touch build/centos_8_9.6 build/centos_8_10: - $(call build_rpm,centos,8,,10,10.18) + $(call build_rpm,centos,8,,10,10.19) touch build/centos_8_10 build/centos_8_11: - $(call build_rpm,centos,8,,11,11.13) + $(call build_rpm,centos,8,,11,11.14) touch build/centos_8_11 build/centos_8_12: - $(call build_rpm,centos,8,,12,12.8) + $(call build_rpm,centos,8,,12,12.9) touch build/centos_8_12 build/centos_8_13: - $(call build_rpm,centos,8,,13,13.4) + $(call build_rpm,centos,8,,13,13.5) touch build/centos_8_13 build/centos_8_14: - $(call build_rpm,centos,8,,14,14.0) + $(call build_rpm,centos,8,,14,14.1) touch build/centos_8_14 diff --git a/packaging/pkg/Makefile.debian b/packaging/pkg/Makefile.debian index 7c82a412b..d9c885d3a 100644 --- a/packaging/pkg/Makefile.debian +++ b/packaging/pkg/Makefile.debian @@ -1,59 +1,30 @@ -# DEBIAN 8 -build/debian_8_9.5: - $(call build_deb,debian,8,jessie,9.5,9.5.25) - touch build/debian_8_9.5 - -build/debian_8_9.6: - $(call build_deb,debian,8,jessie,9.6,9.6.23) - touch build/debian_8_9.6 - -build/debian_8_10: - $(call build_deb,debian,8,jessie,10,10.18) - touch build/debian_8_10 - -build/debian_8_11: - $(call build_deb,debian,8,jessie,11,11.13) - touch build/debian_8_11 - -build/debian_8_12: - $(call build_deb,debian,8,jessie,12,12.8) - touch build/debian_8_12 - -build/debian_8_13: - $(call build_deb,debian,8,jessie,13,13.4) - touch build/debian_8_13 - -build/debian_8_14: - $(call build_deb,debian,8,jessie,14,14.0) - touch build/debian_8_14 - # DEBIAN 9 build/debian_9_9.5: $(call 
build_deb,debian,9,stretch,9.5,9.5.25) touch build/debian_9_9.5 build/debian_9_9.6: - $(call build_deb,debian,9,stretch,9.6,9.6.23) + $(call build_deb,debian,9,stretch,9.6,9.6.24) touch build/debian_9_9.6 build/debian_9_10: - $(call build_deb,debian,9,stretch,10,10.18) + $(call build_deb,debian,9,stretch,10,10.19) touch build/debian_9_10 build/debian_9_11: - $(call build_deb,debian,9,stretch,11,11.13) + $(call build_deb,debian,9,stretch,11,11.14) touch build/debian_9_11 build/debian_9_12: - $(call build_deb,debian,9,stretch,12,12.8) + $(call build_deb,debian,9,stretch,12,12.9) touch build/debian_9_12 build/debian_9_13: - $(call build_deb,debian,9,stretch,13,13.4) + $(call build_deb,debian,9,stretch,13,13.5) touch build/debian_9_13 build/debian_9_14: - $(call build_deb,debian,9,stretch,14,14.0) + $(call build_deb,debian,9,stretch,14,14.1) touch build/debian_9_14 # DEBIAN 10 @@ -62,27 +33,27 @@ build/debian_10_9.5: touch build/debian_10_9.5 build/debian_10_9.6: - $(call build_deb,debian,10,buster,9.6,9.6.23) + $(call build_deb,debian,10,buster,9.6,9.6.24) touch build/debian_10_9.6 build/debian_10_10: - $(call build_deb,debian,10,buster,10,10.18) + $(call build_deb,debian,10,buster,10,10.19) touch build/debian_10_10 build/debian_10_11: - $(call build_deb,debian,10,buster,11,11.13) + $(call build_deb,debian,10,buster,11,11.14) touch build/debian_10_11 build/debian_10_12: - $(call build_deb,debian,10,buster,12,12.8) + $(call build_deb,debian,10,buster,12,12.9) touch build/debian_10_12 build/debian_10_13: - $(call build_deb,debian,10,buster,13,13.4) + $(call build_deb,debian,10,buster,13,13.5) touch build/debian_10_13 build/debian_10_14: - $(call build_deb,debian,10,buster,14,14.0) + $(call build_deb,debian,10,buster,14,14.1) touch build/debian_10_14 # DEBIAN 11 @@ -91,25 +62,25 @@ build/debian_11_9.5: touch build/debian_11_9.5 build/debian_11_9.6: - $(call build_deb,debian,11,bullseye,9.6,9.6.23) + $(call build_deb,debian,11,bullseye,9.6,9.6.24) touch build/debian_11_9.6 build/debian_11_10: - $(call build_deb,debian,11,bullseye,10,10.18) + $(call build_deb,debian,11,bullseye,10,10.19) touch build/debian_11_10 build/debian_11_11: - $(call build_deb,debian,11,bullseye,11,11.13) + $(call build_deb,debian,11,bullseye,11,11.14) touch build/debian_11_11 build/debian_11_12: - $(call build_deb,debian,11,bullseye,12,12.8) + $(call build_deb,debian,11,bullseye,12,12.9) touch build/debian_11_12 build/debian_11_13: - $(call build_deb,debian,11,bullseye,13,13.4) + $(call build_deb,debian,11,bullseye,13,13.5) touch build/debian_11_13 build/debian_11_14: - $(call build_deb,debian,11,bullseye,14,14.0) + $(call build_deb,debian,11,bullseye,14,14.1) touch build/debian_11_14 diff --git a/packaging/pkg/Makefile.oraclelinux b/packaging/pkg/Makefile.oraclelinux index 3dbdbd424..127a578f1 100644 --- a/packaging/pkg/Makefile.oraclelinux +++ b/packaging/pkg/Makefile.oraclelinux @@ -4,27 +4,27 @@ build/oraclelinux_6_9.5: touch build/oraclelinux_6_9.5 build/oraclelinux_6_9.6: - $(call build_rpm,oraclelinux,6,,9.6,9.6.23) + $(call build_rpm,oraclelinux,6,,9.6,9.6.24) touch build/oraclelinux_6_9.6 build/oraclelinux_6_10: - $(call build_rpm,oraclelinux,6,,10,10.18) + $(call build_rpm,oraclelinux,6,,10,10.19) touch build/oraclelinux_6_10 build/oraclelinux_6_11: - $(call build_rpm,oraclelinux,6,,11,11.13) + $(call build_rpm,oraclelinux,6,,11,11.14) touch build/oraclelinux_6_11 build/oraclelinux_6_12: - $(call build_rpm,oraclelinux,6,,12,12.8) + $(call build_rpm,oraclelinux,6,,12,12.9) touch build/oraclelinux_6_12 
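Every per-distro, per-PostgreSQL-version packaging rule in these Makefiles follows the same stamp-file idiom: invoke the build macro once, then touch a file under build/ so make treats the target as up to date on later runs, while the per-distro aggregate targets simply list those stamps as prerequisites. A minimal generic sketch of the idiom (rule and macro names are illustrative placeholders, not the repository's actual build_rpm/build_deb definitions):

```
# illustrative stamp-file pattern; do_build stands in for the real build macro
build/example_13:
	$(call do_build,example,13,13.5)
	touch build/example_13        # stamp file: make skips this rule on re-runs

build/example: build/example_13
	@echo example: done
```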
build/oraclelinux_6_13: - $(call build_rpm,oraclelinux,6,,13,13.4) + $(call build_rpm,oraclelinux,6,,13,13.5) touch build/oraclelinux_6_13 build/oraclelinux_6_14: - $(call build_rpm,oraclelinux,6,,14,14.0) + $(call build_rpm,oraclelinux,6,,14,14.1) touch build/oraclelinux_6_14 # ORACLE LINUX 7 @@ -33,27 +33,27 @@ build/oraclelinux_7_9.5: touch build/oraclelinux_7_9.5 build/oraclelinux_7_9.6: - $(call build_rpm,oraclelinux,7,,9.6,9.6.23) + $(call build_rpm,oraclelinux,7,,9.6,9.6.24) touch build/oraclelinux_7_9.6 build/oraclelinux_7_10: - $(call build_rpm,oraclelinux,7,,10,10.18) + $(call build_rpm,oraclelinux,7,,10,10.19) touch build/oraclelinux_7_10 build/oraclelinux_7_11: - $(call build_rpm,oraclelinux,7,,11,11.13) + $(call build_rpm,oraclelinux,7,,11,11.14) touch build/oraclelinux_7_11 build/oraclelinux_7_12: - $(call build_rpm,oraclelinux,7,,12,12.8) + $(call build_rpm,oraclelinux,7,,12,12.9) touch build/oraclelinux_7_12 build/oraclelinux_7_13: - $(call build_rpm,oraclelinux,7,,13,13.4) + $(call build_rpm,oraclelinux,7,,13,13.5) touch build/oraclelinux_7_13 build/oraclelinux_7_14: - $(call build_rpm,oraclelinux,7,,14,14.0) + $(call build_rpm,oraclelinux,7,,14,14.1) touch build/oraclelinux_7_14 # ORACLE LINUX 8 @@ -62,26 +62,26 @@ build/oraclelinux_8_9.5: touch build/oraclelinux_8_9.5 build/oraclelinux_8_9.6: - $(call build_rpm,oraclelinux,8,,9.6,9.6.23) + $(call build_rpm,oraclelinux,8,,9.6,9.6.24) touch build/oraclelinux_8_9.6 build/oraclelinux_8_10: - $(call build_rpm,oraclelinux,8,,10,10.18) + $(call build_rpm,oraclelinux,8,,10,10.19) touch build/oraclelinux_8_10 build/oraclelinux_8_11: - $(call build_rpm,oraclelinux,8,,11,11.13) + $(call build_rpm,oraclelinux,8,,11,11.14) touch build/oraclelinux_8_11 build/oraclelinux_8_12: - $(call build_rpm,oraclelinux,8,,12,12.8) + $(call build_rpm,oraclelinux,8,,12,12.9) touch build/oraclelinux_8_12 build/oraclelinux_8_13: - $(call build_rpm,oraclelinux,8,,13,13.4) + $(call build_rpm,oraclelinux,8,,13,13.5) touch build/oraclelinux_8_13 build/oraclelinux_8_14: - $(call build_rpm,oraclelinux,8,,14,14.0) + $(call build_rpm,oraclelinux,8,,14,14.1) touch build/oraclelinux_8_14 diff --git a/packaging/pkg/Makefile.rhel b/packaging/pkg/Makefile.rhel index b604a990d..8c1b0687b 100644 --- a/packaging/pkg/Makefile.rhel +++ b/packaging/pkg/Makefile.rhel @@ -4,27 +4,27 @@ build/rhel_7_9.5: touch build/rhel_7_9.5 build/rhel_7_9.6: - $(call build_rpm,rhel,7,7Server,9.6,9.6.23) + $(call build_rpm,rhel,7,7Server,9.6,9.6.24) touch build/rhel_7_9.6 build/rhel_7_10: - $(call build_rpm,rhel,7,7Server,10,10.18) + $(call build_rpm,rhel,7,7Server,10,10.19) touch build/rhel_7_10 build/rhel_7_11: - $(call build_rpm,rhel,7,7Server,11,11.13) + $(call build_rpm,rhel,7,7Server,11,11.14) touch build/rhel_7_11 build/rhel_7_12: - $(call build_rpm,rhel,7,7Server,12,12.8) + $(call build_rpm,rhel,7,7Server,12,12.9) touch build/rhel_7_12 build/rhel_7_13: - $(call build_rpm,rhel,7,7Server,13,13.4) + $(call build_rpm,rhel,7,7Server,13,13.5) touch build/rhel_7_13 build/rhel_7_14: - $(call build_rpm,rhel,7,7Server,14,14.0) + $(call build_rpm,rhel,7,7Server,14,14.1) touch build/rhel_7_14 # RHEL 8 @@ -33,25 +33,25 @@ build/rhel_8_9.5: touch build/rhel_8_9.5 build/rhel_8_9.6: - $(call build_rpm,rhel,8,8Server,9.6,9.6.23) + $(call build_rpm,rhel,8,8Server,9.6,9.6.24) touch build/rhel_8_9.6 build/rhel_8_10: - $(call build_rpm,rhel,8,8Server,10,10.18) + $(call build_rpm,rhel,8,8Server,10,10.19) touch build/rhel_8_10 build/rhel_8_11: - $(call build_rpm,rhel,8,8Server,11,11.13) + $(call 
build_rpm,rhel,8,8Server,11,11.14) touch build/rhel_8_11 build/rhel_8_12: - $(call build_rpm,rhel,8,8Server,12,12.8) + $(call build_rpm,rhel,8,8Server,12,12.9) touch build/rhel_8_12 build/rhel_8_13: - $(call build_rpm,rhel,8,8Server,13,13.4) + $(call build_rpm,rhel,8,8Server,13,13.5) touch build/rhel_8_13 build/rhel_8_14: - $(call build_rpm,rhel,8,8Server,14,14.0) + $(call build_rpm,rhel,8,8Server,14,14.1) touch build/rhel_8_14 diff --git a/packaging/pkg/Makefile.suse b/packaging/pkg/Makefile.suse index 5af22c5d0..c71ebd389 100644 --- a/packaging/pkg/Makefile.suse +++ b/packaging/pkg/Makefile.suse @@ -4,27 +4,27 @@ build/suse_15.1_9.5: touch build/suse_15.1_9.5 build/suse_15.1_9.6: - $(call build_suse,suse,15.1,,9.6,9.6.23) + $(call build_suse,suse,15.1,,9.6,9.6.24) touch build/suse_15.1_9.6 build/suse_15.1_10: - $(call build_suse,suse,15.1,,10,10.18) + $(call build_suse,suse,15.1,,10,10.19) touch build/suse_15.1_10 build/suse_15.1_11: - $(call build_suse,suse,15.1,,11,11.13) + $(call build_suse,suse,15.1,,11,11.14) touch build/suse_15.1_11 build/suse_15.1_12: - $(call build_suse,suse,15.1,,12,12.8) + $(call build_suse,suse,15.1,,12,12.9) touch build/suse_15.1_12 build/suse_15.1_13: - $(call build_suse,suse,15.1,,13,13.4) + $(call build_suse,suse,15.1,,13,13.5) touch build/suse_15.1_13 build/suse_15.1_14: - $(call build_suse,suse,15.1,,14,14.0) + $(call build_suse,suse,15.1,,14,14.1) touch build/suse_15.1_14 # Suse 15.2 @@ -33,25 +33,25 @@ build/suse_15.2_9.5: touch build/suse_15.2_9.5 build/suse_15.2_9.6: - $(call build_suse,suse,15.2,,9.6,9.6.23) + $(call build_suse,suse,15.2,,9.6,9.6.24) touch build/suse_15.2_9.6 build/suse_15.2_10: - $(call build_suse,suse,15.2,,10,10.18) + $(call build_suse,suse,15.2,,10,10.19) touch build/suse_15.2_10 build/suse_15.2_11: - $(call build_suse,suse,15.2,,11,11.13) + $(call build_suse,suse,15.2,,11,11.14) touch build/suse_15.2_11 build/suse_15.2_12: - $(call build_suse,suse,15.2,,12,12.8) + $(call build_suse,suse,15.2,,12,12.9) touch build/suse_15.2_12 build/suse_15.2_13: - $(call build_suse,suse,15.2,,13,13.4) + $(call build_suse,suse,15.2,,13,13.5) touch build/suse_15.2_13 build/suse_15.2_14: - $(call build_suse,suse,15.2,,14,14.0) + $(call build_suse,suse,15.2,,14,14.1) touch build/suse_15.2_14 diff --git a/packaging/pkg/Makefile.ubuntu b/packaging/pkg/Makefile.ubuntu index 88803c64f..02acd6c67 100644 --- a/packaging/pkg/Makefile.ubuntu +++ b/packaging/pkg/Makefile.ubuntu @@ -4,27 +4,27 @@ build/ubuntu_20.04_9.5: touch build/ubuntu_20.04_9.5 build/ubuntu_20.04_9.6: - $(call build_deb,ubuntu,20.04,focal,9.6,9.6.23) + $(call build_deb,ubuntu,20.04,focal,9.6,9.6.24) touch build/ubuntu_20.04_9.6 build/ubuntu_20.04_10: - $(call build_deb,ubuntu,20.04,focal,10,10.18) + $(call build_deb,ubuntu,20.04,focal,10,10.19) touch build/ubuntu_20.04_10 build/ubuntu_20.04_11: - $(call build_deb,ubuntu,20.04,focal,11,11.13) + $(call build_deb,ubuntu,20.04,focal,11,11.14) touch build/ubuntu_20.04_11 build/ubuntu_20.04_12: - $(call build_deb,ubuntu,20.04,focal,12,12.8) + $(call build_deb,ubuntu,20.04,focal,12,12.9) touch build/ubuntu_20.04_12 build/ubuntu_20.04_13: - $(call build_deb,ubuntu,20.04,focal,13,13.4) + $(call build_deb,ubuntu,20.04,focal,13,13.5) touch build/ubuntu_20.04_13 build/ubuntu_20.04_14: - $(call build_deb,ubuntu,20.04,focal,14,14.0) + $(call build_deb,ubuntu,20.04,focal,14,14.1) touch build/ubuntu_20.04_14 # UBUNTU 18.04 @@ -33,84 +33,26 @@ build/ubuntu_18.04_9.5: touch build/ubuntu_18.04_9.5 build/ubuntu_18.04_9.6: - $(call 
build_deb,ubuntu,18.04,bionic,9.6,9.6.23) + $(call build_deb,ubuntu,18.04,bionic,9.6,9.6.24) touch build/ubuntu_18.04_9.6 build/ubuntu_18.04_10: - $(call build_deb,ubuntu,18.04,bionic,10,10.18) + $(call build_deb,ubuntu,18.04,bionic,10,10.19) touch build/ubuntu_18.04_10 build/ubuntu_18.04_11: - $(call build_deb,ubuntu,18.04,bionic,11,11.13) + $(call build_deb,ubuntu,18.04,bionic,11,11.14) touch build/ubuntu_18.04_11 build/ubuntu_18.04_12: - $(call build_deb,ubuntu,18.04,bionic,12,12.8) + $(call build_deb,ubuntu,18.04,bionic,12,12.9) touch build/ubuntu_18.04_12 build/ubuntu_18.04_13: - $(call build_deb,ubuntu,18.04,bionic,13,13.4) + $(call build_deb,ubuntu,18.04,bionic,13,13.5) touch build/ubuntu_18.04_13 build/ubuntu_18.04_14: - $(call build_deb,ubuntu,18.04,bionic,14,14.0) + $(call build_deb,ubuntu,18.04,bionic,14,14.1) touch build/ubuntu_18.04_14 -# UBUNTU 16.04 -build/ubuntu_16.04_9.5: - $(call build_deb,ubuntu,16.04,xenial,9.5,9.5.25) - touch build/ubuntu_16.04_9.5 - -build/ubuntu_16.04_9.6: - $(call build_deb,ubuntu,16.04,xenial,9.6,9.6.23) - touch build/ubuntu_16.04_9.6 - -build/ubuntu_16.04_10: - $(call build_deb,ubuntu,16.04,xenial,10,10.18) - touch build/ubuntu_16.04_10 - -build/ubuntu_16.04_11: - $(call build_deb,ubuntu,16.04,xenial,11,11.13) - touch build/ubuntu_16.04_11 - -build/ubuntu_16.04_12: - $(call build_deb,ubuntu,16.04,xenial,12,12.8) - touch build/ubuntu_16.04_12 - -build/ubuntu_16.04_13: - $(call build_deb,ubuntu,16.04,xenial,13,13.4) - touch build/ubuntu_16.04_13 - -build/ubuntu_16.04_14: - $(call build_deb,ubuntu,16.04,xenial,14,14.0) - touch build/ubuntu_16.04_14 - - -# UBUNTU 14.04 -build/ubuntu_14.04_9.5: - $(call build_deb,ubuntu,14.04,trusty,9.5,9.5.25) - touch build/ubuntu_14.04_9.5 - -build/ubuntu_14.04_9.6: - $(call build_deb,ubuntu,14.04,trusty,9.6,9.6.23) - touch build/ubuntu_14.04_9.6 - -build/ubuntu_14.04_10: - $(call build_deb,ubuntu,14.04,trusty,10,10.18) - touch build/ubuntu_14.04_10 - -build/ubuntu_14.04_11: - $(call build_deb,ubuntu,14.04,trusty,11,11.13) - touch build/ubuntu_14.04_11 - -build/ubuntu_14.04_12: - $(call build_deb,ubuntu,14.04,trusty,12,12.8) - touch build/ubuntu_14.04_12 - -build/ubuntu_14.04_13: - $(call build_deb,ubuntu,14.04,trusty,13,13.4) - touch build/ubuntu_14.04_13 - -build/ubuntu_14.04_14: - $(call build_deb,ubuntu,14.04,trusty,14,14.0) - touch build/ubuntu_14.04_14 diff --git a/packaging/pkg/scripts/alt.sh b/packaging/pkg/scripts/alt.sh index ae3c713fa..7c3971d6a 100755 --- a/packaging/pkg/scripts/alt.sh +++ b/packaging/pkg/scripts/alt.sh @@ -49,11 +49,11 @@ else cd /root/rpmbuild/SOURCES/pgpro PGPRO_TOC=$(echo ${PG_FULL_VERSION} | sed 's|\.|_|g') - if [[ ${PBK_EDITION} == 'std' ]] ; then - git checkout "PGPRO${PGPRO_TOC}_1" - else - git checkout "PGPROEE${PGPRO_TOC}_1" - fi + if [[ ${PBK_EDITION} == 'std' ]] ; then + git checkout "PGPRO${PGPRO_TOC}_1" + else + git checkout "PGPROEE${PGPRO_TOC}_1" + fi rm -rf .git cd /root/rpmbuild/SOURCES/ @@ -86,7 +86,7 @@ else sed -i "s/@PG_FULL_VERSION@/${PG_FULL_VERSION}/" pg_probackup.alt.forks.spec if [ ${PG_VERSION} != '9.6' ]; then - sed -i "s|@PREFIX@|/opt/pgpro/${EDITION}-${PG_VERSION}|g" pg_probackup.alt.forks.spec + sed -i "s|@PREFIX@|/opt/pgpro/${EDITION}-${PG_VERSION}|g" pg_probackup.alt.forks.spec fi fi @@ -106,7 +106,6 @@ fi apt-get install -y flex libldap-devel libpam-devel libreadline-devel libssl-devel if [[ ${PBK_EDITION} == '' ]] ; then - # build pg_probackup rpmbuild -bs pg_probackup.alt.spec rpmbuild -ba pg_probackup.alt.spec #2>&1 | tee -ai /app/out/build.log diff 
--git a/packaging/pkg/scripts/rpm.sh b/packaging/pkg/scripts/rpm.sh index ffd681b75..d03915c20 100755 --- a/packaging/pkg/scripts/rpm.sh +++ b/packaging/pkg/scripts/rpm.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # Copyright Notice: -# © (C) Postgres Professional 2015-2016 http://www.postgrespro.ru/ +# © (C) Postgres Professional 2015-2021 http://www.postgrespro.ru/ # Distributed under Apache License 2.0 # Распространяется по лицензии Apache 2.0 @@ -18,18 +18,16 @@ set -o pipefail # fix https://github.com/moby/moby/issues/23137 ulimit -n 1024 -# THere is no std/ent packages for PG 9.5 -if [[ ${PG_VERSION} == '9.5' ]] && [[ ${PBK_EDITION} != '' ]] ; then - exit 0 -fi - -if [ -f /etc/centos-release ] ; then +if [ ${DISTRIB} = 'centos' ] ; then sed -i 's|^baseurl=http://|baseurl=https://|g' /etc/yum.repos.d/*.repo yum update -y fi # PACKAGES NEEDED yum install -y git wget bzip2 rpm-build +if [ ${DISTRIB} = 'oraclelinux' -a ${DISTRIB_VERSION} = '8' -a -n ${PBK_EDITION} ] ; then + yum install -y bison flex +fi mkdir /root/build cd /root/build diff --git a/packaging/pkg/specs/rpm/rpmbuild/SOURCES/pg_probackup-forks.repo b/packaging/pkg/specs/rpm/rpmbuild/SOURCES/pg_probackup-forks.repo index fcef58a9c..d26b058cd 100644 --- a/packaging/pkg/specs/rpm/rpmbuild/SOURCES/pg_probackup-forks.repo +++ b/packaging/pkg/specs/rpm/rpmbuild/SOURCES/pg_probackup-forks.repo @@ -1,5 +1,5 @@ [pg_probackup-forks] -name=PG_PROBACKUP @SHORT_CODENAME@ packages for PostgresPro Standart and Enterprise - $basearch +name=PG_PROBACKUP @SHORT_CODENAME@ packages for PostgresPro Standard and Enterprise - $basearch baseurl=https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/@DISTRIB@-$releasever-$basearch enabled=1 gpgcheck=1 diff --git a/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-repo-forks.spec b/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-repo-forks.spec index fd4a99f2c..47adb250f 100644 --- a/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-repo-forks.spec +++ b/packaging/pkg/specs/rpm/rpmbuild/SPECS/pg_probackup-repo-forks.spec @@ -16,7 +16,7 @@ BuildArch: noarch %description This package contains yum configuration for @SHORT_CODENAME@, and also the GPG key -for pg_probackup RPMs for PostgresPro Standart and Enterprise. +for pg_probackup RPMs for PostgresPro Standard and Enterprise. %prep %setup -q -c -T diff --git a/packaging/repo/reprepro-conf/changelog.script b/packaging/repo/reprepro-conf/changelog.script new file mode 100755 index 000000000..4ff1f1787 --- /dev/null +++ b/packaging/repo/reprepro-conf/changelog.script @@ -0,0 +1,246 @@ +#!/bin/sh +# This is an example script that can be hooked into reprepro +# to either generate a hierachy like packages.debian.org/changelogs/ +# or to generate changelog files in the "third party sites" +# location apt-get changelogs looks if it is not found in +# Apt::Changelogs::Server. +# +# All you have to do is to: +# - copy it into you conf/ directory, +# - if you want "third party site" style changelogs, edit the +# CHANGELOGDIR variable below, +# and +# - add the following to any distribution in conf/distributions +# you want to have changelogs and copyright files extracted: +#Log: +# --type=dsc changelogs.example +# (note the space at the beginning of the second line). +# This will cause this script to extract changelogs for all +# newly added source packages. (To generate them for already +# existing packages, call "reprepro rerunnotifiers"). 
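The comment block above is the whole integration story for this hook: reprepro only needs a Log: directive in conf/distributions pointing at the script for source (dsc) uploads. For reference, a stanza of the shape this patch itself adds in packaging/repo/reprepro-conf/distributions looks roughly like the following (codename and component are illustrative; note the leading space before --type, as the comments above stress):

```
Origin: repo.postgrespro.ru
Codename: stretch
Architectures: amd64 i386 source
Components: main-stretch
SignWith: yes
Log:
 --type=dsc changelog.script
```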
+ +# DEPENDENCIES: dpkg >= 1.13.9 + +if test "x${REPREPRO_OUT_DIR:+set}" = xset ; then + # Note: due to cd, REPREPRO_*_DIR will no longer + # be usable. And only things relative to outdir will work... + cd "${REPREPRO_OUT_DIR}" || exit 1 +else + # this will also trigger if reprepro < 3.5.1 is used, + # in that case replace this with a manual cd to the + # correct directory... + cat "changelog.example needs to be run by reprepro!" >&2 + exit 1 +fi + +# CHANGELOGDIR set means generate full hierachy +# (clients need to set Apt::Changelogs::Server to use that) +#CHANGELOGDIR=changelogs + +# CHANGELOGDIR empty means generate changelog (and only changelog) files +# in the new "third party site" place apt-get changelog is using as fallback: +#CHANGELOGDIR= + +# Set to avoid using some predefined TMPDIR or even /tmp as +# tempdir: + +# TMPDIR=/var/cache/whateveryoucreated + +if test -z "$CHANGELOGDIR" ; then +addsource() { + DSCFILE="$1" + CANONDSCFILE="$(readlink --canonicalize "$DSCFILE")" + CHANGELOGFILE="${DSCFILE%.dsc}.changelog" + BASEDIR="$(dirname "$CHANGELOGFILE")" + if ! [ -f "$CHANGELOGFILE" ] ; then + EXTRACTDIR="$(mktemp -d)" + (cd -- "$EXTRACTDIR" && dpkg-source --no-copy -x "$CANONDSCFILE" > /dev/null) + install --mode=644 -- "$EXTRACTDIR"/*/debian/changelog "$CHANGELOGFILE" + chmod -R u+rwX -- "$EXTRACTDIR" + rm -r -- "$EXTRACTDIR" + fi + if [ -L "$BASEDIR"/current."$CODENAME" ] ; then + # should not be there, just to be sure + rm -f -- "$BASEDIR"/current."$CODENAME" + fi + # mark this as needed by this distribution + ln -s -- "$(basename "$CHANGELOGFILE")" "$BASEDIR/current.$CODENAME" + JUSTADDED="$CHANGELOGFILE" +} +delsource() { + DSCFILE="$1" + CHANGELOGFILE="${DSCFILE%.dsc}.changelog" + BASEDIR="$(dirname "$CHANGELOGFILE")" + BASENAME="$(basename "$CHANGELOGFILE")" + if [ "x$JUSTADDED" = "x$CHANGELOGFILE" ] ; then + exit 0 + fi +# echo "delete, basedir=$BASEDIR changelog=$CHANGELOGFILE, dscfile=$DSCFILE, " + if [ "x$(readlink "$BASEDIR/current.$CODENAME")" = "x$BASENAME" ] ; then + rm -- "$BASEDIR/current.$CODENAME" + fi + NEEDED=0 + for c in "$BASEDIR"/current.* ; do + if [ "x$(readlink -- "$c")" = "x$BASENAME" ] ; then + NEEDED=1 + fi + done + if [ "$NEEDED" -eq 0 -a -f "$CHANGELOGFILE" ] ; then + rm -r -- "$CHANGELOGFILE" + # to remove the directory if now empty + rmdir --ignore-fail-on-non-empty -- "$BASEDIR" + fi +} + +else # "$CHANGELOGDIR" set: + +addsource() { + DSCFILE="$1" + CANONDSCFILE="$(readlink --canonicalize "$DSCFILE")" + TARGETDIR="${CHANGELOGDIR}/${DSCFILE%.dsc}" + SUBDIR="$(basename $TARGETDIR)" + BASEDIR="$(dirname $TARGETDIR)" + if ! 
[ -d "$TARGETDIR" ] ; then + echo "extract $CANONDSCFILE information to $TARGETDIR" + mkdir -p -- "$TARGETDIR" + EXTRACTDIR="$(mktemp -d)" + (cd -- "$EXTRACTDIR" && dpkg-source --no-copy -x "$CANONDSCFILE" > /dev/null) + install --mode=644 -- "$EXTRACTDIR"/*/debian/copyright "$TARGETDIR/copyright" + install --mode=644 -- "$EXTRACTDIR"/*/debian/changelog "$TARGETDIR/changelog" + chmod -R u+rwX -- "$EXTRACTDIR" + rm -r -- "$EXTRACTDIR" + fi + if [ -L "$BASEDIR"/current."$CODENAME" ] ; then + # should not be there, just to be sure + rm -f -- "$BASEDIR"/current."$CODENAME" + fi + # mark this as needed by this distribution + ln -s -- "$SUBDIR" "$BASEDIR/current.$CODENAME" + JUSTADDED="$TARGETDIR" +} +delsource() { + DSCFILE="$1" + TARGETDIR="${CHANGELOGDIR}/${DSCFILE%.dsc}" + SUBDIR="$(basename $TARGETDIR)" + BASEDIR="$(dirname $TARGETDIR)" + if [ "x$JUSTADDED" = "x$TARGETDIR" ] ; then + exit 0 + fi +# echo "delete, basedir=$BASEDIR targetdir=$TARGETDIR, dscfile=$DSCFILE, " + if [ "x$(readlink "$BASEDIR/current.$CODENAME")" = "x$SUBDIR" ] ; then + rm -- "$BASEDIR/current.$CODENAME" + fi + NEEDED=0 + for c in "$BASEDIR"/current.* ; do + if [ "x$(readlink -- "$c")" = "x$SUBDIR" ] ; then + NEEDED=1 + fi + done + if [ "$NEEDED" -eq 0 -a -d "$TARGETDIR" ] ; then + rm -r -- "$TARGETDIR" + # to remove the directory if now empty + rmdir --ignore-fail-on-non-empty -- "$BASEDIR" + fi +} +fi # CHANGELOGDIR + +ACTION="$1" +CODENAME="$2" +PACKAGETYPE="$3" +if [ "x$PACKAGETYPE" != "xdsc" ] ; then +# the --type=dsc should cause this to never happen, but better safe than sorry. + exit 1 +fi +COMPONENT="$4" +ARCHITECTURE="$5" +if [ "x$ARCHITECTURE" != "xsource" ] ; then + exit 1 +fi +NAME="$6" +shift 6 +JUSTADDED="" +if [ "x$ACTION" = "xadd" -o "x$ACTION" = "xinfo" ] ; then + VERSION="$1" + shift + if [ "x$1" != "x--" ] ; then + exit 2 + fi + shift + while [ "$#" -gt 0 ] ; do + case "$1" in + *.dsc) + addsource "$1" + ;; + --) + exit 2 + ;; + esac + shift + done +elif [ "x$ACTION" = "xremove" ] ; then + OLDVERSION="$1" + shift + if [ "x$1" != "x--" ] ; then + exit 2 + fi + shift + while [ "$#" -gt 0 ] ; do + case "$1" in + *.dsc) + delsource "$1" + ;; + --) + exit 2 + ;; + esac + shift + done +elif [ "x$ACTION" = "xreplace" ] ; then + VERSION="$1" + shift + OLDVERSION="$1" + shift + if [ "x$1" != "x--" ] ; then + exit 2 + fi + shift + while [ "$#" -gt 0 -a "x$1" != "x--" ] ; do + case "$1" in + *.dsc) + addsource "$1" + ;; + esac + shift + done + if [ "x$1" != "x--" ] ; then + exit 2 + fi + shift + while [ "$#" -gt 0 ] ; do + case "$1" in + *.dsc) + delsource "$1" + ;; + --) + exit 2 + ;; + esac + shift + done +fi + +exit 0 +# Copyright 2007,2008,2012 Bernhard R. Link +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA \ No newline at end of file diff --git a/packaging/repo/reprepro-conf/distributions b/packaging/repo/reprepro-conf/distributions new file mode 100644 index 000000000..7dce7e6d0 --- /dev/null +++ b/packaging/repo/reprepro-conf/distributions @@ -0,0 +1,179 @@ +Origin: repo.postgrespro.ru +Label: PostgreSQL backup utility pg_probackup +Codename: squeeze +Architectures: amd64 i386 source +Components: main-squeeze +Description: PostgresPro pg_probackup repo +SignWith: yes +Log: + --type=dsc changelog.script + +Origin: repo.postgrespro.ru +Label: PostgreSQL backup utility pg_probackup +Codename: wheezy +Architectures: amd64 i386 source +Components: main-wheezy +Description: PostgresPro pg_probackup repo +SignWith: yes +Log: + --type=dsc changelog.script + +Origin: repo.postgrespro.ru +Label: PostgreSQL backup utility pg_probackup +Codename: jessie +Architectures: amd64 i386 source +Components: main-jessie +Description: PostgresPro pg_probackup repo +SignWith: yes +Log: + --type=dsc changelog.script + +Origin: repo.postgrespro.ru +Label: PostgreSQL backup utility pg_probackup +Codename: bullseye +Architectures: amd64 i386 source +Components: main-bullseye +Description: PostgresPro pg_probackup repo +SignWith: yes +Log: + --type=dsc changelog.script + +Origin: repo.postgrespro.ru +Label: PostgreSQL backup utility pg_probackup +Codename: wily +Architectures: amd64 i386 source +Components: main-wily +Description: PostgresPro pg_probackup repo +SignWith: yes +Log: + --type=dsc changelog.script + +Origin: repo.postgrespro.ru +Label: PostgreSQL backup utility pg_probackup +Codename: precise +Architectures: amd64 i386 source +Components: main-precise +Description: PostgresPro pg_probackup repo +SignWith: yes +Log: + --type=dsc changelog.script + +Origin: repo.postgrespro.ru +Label: PostgreSQL backup utility pg_probackup +Codename: vivid +Architectures: amd64 i386 source +Components: main-vivid +Description: PostgresPro pg_probackup repo +SignWith: yes +Log: + --type=dsc changelog.script + +Origin: repo.postgrespro.ru +Label: PostgreSQL backup utility pg_probackup +Codename: trusty +Architectures: amd64 i386 source +Components: main-trusty +Description: PostgresPro pg_probackup repo +SignWith: yes +Log: + --type=dsc changelog.script + +Origin: repo.postgrespro.ru +Label: PostgreSQL backup utility pg_probackup +Codename: lucid +Architectures: amd64 i386 source +Components: main-lucid +Description: PostgresPro pg_probackup repo +SignWith: yes +Log: + --type=dsc changelog.script + +Origin: repo.postgrespro.ru +Label: PostgreSQL backup utility pg_probackup +Codename: cosmic +Architectures: amd64 i386 source +Components: main-cosmic +Description: PostgresPro pg_probackup repo +SignWith: yes +Log: + --type=dsc changelog.script + +Origin: repo.postgrespro.ru +Label: PostgreSQL backup utility pg_probackup +Codename: xenial +Architectures: amd64 i386 source +Components: main-xenial +Description: PostgresPro pg_probackup repo +SignWith: yes +Log: + --type=dsc changelog.script + +Origin: repo.postgrespro.ru +Label: PostgreSQL backup utility pg_probackup +Codename: yakkety +Architectures: amd64 i386 source +Components: main-yakkety +Description: PostgresPro pg_probackup repo +SignWith: yes +Log: + --type=dsc changelog.script + +Origin: repo.postgrespro.ru +Label: PostgreSQL backup utility pg_probackup 
+Codename: zesty +Architectures: amd64 i386 source +Components: main-zesty +Description: PostgresPro pg_probackup repo +SignWith: yes +Log: + --type=dsc changelog.script + +Origin: repo.postgrespro.ru +Label: PostgreSQL backup utility pg_probackup +Codename: stretch +Architectures: amd64 i386 source +Components: main-stretch +Description: PostgresPro pg_probackup repo +SignWith: yes +Log: + --type=dsc changelog.script + +Origin: repo.postgrespro.ru +Label: PostgreSQL backup utility pg_probackup +Codename: buster +Architectures: amd64 i386 source +Components: main-buster +Description: PostgresPro pg_probackup repo +SignWith: yes +Log: + --type=dsc changelog.script + +Origin: repo.postgrespro.ru +Label: PostgreSQL backup utility pg_probackup +Codename: artful +Architectures: amd64 i386 source +Components: main-artful +Description: PostgresPro pg_probackup repo +SignWith: yes +Log: + --type=dsc changelog.script + +Origin: repo.postgrespro.ru +Label: PostgreSQL backup utility pg_probackup +Codename: bionic +Architectures: amd64 i386 source +Components: main-bionic +Description: PostgresPro pg_probackup repo +SignWith: yes +Log: + --type=dsc changelog.script + +Origin: repo.postgrespro.ru +Label: PostgreSQL backup utility pg_probackup +Codename: focal +Architectures: amd64 i386 source +Components: main-focal +Description: PostgresPro pg_probackup repo +SignWith: yes +Log: + --type=dsc changelog.script diff --git a/packaging/repo/rpm-conf/rpmmacros b/packaging/repo/rpm-conf/rpmmacros new file mode 100644 index 000000000..e00a76d68 --- /dev/null +++ b/packaging/repo/rpm-conf/rpmmacros @@ -0,0 +1,5 @@ +%_signature gpg +%_gpg_path /root/.gnupg +%_gpg_name PostgreSQL Professional +%_gpgbin /usr/bin/gpg +%_gpg_check_password_cmd /bin/true diff --git a/packaging/repo/scripts/deb.sh b/packaging/repo/scripts/deb.sh index 6515e6b42..31416972d 100755 --- a/packaging/repo/scripts/deb.sh +++ b/packaging/repo/scripts/deb.sh @@ -20,7 +20,7 @@ cd $INPUT_DIR export DEB_DIR=$OUT_DIR/deb export KEYS_DIR=$OUT_DIR/keys -export CONF=/app/repo/${PBK_PKG_REPO}/conf +export CONF=/app/repo/reprepro-conf mkdir -p "$KEYS_DIR" cp -av /app/repo/${PBK_PKG_REPO}/gnupg /root/.gnupg @@ -29,7 +29,7 @@ echo -e 'User-agent: *\nDisallow: /' > $OUT_DIR/robots.txt mkdir -p $DEB_DIR cd $DEB_DIR -cp -av $CONF ./ +cp -av $CONF ./conf # make remove-debpkg tool echo -n "#!" 
> remove-debpkg diff --git a/packaging/repo/scripts/rpm.sh b/packaging/repo/scripts/rpm.sh index d4e621c3e..524789cbf 100755 --- a/packaging/repo/scripts/rpm.sh +++ b/packaging/repo/scripts/rpm.sh @@ -20,24 +20,17 @@ export KEYS_DIR=$OUT_DIR/keys mkdir -p "$KEYS_DIR" rsync /app/repo/$PBK_PKG_REPO/gnupg/key.public $KEYS_DIR/GPG-KEY-PG_PROBACKUP chmod 755 $KEYS_DIR -chmod +x /app/repo/$PBK_PKG_REPO/autosign.sh echo -e 'User-agent: *\nDisallow: /' > $OUT_DIR/robots.txt cd $INPUT_DIR -cp -arv /app/repo/$PBK_PKG_REPO/rpmmacros /root/.rpmmacros +cp -arv /app/repo/rpm-conf/rpmmacros /root/.rpmmacros cp -arv /app/repo/$PBK_PKG_REPO/gnupg /root/.gnupg chmod -R 0600 /root/.gnupg chown -R root:root /root/.gnupg for pkg in $(ls ${INPUT_DIR}); do for pkg_full_version in $(ls ${INPUT_DIR}/$pkg); do - - # THere is no std/ent packages for PG 9.5 - if [[ ${pkg} == 'pg_probackup-std-9.5' ]] || [[ ${pkg} == 'pg_probackup-ent-9.5' ]] ; then - continue; - fi - if [[ ${PBK_EDITION} == '' ]] ; then cp $INPUT_DIR/$pkg/$pkg_full_version/RPMS/noarch/pg_probackup-repo-*.noarch.rpm \ $KEYS_DIR/pg_probackup-repo-$DISTRIB.noarch.rpm diff --git a/packaging/repo/scripts/suse.sh b/packaging/repo/scripts/suse.sh index 7253df700..c85e0ff10 100755 --- a/packaging/repo/scripts/suse.sh +++ b/packaging/repo/scripts/suse.sh @@ -33,7 +33,7 @@ rsync /app/repo/$PBK_PKG_REPO/gnupg/key.public $KEYS_DIR/GPG-KEY-PG_PROBACKUP echo -e 'User-agent: *\nDisallow: /' > $OUT_DIR/robots.txt -cp -arv /app/repo/$PBK_PKG_REPO/rpmmacros /root/.rpmmacros +cp -arv /app/repo/rpm-conf/rpmmacros /root/.rpmmacros cp -arv /app/repo/$PBK_PKG_REPO/gnupg /root/.gnupg chmod -R 0600 /root/.gnupg diff --git a/packaging/test/Makefile.alt b/packaging/test/Makefile.alt index 3c1899cb9..bd00cef7f 100644 --- a/packaging/test/Makefile.alt +++ b/packaging/test/Makefile.alt @@ -1,20 +1,50 @@ # ALT 9 build/test_alt_9_9.6: - $(call test_alt,alt,9,,9.6,9.6.21) + $(call test_alt,alt,9,,9.6,9.6.24) touch build/test_alt_9_9.6 build/test_alt_9_10: - $(call test_alt,alt,9,,10,10.17) + $(call test_alt,alt,9,,10,10.19) touch build/test_alt_9_10 build/test_alt_9_11: - $(call test_alt,alt,9,,11,11.11) + $(call test_alt,alt,9,,11,11.14) touch build/test_alt_9_11 build/test_alt_9_12: - $(call test_alt,alt,9,,12,12.6) + $(call test_alt,alt,9,,12,12.9) touch build/test_alt_9_12 build/test_alt_9_13: - $(call test_alt,alt,9,,13,13.2) + $(call test_alt,alt,9,,13,13.5) touch build/test_alt_9_13 + +build/test_alt_9_14: + $(call test_alt,alt,9,,14,14.1) + touch build/test_alt_9_14 + +# ALT 8 +build/test_alt_8_9.6: + $(call test_alt,alt,8,,9.6,9.6.24) + touch build/test_alt_8_9.6 + +build/test_alt_8_10: + $(call test_alt,alt,8,,10,10.19) + touch build/test_alt_8_10 + +build/test_alt_8_11: + $(call test_alt,alt,8,,11,11.14) + touch build/test_alt_8_11 + +build/test_alt_8_12: + $(call test_alt,alt,8,,12,12.9) + touch build/test_alt_8_12 + +build/test_alt_8_13: + $(call test_alt,alt,8,,13,13.5) + touch build/test_alt_8_13 + +build/test_alt_8_14: + $(call test_alt,alt,8,,14,14.1) + touch build/test_alt_8_14 + diff --git a/packaging/test/Makefile.centos b/packaging/test/Makefile.centos index e3787c612..9d30a324b 100644 --- a/packaging/test/Makefile.centos +++ b/packaging/test/Makefile.centos @@ -1,41 +1,49 @@ # CENTOS 7 build/test_centos_7_9.6: - $(call test_rpm,centos,7,,9.6,9.6.21) + $(call test_rpm,centos,7,,9.6,9.6.24) touch build/test_centos_7_9.6 build/test_centos_7_10: - $(call test_rpm,centos,7,,10,10.16) + $(call test_rpm,centos,7,,10,10.19) touch build/test_centos_7_10 
build/test_centos_7_11: - $(call test_rpm,centos,7,,11,11.11) + $(call test_rpm,centos,7,,11,11.14) touch build/test_centos_7_11 build/test_centos_7_12: - $(call test_rpm,centos,7,,12,12.6) + $(call test_rpm,centos,7,,12,12.9) touch build/test_centos_7_12 build/test_centos_7_13: - $(call test_rpm,centos,7,,13,13.2) + $(call test_rpm,centos,7,,13,13.5) touch build/test_centos_7_13 +build/test_centos_7_14: + $(call test_rpm,centos,7,,14,14.1) + touch build/test_centos_7_14 + # CENTOS 8 build/test_centos_8_9.6: - $(call test_rpm,centos,8,,9.6,9.6.21) + $(call test_rpm,centos,8,,9.6,9.6.24) touch build/test_centos_8_9.6 build/test_centos_8_10: - $(call test_rpm,centos,8,,10,10.16) + $(call test_rpm,centos,8,,10,10.19) touch build/test_centos_8_10 build/test_centos_8_11: - $(call test_rpm,centos,8,,11,11.11) + $(call test_rpm,centos,8,,11,11.14) touch build/test_centos_8_11 build/test_centos_8_12: - $(call test_rpm,centos,8,,12,12.6) + $(call test_rpm,centos,8,,12,12.9) touch build/test_centos_8_12 build/test_centos_8_13: - $(call test_rpm,centos,8,,13,13.2) + $(call test_rpm,centos,8,,13,13.5) touch build/test_centos_8_13 + +build/test_centos_8_14: + $(call test_rpm,centos,8,,14,14.1) + touch build/test_centos_8_14 diff --git a/packaging/test/Makefile.debian b/packaging/test/Makefile.debian index 084741069..e4d904f62 100644 --- a/packaging/test/Makefile.debian +++ b/packaging/test/Makefile.debian @@ -1,62 +1,74 @@ # DEBIAN 9 build/test_debian_9_9.6: - $(call test_deb,debian,9,stretch,9.6,9.6.21) + $(call test_deb,debian,9,stretch,9.6,9.6.24) touch build/test_debian_9_9.6 build/test_debian_9_10: - $(call test_deb,debian,9,stretch,10,10.16) + $(call test_deb,debian,9,stretch,10,10.19) touch build/test_debian_9_10 build/test_debian_9_11: - $(call test_deb,debian,9,stretch,11,11.11) + $(call test_deb,debian,9,stretch,11,11.14) touch build/test_debian_9_11 build/test_debian_9_12: - $(call test_deb,debian,9,stretch,12,12.6) + $(call test_deb,debian,9,stretch,12,12.9) touch build/test_debian_9_12 build/test_debian_9_13: - $(call test_deb,debian,9,stretch,13,13.2) + $(call test_deb,debian,9,stretch,13,13.5) touch build/test_debian_9_13 +build/test_debian_9_14: + $(call test_deb,debian,9,stretch,14,14.1) + touch build/test_debian_9_14 + # DEBIAN 10 build/test_debian_10_9.6: - $(call test_deb,debian,10,buster,9.6,9.6.21) + $(call test_deb,debian,10,buster,9.6,9.6.24) touch build/test_debian_10_9.6 build/test_debian_10_10: - $(call test_deb,debian,10,buster,10,10.16) + $(call test_deb,debian,10,buster,10,10.19) touch build/test_debian_10_10 build/test_debian_10_11: - $(call test_deb,debian,10,buster,11,11.11) + $(call test_deb,debian,10,buster,11,11.14) touch build/test_debian_10_11 build/test_debian_10_12: - $(call test_deb,debian,10,buster,12,12.6) + $(call test_deb,debian,10,buster,12,12.9) touch build/test_debian_10_12 build/test_debian_10_13: - $(call test_deb,debian,10,buster,13,13.2) + $(call test_deb,debian,10,buster,13,13.5) touch build/test_debian_10_13 +build/test_debian_10_14: + $(call test_deb,debian,10,buster,14,14.1) + touch build/test_debian_10_14 + # DEBIAN 11 build/test_debian_11_9.6: - $(call test_deb,debian,11,bullseye,9.6,9.6.21) + $(call test_deb,debian,11,bullseye,9.6,9.6.24) touch build/test_debian_11_9.6 build/test_debian_11_10: - $(call test_deb,debian,11,bullseye,10,10.16) + $(call test_deb,debian,11,bullseye,10,10.19) touch build/test_debian_11_10 build/test_debian_11_11: - $(call test_deb,debian,11,bullseye,11,11.11) + $(call test_deb,debian,11,bullseye,11,11.14) touch 
build/test_debian_11_11 build/test_debian_11_12: - $(call test_deb,debian,11,bullseye,12,12.6) + $(call test_deb,debian,11,bullseye,12,12.9) touch build/test_debian_11_12 build/test_debian_11_13: - $(call test_deb,debian,11,bullseye,13,13.2) + $(call test_deb,debian,11,bullseye,13,13.5) touch build/test_debian_11_13 + +build/test_debian_11_14: + $(call test_deb,debian,11,bullseye,14,14.1) + touch build/test_debian_11_14 diff --git a/packaging/test/Makefile.oraclelinux b/packaging/test/Makefile.oraclelinux index fdf44de8b..0efe6574d 100644 --- a/packaging/test/Makefile.oraclelinux +++ b/packaging/test/Makefile.oraclelinux @@ -1,41 +1,49 @@ # ORACLE LINUX 7 build/test_oraclelinux_7_9.6: - $(call test_rpm,oraclelinux,7,,9.6,9.6.21) + $(call test_rpm,oraclelinux,7,,9.6,9.6.24) touch build/test_oraclelinux_7_9.6 build/test_oraclelinux_7_10: - $(call test_rpm,oraclelinux,7,,10,10.16) + $(call test_rpm,oraclelinux,7,,10,10.19) touch build/test_oraclelinux_7_10 build/test_oraclelinux_7_11: - $(call test_rpm,oraclelinux,7,,11,11.11) + $(call test_rpm,oraclelinux,7,,11,11.14) touch build/test_oraclelinux_7_11 build/test_oraclelinux_7_12: - $(call test_rpm,oraclelinux,7,,12,12.6) + $(call test_rpm,oraclelinux,7,,12,12.9) touch build/test_oraclelinux_7_12 build/test_oraclelinux_7_13: - $(call test_rpm,oraclelinux,7,,13,13.2) + $(call test_rpm,oraclelinux,7,,13,13.5) touch build/test_oraclelinux_7_13 +build/test_oraclelinux_7_14: + $(call test_rpm,oraclelinux,7,,14,14.1) + touch build/test_oraclelinux_7_14 + # ORACLE LINUX 8 build/test_oraclelinux_8_9.6: - $(call test_rpm,oraclelinux,8,,9.6,9.6.21) + $(call test_rpm,oraclelinux,8,,9.6,9.6.24) touch build/test_oraclelinux_8_9.6 build/test_oraclelinux_8_10: - $(call test_rpm,oraclelinux,8,,10,10.16) + $(call test_rpm,oraclelinux,8,,10,10.19) touch build/test_oraclelinux_8_10 build/test_oraclelinux_8_11: - $(call test_rpm,oraclelinux,8,,11,11.11) + $(call test_rpm,oraclelinux,8,,11,11.14) touch build/test_oraclelinux_8_11 build/test_oraclelinux_8_12: - $(call test_rpm,oraclelinux,8,,12,12.6) + $(call test_rpm,oraclelinux,8,,12,12.9) touch build/test_oraclelinux_8_12 build/test_oraclelinux_8_13: - $(call test_rpm,oraclelinux,8,,13,13.2) + $(call test_rpm,oraclelinux,8,,13,13.5) touch build/test_oraclelinux_8_13 + +build/test_oraclelinux_8_14: + $(call test_rpm,oraclelinux,8,,14,14.1) + touch build/test_oraclelinux_8_14 diff --git a/packaging/test/Makefile.rhel b/packaging/test/Makefile.rhel index 3169d11c9..3b26c8942 100644 --- a/packaging/test/Makefile.rhel +++ b/packaging/test/Makefile.rhel @@ -1,41 +1,49 @@ # RHEL 7 build/test_rhel_7_9.6: - $(call test_rpm,rhel,7,7Server,9.6,9.6.21) + $(call test_rpm,rhel,7,7Server,9.6,9.6.24) touch build/test_rhel_7_9.6 build/test_rhel_7_10: - $(call test_rpm,rhel,7,7Server,10,10.16) + $(call test_rpm,rhel,7,7Server,10,10.19) touch build/test_rhel_7_10 build/test_rhel_7_11: - $(call test_rpm,rhel,7,7Server,11,11.11) + $(call test_rpm,rhel,7,7Server,11,11.14) touch build/test_rhel_7_11 build/test_rhel_7_12: - $(call test_rpm,rhel,7,7Server,12,12.6) + $(call test_rpm,rhel,7,7Server,12,12.9) touch build/test_rhel_7_12 build/test_rhel_7_13: - $(call test_rpm,rhel,7,7Server,13,13.2) + $(call test_rpm,rhel,7,7Server,13,13.5) touch build/test_rhel_7_13 +build/test_rhel_7_14: + $(call test_rpm,rhel,7,7Server,14,14.1) + touch build/test_rhel_7_14 + # RHEL 8 build/test_rhel_8_9.6: - $(call test_rpm,rhel,8,8Server,9.6,9.6.21) + $(call test_rpm,rhel,8,8Server,9.6,9.6.24) touch build/test_rhel_8_9.6 build/test_rhel_8_10: - $(call 
test_rpm,rhel,8,8Server,10,10.16) + $(call test_rpm,rhel,8,8Server,10,10.19) touch build/test_rhel_8_10 build/test_rhel_8_11: - $(call test_rpm,rhel,8,8Server,11,11.11) + $(call test_rpm,rhel,8,8Server,11,11.14) touch build/test_rhel_8_11 build/test_rhel_8_12: - $(call test_rpm,rhel,8,8Server,12,12.6) + $(call test_rpm,rhel,8,8Server,12,12.9) touch build/test_rhel_8_12 build/test_rhel_8_13: - $(call test_rpm,rhel,8,8Server,13,13.2) + $(call test_rpm,rhel,8,8Server,13,13.5) touch build/test_rhel_8_13 + +build/test_rhel_8_14: + $(call test_rpm,rhel,8,8Server,14,14.1) + touch build/test_rhel_8_14 diff --git a/packaging/test/Makefile.suse b/packaging/test/Makefile.suse index 9257bdbfd..19e8d52d8 100644 --- a/packaging/test/Makefile.suse +++ b/packaging/test/Makefile.suse @@ -1,41 +1,49 @@ # Suse 15.1 build/test_suse_15.1_9.6: - $(call test_suse,suse,15.1,,9.6,9.6.21) + $(call test_suse,suse,15.1,,9.6,9.6.24) touch build/test_suse_15.1_9.6 build/test_suse_15.1_10: - $(call test_suse,suse,15.1,,10,10.16) + $(call test_suse,suse,15.1,,10,10.19) touch build/test_suse_15.1_10 build/test_suse_15.1_11: - $(call test_suse,suse,15.1,,11,11.11) + $(call test_suse,suse,15.1,,11,11.14) touch build/test_suse_15.1_11 build/test_suse_15.1_12: - $(call test_suse,suse,15.1,,12,12.6) + $(call test_suse,suse,15.1,,12,12.9) touch build/test_suse_15.1_12 build/test_suse_15.1_13: - $(call test_suse,suse,15.1,,13,13.2) + $(call test_suse,suse,15.1,,13,13.5) touch build/test_suse_15.1_13 +build/test_suse_15.1_14: + $(call test_suse,suse,15.1,,14,14.1) + touch build/test_suse_15.1_14 + # Suse 15.2 build/test_suse_15.2_9.6: - $(call test_suse,suse,15.2,,9.6,9.6.21) + $(call test_suse,suse,15.2,,9.6,9.6.24) touch build/test_suse_15.2_9.6 build/test_suse_15.2_10: - $(call test_suse,suse,15.2,,10,10.16) + $(call test_suse,suse,15.2,,10,10.19) touch build/test_suse_15.2_10 build/test_suse_15.2_11: - $(call test_suse,suse,15.2,,11,11.11) + $(call test_suse,suse,15.2,,11,11.14) touch build/test_suse_15.2_11 build/test_suse_15.2_12: - $(call test_suse,suse,15.2,,12,12.6) + $(call test_suse,suse,15.2,,12,12.9) touch build/test_suse_15.2_12 build/test_suse_15.2_13: - $(call test_suse,suse,15.2,,13,13.2) + $(call test_suse,suse,15.2,,13,13.5) touch build/test_suse_15.2_13 + +build/test_suse_15.2_14: + $(call test_suse,suse,15.2,,14,14.1) + touch build/test_suse_15.2_14 diff --git a/packaging/test/Makefile.ubuntu b/packaging/test/Makefile.ubuntu index 9e201a30b..86a257b91 100644 --- a/packaging/test/Makefile.ubuntu +++ b/packaging/test/Makefile.ubuntu @@ -1,62 +1,49 @@ -# UBUNTU 16.04 -build/test_ubuntu_16.04_9.6: - $(call test_deb,ubuntu,16.04,xenial,9.6,9.6.21) - touch build/test_ubuntu_16.04_9.6 - -build/test_ubuntu_16.04_10: - $(call test_deb,ubuntu,16.04,xenial,10,10.16) - touch build/test_ubuntu_16.04_10 - -build/test_ubuntu_16.04_11: - $(call test_deb,ubuntu,16.04,xenial,11,11.11) - touch build/test_ubuntu_16.04_11 - -build/test_ubuntu_16.04_12: - $(call test_deb,ubuntu,16.04,xenial,12,12.6) - touch build/test_ubuntu_16.04_12 - -build/test_ubuntu_16.04_13: - $(call test_deb,ubuntu,16.04,xenial,13,13.2) - touch build/test_ubuntu_16.04_13 - # UBUNTU 18.04 build/test_ubuntu_18.04_9.6: - $(call test_deb,ubuntu,18.04,bionic,9.6,9.6.21) + $(call test_deb,ubuntu,18.04,bionic,9.6,9.6.24) touch build/test_ubuntu_18.04_9.6 build/test_ubuntu_18.04_10: - $(call test_deb,ubuntu,18.04,bionic,10,10.16) + $(call test_deb,ubuntu,18.04,bionic,10,10.19) touch build/test_ubuntu_18.04_10 build/test_ubuntu_18.04_11: - $(call 
test_deb,ubuntu,18.04,bionic,11,11.11) + $(call test_deb,ubuntu,18.04,bionic,11,11.14) touch build/test_ubuntu_18.04_11 build/test_ubuntu_18.04_12: - $(call test_deb,ubuntu,18.04,bionic,12,12.6) + $(call test_deb,ubuntu,18.04,bionic,12,12.9) touch build/test_ubuntu_18.04_12 build/test_ubuntu_18.04_13: - $(call test_deb,ubuntu,18.04,bionic,13,13.2) + $(call test_deb,ubuntu,18.04,bionic,13,13.5) touch build/test_ubuntu_18.04_13 +build/test_ubuntu_18.04_14: + $(call test_deb,ubuntu,18.04,bionic,14,14.1) + touch build/test_ubuntu_18.04_14 + # UBUNTU 20.04 build/test_ubuntu_20.04_9.6: - $(call test_deb,ubuntu,20.04,focal,9.6,9.6.21) + $(call test_deb,ubuntu,20.04,focal,9.6,9.6.24) touch build/test_ubuntu_20.04_9.6 build/test_ubuntu_20.04_10: - $(call test_deb,ubuntu,20.04,focal,10,10.16) + $(call test_deb,ubuntu,20.04,focal,10,10.19) touch build/test_ubuntu_20.04_10 build/test_ubuntu_20.04_11: - $(call test_deb,ubuntu,20.04,focal,11,11.11) + $(call test_deb,ubuntu,20.04,focal,11,11.14) touch build/test_ubuntu_20.04_11 build/test_ubuntu_20.04_12: - $(call test_deb,ubuntu,20.04,focal,12,12.6) + $(call test_deb,ubuntu,20.04,focal,12,12.9) touch build/test_ubuntu_20.04_12 build/test_ubuntu_20.04_13: - $(call test_deb,ubuntu,20.04,focal,13,13.2) + $(call test_deb,ubuntu,20.04,focal,13,13.5) touch build/test_ubuntu_20.04_13 + +build/test_ubuntu_20.04_14: + $(call test_deb,ubuntu,20.04,focal,14,14.1) + touch build/test_ubuntu_20.04_14 diff --git a/packaging/test/scripts/deb.sh b/packaging/test/scripts/deb.sh index d7b957192..fca9a23d8 100755 --- a/packaging/test/scripts/deb.sh +++ b/packaging/test/scripts/deb.sh @@ -17,8 +17,14 @@ PG_TOG=$(echo $PG_VERSION | sed 's|\.||g') export DEBIAN_FRONTEND=noninteractive echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections -#apt-get -qq --allow-releaseinfo-change update -apt-get -qq update +if [ ${DISTRIB} = 'ubuntu' -a ${CODENAME} = 'xenial' ] ; then + apt-get -qq update +elif [ ${DISTRIB} = 'debian' -a ${CODENAME} = 'stretch' ] ; then + apt-get -qq update +else + apt-get -qq --allow-releaseinfo-change update +fi + apt-get -qq install -y wget nginx gnupg lsb-release #apt-get -qq install -y libterm-readline-gnu-perl dialog gnupg procps diff --git a/packaging/test/scripts/deb_forks.sh b/packaging/test/scripts/deb_forks.sh index 5175f38db..e05695608 100755 --- a/packaging/test/scripts/deb_forks.sh +++ b/packaging/test/scripts/deb_forks.sh @@ -31,7 +31,11 @@ echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections #printf "deb http://archive.debian.org/debian/ jessie main\ndeb-src http://archive.debian.org/debian/ jessie main\ndeb http://security.debian.org jessie/updates main\ndeb-src http://security.debian.org jessie/updates main" > /etc/apt/sources.list #fi -apt-get -qq update +if [ ${DISTRIB} = 'debian' -a ${CODENAME} = 'stretch' ] ; then + apt-get -qq update +else + apt-get -qq --allow-releaseinfo-change update +fi apt-get -qq install -y wget nginx gnupg lsb-release apt-transport-https #apt-get -qq install -y libterm-readline-gnu-perl dialog gnupg procps diff --git a/packaging/test/scripts/rpm.sh b/packaging/test/scripts/rpm.sh index 320d459f6..92804a7f4 100755 --- a/packaging/test/scripts/rpm.sh +++ b/packaging/test/scripts/rpm.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # Copyright Notice: -# © (C) Postgres Professional 2015-2016 http://www.postgrespro.ru/ +# © (C) Postgres Professional 2015-2021 
http://www.postgrespro.ru/ # Distributed under Apache License 2.0 # Распространяется по лицензии Apache 2.0 @@ -13,23 +13,19 @@ ulimit -n 1024 PG_TOG=$(echo $PG_VERSION | sed 's|\.||g') -yum update -y +if [ ${DISTRIB} != 'rhel' -o ${DISTRIB_VERSION} != '7' ]; then + # update of rpm package is broken in rhel-7 (26/12/2022) + yum update -y +fi # yum upgrade -y || echo 'some packages in docker failed to upgrade' # yum install -y sudo -if [ ${DISTRIB} == 'rhel' ] && [ ${PG_TOG} == '13' ]; then # no packages for PG13 on PGDG - exit 0 -fi - -#if [ ${DISTRIB} == 'oraclelinux' ] && [ ${DISTRIB_VERSION} == '6' ] && [ ${PG_TOG} == '13' ]; then # no packages for PG13 on PGDG -# exit 0 -#fi -if [ ${DISTRIB_VERSION} == '6' ]; then +if [ ${DISTRIB_VERSION} = '6' ]; then yum install -y https://nginx.org/packages/rhel/6/x86_64/RPMS/nginx-1.8.1-1.el6.ngx.x86_64.rpm -elif [ ${DISTRIB} == 'oraclelinux' ] && [ ${DISTRIB_VERSION} == '8' ]; then - yum install -y nginx -elif [ ${DISTRIB_VERSION} == '7' ]; then +elif [ ${DISTRIB_VERSION} = '7' ]; then yum install -y https://nginx.org/packages/rhel/7/x86_64/RPMS/nginx-1.8.1-1.el7.ngx.x86_64.rpm +elif [ ${DISTRIB_VERSION} = '8' -a \( ${DISTRIB} = 'rhel' -o ${DISTRIB} = 'oraclelinux' \) ]; then + yum install -y nginx else yum install epel-release -y yum install -y nginx diff --git a/packaging/test/scripts/rpm_forks.sh b/packaging/test/scripts/rpm_forks.sh index 8596f6656..0d72040ed 100755 --- a/packaging/test/scripts/rpm_forks.sh +++ b/packaging/test/scripts/rpm_forks.sh @@ -13,7 +13,12 @@ ulimit -n 1024 PG_TOG=$(echo $PG_VERSION | sed 's|\.||g') -if [ ${PBK_PBK_EDITION} == 'ent' ]; then +if [ ${DISTRIB} != 'rhel' -o ${DISTRIB_VERSION} != '7' ]; then + # update of rpm package is broken in rhel-7 (26/12/2022) + yum update -y +fi + +if [ ${PBK_EDITION} == 'ent' ]; then exit 0 fi From 1790a990d2f98e17e891971b1ef957a85319dd1b Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Fri, 11 Feb 2022 16:58:23 +0300 Subject: [PATCH 245/525] fix windows builds (add forgotten catchup.c into gen_probackup_project.pl) --- gen_probackup_project.pl | 1 + 1 file changed, 1 insertion(+) diff --git a/gen_probackup_project.pl b/gen_probackup_project.pl index abc779a40..c24db1228 100644 --- a/gen_probackup_project.pl +++ b/gen_probackup_project.pl @@ -155,6 +155,7 @@ sub build_pgprobackup 'archive.c', 'backup.c', 'catalog.c', + 'catchup.c', 'configure.c', 'data.c', 'delete.c', From 98f77d24fd9d30531e5788d8dea1b16e188ac9d2 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Sat, 12 Feb 2022 21:35:03 +0300 Subject: [PATCH 246/525] =?UTF-8?q?[PGPRO-5691]=20ptrack-2.3:=20move=20mma?= =?UTF-8?q?pped=20ptrack=20map=20into=20shared=20postgres=E2=80=A6=20(#471?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [PGPRO-5691] ptrack-2.3: move mmapped ptrack map into shared postgres memory In ptrack-2.3 ptrack.map.mmap will be removed and 'incorrect checksum' error will not be fatal (https://github.com/postgrespro/ptrack/pull/19) * added test_corrupt_ptrack_map test compatibility with both version 2.2 and version 2.3 of ptrack --- tests/ptrack.py | 63 ++++++++++++++++++++++++++++--------------------- 1 file changed, 36 insertions(+), 27 deletions(-) diff --git a/tests/ptrack.py b/tests/ptrack.py index a3109da48..5878f0700 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -4314,6 +4314,8 @@ def test_corrupt_ptrack_map(self): "postgres", "CREATE EXTENSION ptrack") + ptrack_version = self.get_ptrack_version(node) + # Create table node.safe_psql( "postgres", @@ -4338,48 +4340,55 @@ def test_corrupt_ptrack_map(self): node.stop(['-m', 'immediate', '-D', node.data_dir]) ptrack_map = os.path.join(node.data_dir, 'global', 'ptrack.map') - ptrack_map_mmap = os.path.join(node.data_dir, 'global', 'ptrack.map.mmap') - # Let`s do index corruption. ptrack.map, ptrack.map.mmap + # Let`s do index corruption. ptrack.map with open(ptrack_map, "rb+", 0) as f: f.seek(42) f.write(b"blablahblahs") f.flush() f.close - with open(ptrack_map_mmap, "rb+", 0) as f: - f.seek(42) - f.write(b"blablahblahs") - f.flush() - f.close - # os.remove(os.path.join(node.logs_dir, node.pg_log_name)) - try: + if self.verbose: + print('Ptrack version:', ptrack_version) + if ptrack_version >= self.version_to_num("2.3"): node.slow_start() - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because ptrack.map is corrupted" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except StartNodeException as e: + + log_file = os.path.join(node.logs_dir, 'postgresql.log') + with open(log_file, 'r') as f: + log_content = f.read() + self.assertIn( - 'Cannot start node', - e.message, - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(e.message), self.cmd)) + 'WARNING: ptrack read map: incorrect checksum of file "{0}"'.format(ptrack_map), + log_content) - log_file = os.path.join(node.logs_dir, 'postgresql.log') - with open(log_file, 'r') as f: - log_content = f.read() + node.stop(['-D', node.data_dir]) + else: + try: + node.slow_start() + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because ptrack.map is corrupted" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except StartNodeException as e: + self.assertIn( + 'Cannot start node', + e.message, + '\n Unexpected Error Message: {0}\n' + ' CMD: {1}'.format(repr(e.message), self.cmd)) + + log_file = os.path.join(node.logs_dir, 'postgresql.log') + with open(log_file, 'r') as f: + log_content = f.read() - self.assertIn( - 'FATAL: ptrack init: incorrect checksum of file "{0}"'.format(ptrack_map), - log_content) + self.assertIn( + 'FATAL: ptrack init: incorrect checksum of file "{0}"'.format(ptrack_map), + log_content) self.set_auto_conf(node, {'ptrack.map_size': '0'}) - node.slow_start() try: From 6470693d2a42b980062a95490fba3c7607655c9b Mon Sep 17 
00:00:00 2001 From: "Mikhail A. Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Thu, 17 Feb 2022 01:24:08 +0300 Subject: [PATCH 247/525] [PGPRO-5612] Support for checkunique parameter of amcheck.bt_index_check() function (PR #456) Co-authored-by: Elena Indrupskaya --- doc/pgprobackup.xml | 35 ++++++-- src/checkdb.c | 124 +++++++++++++++++++------- src/help.c | 6 +- src/pg_probackup.c | 12 +++ src/pg_probackup.h | 1 + tests/checkdb.py | 155 ++++++++++++++++++++++++++++++++- tests/expected/option_help.out | 2 +- 7 files changed, 291 insertions(+), 44 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 76ec2cd76..76333b116 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -4176,7 +4176,7 @@ pg_probackup restore -B backup_dir --instance backup_dir] [--instance instance_name] [-D data_dir] [--help] [-j num_threads] [--progress] -[--skip-block-validation] [--amcheck] [--heapallindexed] +[--skip-block-validation] [--amcheck [--checkunique] [--heapallindexed]] [connection_options] [logging_options] @@ -4195,17 +4195,24 @@ pg_probackup checkdb extension or the amcheck_next extension installed in the database to check its indexes. For databases without amcheck, index verification will be skipped. + Additional options and + are effective depending on the version of amcheck installed. - + - Skip validation of data files. You can use this flag only - together with the flag, so that only logical - verification of indexes is performed. + Verifies unique constraints during logical verification of indexes. + You can use this flag only together with the flag when + the amcheck extension is + installed in the database. + + + This verification is only possible if it is supported by the version of the + amcheck extension you are using. @@ -4219,12 +4226,24 @@ pg_probackup checkdb flag. - This check is only possible if you are using the - amcheck extension of version 2.0 or higher, or - the amcheck_next extension of any version. + This check is only possible if it is supported by the version of the + amcheck extension you are using or + if the amcheck_next extension is used instead. + + + + + + + + + Skip validation of data files. You can use this flag only + together with the flag, so that only logical + verification of indexes is performed. 
+ diff --git a/src/checkdb.c b/src/checkdb.c index e3f2df538..177fc3cc7 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -83,6 +83,7 @@ typedef struct pg_indexEntry char *name; char *namespace; bool heapallindexed_is_supported; + bool checkunique_is_supported; /* schema where amcheck extension is located */ char *amcheck_nspname; /* lock for synchronization of parallel threads */ @@ -351,10 +352,14 @@ get_index_list(const char *dbname, bool first_db_with_amcheck, { PGresult *res; char *amcheck_nspname = NULL; + char *amcheck_extname = NULL; + char *amcheck_extversion = NULL; int i; bool heapallindexed_is_supported = false; + bool checkunique_is_supported = false; parray *index_list = NULL; + /* Check amcheck extension version */ res = pgut_execute(db_conn, "SELECT " "extname, nspname, extversion " "FROM pg_catalog.pg_namespace n " @@ -379,24 +384,68 @@ get_index_list(const char *dbname, bool first_db_with_amcheck, return NULL; } + amcheck_extname = pgut_malloc(strlen(PQgetvalue(res, 0, 0)) + 1); + strcpy(amcheck_extname, PQgetvalue(res, 0, 0)); amcheck_nspname = pgut_malloc(strlen(PQgetvalue(res, 0, 1)) + 1); strcpy(amcheck_nspname, PQgetvalue(res, 0, 1)); + amcheck_extversion = pgut_malloc(strlen(PQgetvalue(res, 0, 2)) + 1); + strcpy(amcheck_extversion, PQgetvalue(res, 0, 2)); + PQclear(res); /* heapallindexed_is_supported is database specific */ - if (strcmp(PQgetvalue(res, 0, 2), "1.0") != 0 && - strcmp(PQgetvalue(res, 0, 2), "1") != 0) + /* TODO this is wrong check, heapallindexed supported also in 1.1.1, 1.2 and 1.2.1... */ + if (strcmp(amcheck_extversion, "1.0") != 0 && + strcmp(amcheck_extversion, "1") != 0) heapallindexed_is_supported = true; elog(INFO, "Amchecking database '%s' using extension '%s' " "version %s from schema '%s'", - dbname, PQgetvalue(res, 0, 0), - PQgetvalue(res, 0, 2), PQgetvalue(res, 0, 1)); + dbname, amcheck_extname, + amcheck_extversion, amcheck_nspname); if (!heapallindexed_is_supported && heapallindexed) elog(WARNING, "Extension '%s' version %s in schema '%s'" "do not support 'heapallindexed' option", - PQgetvalue(res, 0, 0), PQgetvalue(res, 0, 2), - PQgetvalue(res, 0, 1)); + amcheck_extname, amcheck_extversion, + amcheck_nspname); + +#ifndef PGPRO_EE + /* + * Will support when the vanilla patch will commited https://commitfest.postgresql.org/32/2976/ + */ + checkunique_is_supported = false; +#else + /* + * Check bt_index_check function signature to determine support of checkunique parameter + * This can't be exactly checked by checking extension version, + * For example, 1.1.1 and 1.2.1 supports this parameter, but 1.2 doesn't (PGPROEE-12.4.1) + */ + res = pgut_execute(db_conn, "SELECT " + " oid " + "FROM pg_catalog.pg_proc " + "WHERE " + " pronamespace = $1::regnamespace " + "AND proname = 'bt_index_check' " + "AND 'checkunique' = ANY(proargnames) " + "AND (pg_catalog.string_to_array(proargtypes::text, ' ')::regtype[])[pg_catalog.array_position(proargnames, 'checkunique')] = 'bool'::regtype", + 1, (const char **) &amcheck_nspname); + + if (PQresultStatus(res) != PGRES_TUPLES_OK) + { + PQclear(res); + elog(ERROR, "Cannot check 'checkunique' option is supported in bt_index_check function %s: %s", + dbname, PQerrorMessage(db_conn)); + } + + checkunique_is_supported = PQntuples(res) >= 1; + PQclear(res); +#endif + + if (!checkunique_is_supported && checkunique) + elog(WARNING, "Extension '%s' version %s in schema '%s' " + "do not support 'checkunique' parameter", + amcheck_extname, amcheck_extversion, + amcheck_nspname); /* * In order 
to avoid duplicates, select global indexes @@ -453,6 +502,7 @@ get_index_list(const char *dbname, bool first_db_with_amcheck, strcpy(ind->namespace, namespace); /* enough buffer size guaranteed */ ind->heapallindexed_is_supported = heapallindexed_is_supported; + ind->checkunique_is_supported = checkunique_is_supported; ind->amcheck_nspname = pgut_malloc(strlen(amcheck_nspname) + 1); strcpy(ind->amcheck_nspname, amcheck_nspname); pg_atomic_clear_flag(&ind->lock); @@ -464,6 +514,9 @@ get_index_list(const char *dbname, bool first_db_with_amcheck, } PQclear(res); + free(amcheck_extversion); + free(amcheck_nspname); + free(amcheck_extname); return index_list; } @@ -473,38 +526,46 @@ static bool amcheck_one_index(check_indexes_arg *arguments, pg_indexEntry *ind) { - PGresult *res; - char *params[2]; + PGresult *res; + char *params[3]; + static const char *queries[] = { + "SELECT %s.bt_index_check(index => $1)", + "SELECT %s.bt_index_check(index => $1, heapallindexed => $2)", + "SELECT %s.bt_index_check(index => $1, heapallindexed => $2, checkunique => $3)", + }; + int params_count; char *query = NULL; - params[0] = palloc(64); + if (interrupted) + elog(ERROR, "Interrupted"); +#define INDEXRELID 0 +#define HEAPALLINDEXED 1 +#define CHECKUNIQUE 2 /* first argument is index oid */ - sprintf(params[0], "%u", ind->indexrelid); + params[INDEXRELID] = palloc(64); + sprintf(params[INDEXRELID], "%u", ind->indexrelid); /* second argument is heapallindexed */ - params[1] = heapallindexed ? "true" : "false"; + params[HEAPALLINDEXED] = heapallindexed ? "true" : "false"; + /* third optional argument is checkunique */ + params[CHECKUNIQUE] = checkunique ? "true" : "false"; +#undef CHECKUNIQUE +#undef HEAPALLINDEXED - if (interrupted) - elog(ERROR, "Interrupted"); - - if (ind->heapallindexed_is_supported) - { - query = palloc(strlen(ind->amcheck_nspname)+strlen("SELECT .bt_index_check($1, $2)")+1); - sprintf(query, "SELECT %s.bt_index_check($1, $2)", ind->amcheck_nspname); + params_count = ind->checkunique_is_supported ? + 3 : + ( ind->heapallindexed_is_supported ? 
2 : 1 ); - res = pgut_execute_parallel(arguments->conn_arg.conn, - arguments->conn_arg.cancel_conn, - query, 2, (const char **)params, true, true, true); - } - else - { - query = palloc(strlen(ind->amcheck_nspname)+strlen("SELECT .bt_index_check($1)")+1); - sprintf(query, "SELECT %s.bt_index_check($1)", ind->amcheck_nspname); + /* + * Prepare query text with schema name + * +1 for \0 and -2 for %s + */ + query = palloc(strlen(ind->amcheck_nspname) + strlen(queries[params_count - 1]) + 1 - 2); + sprintf(query, queries[params_count - 1], ind->amcheck_nspname); - res = pgut_execute_parallel(arguments->conn_arg.conn, + res = pgut_execute_parallel(arguments->conn_arg.conn, arguments->conn_arg.cancel_conn, - query, 1, (const char **)params, true, true, true); - } + query, params_count, (const char **)params, true, true, true); if (PQresultStatus(res) != PGRES_TUPLES_OK) { @@ -512,7 +573,7 @@ amcheck_one_index(check_indexes_arg *arguments, arguments->thread_num, arguments->conn_opt.pgdatabase, ind->namespace, ind->name, PQresultErrorMessage(res)); - pfree(params[0]); + pfree(params[INDEXRELID]); pfree(query); PQclear(res); return false; @@ -522,7 +583,8 @@ amcheck_one_index(check_indexes_arg *arguments, arguments->thread_num, arguments->conn_opt.pgdatabase, ind->namespace, ind->name); - pfree(params[0]); + pfree(params[INDEXRELID]); +#undef INDEXRELID pfree(query); PQclear(res); return true; diff --git a/src/help.c b/src/help.c index a6530fc0e..a494ab209 100644 --- a/src/help.c +++ b/src/help.c @@ -190,7 +190,7 @@ help_pg_probackup(void) printf(_("\n %s checkdb [-B backup-path] [--instance=instance_name]\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [--progress] [-j num-threads]\n")); printf(_(" [--amcheck] [--skip-block-validation]\n")); - printf(_(" [--heapallindexed]\n")); + printf(_(" [--heapallindexed] [--checkunique]\n")); printf(_(" [--help]\n")); printf(_("\n %s show -B backup-path\n"), PROGRAM_NAME); @@ -601,7 +601,7 @@ help_checkdb(void) printf(_("\n%s checkdb [-B backup-path] [--instance=instance_name]\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [-j num-threads] [--progress]\n")); printf(_(" [--amcheck] [--skip-block-validation]\n")); - printf(_(" [--heapallindexed]\n\n")); + printf(_(" [--heapallindexed] [--checkunique]\n\n")); printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" --instance=instance_name name of the instance\n")); @@ -616,6 +616,8 @@ help_checkdb(void) printf(_(" using 'amcheck' or 'amcheck_next' extensions\n")); printf(_(" --heapallindexed also check that heap is indexed\n")); printf(_(" can be used only with '--amcheck' option\n")); + printf(_(" --checkunique also check unique constraints\n")); + printf(_(" can be used only with '--amcheck' option\n")); printf(_("\n Logging options:\n")); printf(_(" --log-level-console=log-level-console\n")); diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 49e226ace..c5ed13175 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -126,6 +126,7 @@ static parray *exclude_relative_paths_list = NULL; /* checkdb options */ bool need_amcheck = false; bool heapallindexed = false; +bool checkunique = false; bool amcheck_parent = false; /* delete options */ @@ -240,6 +241,7 @@ static ConfigOption cmd_options[] = /* checkdb options */ { 'b', 195, "amcheck", &need_amcheck, SOURCE_CMD_STRICT }, { 'b', 196, "heapallindexed", &heapallindexed, SOURCE_CMD_STRICT }, + { 'b', 198, "checkunique", &checkunique, SOURCE_CMD_STRICT }, { 'b', 197, "parent", &amcheck_parent, SOURCE_CMD_STRICT 
}, /* delete options */ { 'b', 145, "wal", &delete_wal, SOURCE_CMD_STRICT }, @@ -596,6 +598,16 @@ main(int argc, char *argv[]) instance_config.pgdata == NULL) elog(ERROR, "required parameter not specified: --instance"); + /* Check checkdb command options consistency */ + if (backup_subcmd == CHECKDB_CMD && + !need_amcheck) + { + if (heapallindexed) + elog(ERROR, "--heapallindexed can only be used with --amcheck option"); + if (checkunique) + elog(ERROR, "--checkunique can only be used with --amcheck option"); + } + /* Usually checkdb for file logging requires log_directory * to be specified explicitly, but if backup_dir and instance name are provided, * checkdb can use the usual default values or values from config diff --git a/src/pg_probackup.h b/src/pg_probackup.h index b202b6152..783a14b1e 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -829,6 +829,7 @@ extern ShowFormat show_format; /* checkdb options */ extern bool heapallindexed; +extern bool checkunique; extern bool skip_block_validation; /* current settings */ diff --git a/tests/checkdb.py b/tests/checkdb.py index 044c057f6..9b7adcd71 100644 --- a/tests/checkdb.py +++ b/tests/checkdb.py @@ -211,6 +211,7 @@ def test_checkdb_amcheck_only_sanity(self): # Clean after yourself gdb.kill() + node.stop() self.del_test_dir(module_name, fname) # @unittest.skip("skip") @@ -349,6 +350,7 @@ def test_basic_checkdb_amcheck_only_sanity(self): log_file_content) # Clean after yourself + node.stop() self.del_test_dir(module_name, fname) # @unittest.skip("skip") @@ -445,6 +447,98 @@ def test_checkdb_block_validation_sanity(self): e.message) # Clean after yourself + node.stop() + self.del_test_dir(module_name, fname) + + def test_checkdb_checkunique(self): + """Test checkunique parameter of amcheck.bt_index_check function""" + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums']) + node.slow_start() + + try: + node.safe_psql( + "postgres", + "create extension amcheck") + except QueryException as e: + node.safe_psql( + "postgres", + "create extension amcheck_next") + + # Part of https://commitfest.postgresql.org/32/2976/ patch test + node.safe_psql( + "postgres", + "CREATE TABLE bttest_unique(a varchar(50), b varchar(1500), c bytea, d varchar(50)); " + "ALTER TABLE bttest_unique SET (autovacuum_enabled = false); " + "CREATE UNIQUE INDEX bttest_unique_idx ON bttest_unique(a,b); " + "UPDATE pg_catalog.pg_index SET indisunique = false " + "WHERE indrelid = (SELECT oid FROM pg_catalog.pg_class WHERE relname = 'bttest_unique'); " + "INSERT INTO bttest_unique " + " SELECT i::text::varchar, " + " array_to_string(array( " + " SELECT substr('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789', ((random()*(36-1)+1)::integer), 1) " + " FROM generate_series(1,1300)),'')::varchar, " + " i::text::bytea, i::text::varchar " + " FROM generate_series(0,1) AS i, generate_series(0,30) AS x; " + "UPDATE pg_catalog.pg_index SET indisunique = true " + "WHERE indrelid = (SELECT oid FROM pg_catalog.pg_class WHERE relname = 'bttest_unique'); " + "DELETE FROM bttest_unique WHERE ctid::text='(0,2)'; " + "DELETE FROM bttest_unique WHERE ctid::text='(4,2)'; " + "DELETE FROM bttest_unique WHERE ctid::text='(4,3)'; " + "DELETE FROM bttest_unique WHERE ctid::text='(9,3)';") + + # run without checkunique option (error will not detected) + output = self.checkdb_node( + options=[ + '--amcheck', + 
'--skip-block-validation', + '-d', 'postgres', '-p', str(node.port)]) + + self.assertIn( + 'INFO: checkdb --amcheck finished successfully', + output) + self.assertIn( + 'All checked indexes are valid', + output) + + # run with checkunique option + try: + self.checkdb_node( + options=[ + '--amcheck', + '--skip-block-validation', + '--checkunique', + '-d', 'postgres', '-p', str(node.port)]) + if (ProbackupTest.enterprise and + (self.get_version(node) >= 111300 and self.get_version(node) < 120000 + or self.get_version(node) >= 120800 and self.get_version(node) < 130000 + or self.get_version(node) >= 130400)): + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of index corruption\n" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + else: + self.assertRegex( + self.output, + r"WARNING: Extension 'amcheck(|_next)' version [\d.]* in schema 'public' do not support 'checkunique' parameter") + except ProbackupException as e: + self.assertIn( + "ERROR: checkdb --amcheck finished with failure. Not all checked indexes are valid. All databases were amchecked.", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertIn( + "Amcheck failed in database 'postgres' for index: 'public.bttest_unique_idx': ERROR: index \"bttest_unique_idx\" is corrupted. There are tuples violating UNIQUE constraint", + e.message) + + # Clean after yourself + node.stop() self.del_test_dir(module_name, fname) # @unittest.skip("skip") @@ -502,6 +596,7 @@ def test_checkdb_sigint_handling(self): # Clean after yourself gdb.kill() + node.stop() self.del_test_dir(module_name, fname) # @unittest.skip("skip") @@ -563,12 +658,15 @@ def test_checkdb_with_least_privileges(self): 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' # amcheck-next function ) # PG 9.6 @@ -588,6 +686,7 @@ def test_checkdb_with_least_privileges(self): 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' @@ -595,6 +694,8 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO 
backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' # 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' ) @@ -615,13 +716,16 @@ def test_checkdb_with_least_privileges(self): 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup;' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup;' ) if ProbackupTest.enterprise: # amcheck-1.1 @@ -633,7 +737,45 @@ def test_checkdb_with_least_privileges(self): node.safe_psql( 'backupdb', 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup') - # >= 11 + # >= 11 < 14 + elif self.get_version(node) > 110000 and self.get_version(node) < 140000: + node.safe_psql( + 'backupdb', + 'CREATE ROLE backup WITH LOGIN; ' + 'GRANT CONNECT ON DATABASE backupdb to backup; ' + 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' + 'GRANT USAGE ON SCHEMA public TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' + ) + # checkunique parameter + if ProbackupTest.enterprise: + if (self.get_version(node) >= 111300 
and self.get_version(node) < 120000 + or self.get_version(node) >= 120800 and self.get_version(node) < 130000 + or self.get_version(node) >= 130400): + node.safe_psql( + "backupdb", + "GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool, bool) TO backup") + # >= 14 else: node.safe_psql( 'backupdb', @@ -650,6 +792,7 @@ def test_checkdb_with_least_privileges(self): 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' @@ -657,9 +800,16 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anycompatiblearray, anycompatible) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' ) + # checkunique parameter + if ProbackupTest.enterprise: + node.safe_psql( + "backupdb", + "GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool, bool) TO backup") if ProbackupTest.enterprise: node.safe_psql( @@ -700,4 +850,5 @@ def test_checkdb_with_least_privileges(self): repr(e.message), self.cmd)) # Clean after yourself + node.stop() self.del_test_dir(module_name, fname) diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index dd3c4e865..a8b4a64b3 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -107,7 +107,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. pg_probackup checkdb [-B backup-path] [--instance=instance_name] [-D pgdata-path] [--progress] [-j num-threads] [--amcheck] [--skip-block-validation] - [--heapallindexed] + [--heapallindexed] [--checkunique] [--help] pg_probackup show -B backup-path From d222659ee21700a95fc118eeaa941a2bb9b8f07d Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Thu, 17 Feb 2022 01:31:48 +0300 Subject: [PATCH 248/525] Version 2.5.5 --- src/pg_probackup.h | 4 ++-- tests/expected/option_version.out | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 783a14b1e..4cd65980c 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -3,7 +3,7 @@ * pg_probackup.h: Backup/Recovery manager for PostgreSQL. 
* * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2021, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -338,7 +338,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.4" +#define PROGRAM_VERSION "2.5.5" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20501 diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index a69cee03d..29cd93f45 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.5.4 \ No newline at end of file +pg_probackup 2.5.5 \ No newline at end of file From 0834e54fc37bd841f11717e07291d59ba92e3333 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Thu, 17 Feb 2022 15:37:09 +0300 Subject: [PATCH 249/525] [PGPRO-6051] [DOC] [ci skip] before release last minute documentation changes --- doc/pgprobackup.xml | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 76333b116..86063b843 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -977,6 +977,7 @@ GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup; +GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool, bool) TO backup; @@ -4176,7 +4177,7 @@ pg_probackup restore -B backup_dir --instance backup_dir] [--instance instance_name] [-D data_dir] [--help] [-j num_threads] [--progress] -[--skip-block-validation] [--amcheck [--checkunique] [--heapallindexed]] +[--amcheck [--skip-block-validation] [--checkunique] [--heapallindexed]] [connection_options] [logging_options] @@ -4191,7 +4192,7 @@ pg_probackup checkdb Performs logical verification of indexes for the specified PostgreSQL instance if no corruption was found while checking - data files. You must have the amcheck + data files. You must have the amcheck extension or the amcheck_next extension installed in the database to check its indexes. For databases without amcheck, index verification will be skipped. @@ -4211,8 +4212,10 @@ pg_probackup checkdb installed in the database. - This verification is only possible if it is supported by the version of the - amcheck extension you are using. + The verification of unique constraints is only possible if in the version of the + amcheck extension you are using, the + bt_index_check function takes the + checkunique parameter. @@ -4226,9 +4229,10 @@ pg_probackup checkdb flag. - This check is only possible if it is supported by the version of the - amcheck extension you are using or - if the amcheck_next extension is used instead. + This check is only possible if in the version of the + amcheck/amcheck_next extension + you are using, the bt_index_check + function takes the heapallindexed parameter. From 22c808312f67a060cda3bb36e5a032784a5810f9 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Thu, 17 Mar 2022 11:33:18 +0300 Subject: [PATCH 250/525] [ci skip] [packaging] Fix CentOS-8 packaging, fix pgpro-std tests --- packaging/pkg/scripts/rpm.sh | 8 ++++++++ packaging/test/scripts/rpm.sh | 11 ++++++++++- packaging/test/scripts/rpm_forks.sh | 20 +++++++++++++++----- 3 files changed, 33 insertions(+), 6 deletions(-) diff --git a/packaging/pkg/scripts/rpm.sh b/packaging/pkg/scripts/rpm.sh index d03915c20..2fec4a700 100755 --- a/packaging/pkg/scripts/rpm.sh +++ b/packaging/pkg/scripts/rpm.sh @@ -20,7 +20,15 @@ ulimit -n 1024 if [ ${DISTRIB} = 'centos' ] ; then sed -i 's|^baseurl=http://|baseurl=https://|g' /etc/yum.repos.d/*.repo + if [ ${DISTRIB_VERSION} = '8' ]; then + sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo + fi yum update -y + if [ ${DISTRIB_VERSION} = '8' ]; then + sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo + fi fi # PACKAGES NEEDED diff --git a/packaging/test/scripts/rpm.sh b/packaging/test/scripts/rpm.sh index 92804a7f4..3b6806993 100755 --- a/packaging/test/scripts/rpm.sh +++ b/packaging/test/scripts/rpm.sh @@ -15,7 +15,16 @@ PG_TOG=$(echo $PG_VERSION | sed 's|\.||g') if [ ${DISTRIB} != 'rhel' -o ${DISTRIB_VERSION} != '7' ]; then # update of rpm package is broken in rhel-7 (26/12/2022) - yum update -y + #yum update -y + if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then + sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo + fi + yum update -y + if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then + sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo + fi fi # yum upgrade -y || echo 'some packages in docker failed to upgrade' # yum install -y sudo diff --git a/packaging/test/scripts/rpm_forks.sh b/packaging/test/scripts/rpm_forks.sh index 0d72040ed..d57711697 100755 --- a/packaging/test/scripts/rpm_forks.sh +++ b/packaging/test/scripts/rpm_forks.sh @@ -15,7 +15,15 @@ PG_TOG=$(echo $PG_VERSION | sed 's|\.||g') if [ ${DISTRIB} != 'rhel' -o ${DISTRIB_VERSION} != '7' ]; then # update of rpm package is broken in rhel-7 (26/12/2022) + if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then + sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo + fi yum update -y + if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then + sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo + fi fi if [ ${PBK_EDITION} == 'ent' ]; then @@ -80,11 +88,13 @@ if [ $PBK_EDITION == 'std' ] ; then # install POSTGRESQL # rpm -ivh 
https://download.postgresql.org/pub/repos/yum/reporpms/EL-${DISTRIB_VERSION}-x86_64/pgdg-redhat-repo-latest.noarch.rpm - if [[ ${PG_VERSION} == '11' ]] || [[ ${PG_VERSION} == '12' ]]; then - rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm - else - rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm - fi + #if [[ ${PG_VERSION} == '11' ]] || [[ ${PG_VERSION} == '12' ]]; then + # rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm + #else + # rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm + #fi + curl -o pgpro-repo-add.sh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/pgpro-repo-add.sh + sh pgpro-repo-add.sh if [[ ${PG_VERSION} == '9.6' ]]; then yum install -y postgrespro${PG_TOG}-server.x86_64 From 06994293a221366b969a6f83d5ae339444caf372 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Mon, 28 Mar 2022 06:30:39 +0300 Subject: [PATCH 251/525] [PGPRO-5387] Vanilla fixed idle replica archiving (#458) See https://www.postgresql.org/message-id/flat/20210901.121225.1339494423357751537.horikyota.ntt%40gmail.com#ba576416b65f28725488861280805e84 So we can revert two workarounds: * Revert "[PGPRO-5378] fix unstable tests.replica.ReplicaTest.test_replica_archive_page_backup" This reverts commit 90b9b5745e19909a6f5f28761def49ef6bfef0e4. * Revert ""fix" unstable backup.BackupTest.test_backup_with_less_privileges_role (disable tests in archive mode from replica)" This reverts commit 5dcd1ce2b817219180005b1b70a231798cd96ec5. --- tests/backup.py | 40 +++++++++++++++++++--------------------- tests/replica.py | 15 ++------------- 2 files changed, 21 insertions(+), 34 deletions(-) diff --git a/tests/backup.py b/tests/backup.py index b14f5fe98..682409015 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -2351,47 +2351,45 @@ def test_backup_with_less_privileges_role(self): replica.slow_start(replica=True) - # Archive backups from replica in this test are disabled, - # because WAL archiving on replica in idle DB in PostgreSQL is broken: - # replica will not archive the previous WAL until it receives new records in the next WAL file, - # this "lazy" archiving can be seen in src/backend/replication/walreceiver.c:XLogWalRcvWrite() - # (see !XLByteInSeg checking and XLogArchiveNotify() calling). 
- # # self.switch_wal_segment(node) - #self.backup_node( - # backup_dir, 'replica', replica, - # datname='backupdb', options=['-U', 'backup']) + # self.switch_wal_segment(node) + + self.backup_node( + backup_dir, 'replica', replica, + datname='backupdb', options=['-U', 'backup']) # stream full backup from replica self.backup_node( backup_dir, 'replica', replica, datname='backupdb', options=['--stream', '-U', 'backup']) +# self.switch_wal_segment(node) + # PAGE backup from replica - #self.switch_wal_segment(node) - #self.backup_node( - # backup_dir, 'replica', replica, backup_type='page', - # datname='backupdb', options=['-U', 'backup', '--archive-timeout=30s']) + self.switch_wal_segment(node) + self.backup_node( + backup_dir, 'replica', replica, backup_type='page', + datname='backupdb', options=['-U', 'backup', '--archive-timeout=30s']) self.backup_node( backup_dir, 'replica', replica, backup_type='page', datname='backupdb', options=['--stream', '-U', 'backup']) # DELTA backup from replica - #self.switch_wal_segment(node) - #self.backup_node( - # backup_dir, 'replica', replica, backup_type='delta', - # datname='backupdb', options=['-U', 'backup']) + self.switch_wal_segment(node) + self.backup_node( + backup_dir, 'replica', replica, backup_type='delta', + datname='backupdb', options=['-U', 'backup']) self.backup_node( backup_dir, 'replica', replica, backup_type='delta', datname='backupdb', options=['--stream', '-U', 'backup']) # PTRACK backup from replica if self.ptrack: - #self.switch_wal_segment(node) - #self.backup_node( - # backup_dir, 'replica', replica, backup_type='ptrack', - # datname='backupdb', options=['-U', 'backup']) + self.switch_wal_segment(node) + self.backup_node( + backup_dir, 'replica', replica, backup_type='ptrack', + datname='backupdb', options=['-U', 'backup']) self.backup_node( backup_dir, 'replica', replica, backup_type='ptrack', datname='backupdb', options=['--stream', '-U', 'backup']) diff --git a/tests/replica.py b/tests/replica.py index 8fb89c222..45eed3fb4 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -291,16 +291,6 @@ def test_replica_archive_page_backup(self): self.wait_until_replica_catch_with_master(master, replica) - master.pgbench_init(scale=5) - # Continuous making some changes on master, - # because WAL archiving on replica in idle DB in PostgreSQL is broken: - # replica will not archive the previous WAL until it receives new records in the next WAL file, - # this "lazy" archiving can be seen in src/backend/replication/walreceiver.c:XLogWalRcvWrite() - # (see !XLByteInSeg checking and XLogArchiveNotify() calling). - pgbench = master.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '3', '-c', '1', '--no-vacuum']) - backup_id = self.backup_node( backup_dir, 'replica', replica, options=[ @@ -309,9 +299,6 @@ def test_replica_archive_page_backup(self): '--master-db=postgres', '--master-port={0}'.format(master.port)]) - pgbench.wait() - pgbench.stdout.close() - self.validate_pb(backup_dir, 'replica') self.assertEqual( 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) @@ -334,6 +321,8 @@ def test_replica_archive_page_backup(self): # Change data on master, make PAGE backup from replica, # restore taken backup and check that restored data equal # to original data + master.pgbench_init(scale=5) + pgbench = master.pgbench( options=['-T', '30', '-c', '2', '--no-vacuum']) From e101bfda7abd8050a56952341f02fa0bfbb35230 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Tue, 12 Apr 2022 14:48:38 +0300 Subject: [PATCH 252/525] =?UTF-8?q?Fix=20incorrect=20PG=5FPROBACKUP=5FPTRA?= =?UTF-8?q?CK=20definition=20in=20travis=20tests=20(led=20to=20the=20inabi?= =?UTF-8?q?lity=20to=20run=20ptra=D1=81k=20tests)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .travis.yml | 12 ++++++------ travis/make_dockerfile.sh | 2 +- travis/run_tests.sh | 12 ++++++------ 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.travis.yml b/.travis.yml index 876289e82..663330918 100644 --- a/.travis.yml +++ b/.travis.yml @@ -34,17 +34,17 @@ env: - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=archive +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=archive # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=compression -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=delta -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=locking +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=compression +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=delta +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=locking # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=merge -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=page +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=page # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=ptrack # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=replica -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=retention +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=retention # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=restore jobs: diff --git a/travis/make_dockerfile.sh b/travis/make_dockerfile.sh index 119125ced..e780649d9 100755 --- a/travis/make_dockerfile.sh +++ b/travis/make_dockerfile.sh @@ -15,7 +15,7 @@ if [ -z ${MODE+x} ]; then fi if [ -z ${PTRACK_PATCH_PG_BRANCH+x} ]; then - PTRACK_PATCH_PG_BRANCH=off + PTRACK_PATCH_PG_BRANCH=OFF fi if [ -z ${PGPROBACKUP_GDB+x} ]; then diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 44815407e..52b05105b 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2020, Postgres Professional +# Copyright (c) 2019-2022, Postgres Professional # set -xe @@ -33,18 +33,18 @@ echo "############### Getting Postgres sources:" git clone https://github.com/postgres/postgres.git -b $PG_BRANCH --depth=1 # Clone ptrack -if [ "$PTRACK_PATCH_PG_BRANCH" != "off" ]; then +if [ "$PTRACK_PATCH_PG_BRANCH" != "OFF" ]; then git clone https://github.com/postgrespro/ptrack.git -b master --depth=1 - export PG_PROBACKUP_PTRACK=on + export PG_PROBACKUP_PTRACK=ON else - export PG_PROBACKUP_PTRACK=off + export PG_PROBACKUP_PTRACK=OFF fi # Compile and install Postgres echo "############### Compiling Postgres:" cd postgres # Go to 
postgres dir -if [ "$PG_PROBACKUP_PTRACK" = "on" ]; then +if [ "$PG_PROBACKUP_PTRACK" = "ON" ]; then git apply -3 ../ptrack/patches/${PTRACK_PATCH_PG_BRANCH}-ptrack-core.diff fi CFLAGS="-O0" ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests @@ -59,7 +59,7 @@ export PATH=$PGHOME/bin:$PATH export LD_LIBRARY_PATH=$PGHOME/lib export PG_CONFIG=$(which pg_config) -if [ "$PG_PROBACKUP_PTRACK" = "on" ]; then +if [ "$PG_PROBACKUP_PTRACK" = "ON" ]; then echo "############### Compiling Ptrack:" make USE_PGXS=1 -C ../ptrack install fi From bdbc8265d45649e803e4dd9ad733250758e33e19 Mon Sep 17 00:00:00 2001 From: japinli Date: Tue, 19 Apr 2022 19:02:20 +0800 Subject: [PATCH 253/525] Fix comparison unsigned expression --- src/data.c | 2 +- src/delete.c | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/src/data.c b/src/data.c index f02e3fd14..ec42813a6 100644 --- a/src/data.c +++ b/src/data.c @@ -2321,7 +2321,7 @@ copy_pages(const char *to_fullpath, const char *from_fullpath, elog(ERROR, "Cannot seek to end of file position in destination file \"%s\": %s", to_fullpath, strerror(errno)); { - size_t pos = ftell(out); + long pos = ftell(out); if (pos < 0) elog(ERROR, "Cannot get position in destination file \"%s\": %s", diff --git a/src/delete.c b/src/delete.c index 6c70ff81e..b86ed43e6 100644 --- a/src/delete.c +++ b/src/delete.c @@ -36,7 +36,7 @@ do_delete(InstanceState *instanceState, time_t backup_id) parray *backup_list, *delete_list; pgBackup *target_backup = NULL; - size_t size_to_delete = 0; + int64 size_to_delete = 0; char size_to_delete_pretty[20]; /* Get complete list of backups */ @@ -682,12 +682,11 @@ do_retention_wal(InstanceState *instanceState, bool dry_run) * at least one backup and no file should be removed. * Unless wal-depth is enabled. 
*/ - if ((tlinfo->closest_backup) && instance_config.wal_depth <= 0) + if ((tlinfo->closest_backup) && instance_config.wal_depth == 0) continue; /* WAL retention keeps this timeline from purge */ - if (instance_config.wal_depth >= 0 && tlinfo->anchor_tli > 0 && - tlinfo->anchor_tli != tlinfo->tli) + if (tlinfo->anchor_tli > 0 && tlinfo->anchor_tli != tlinfo->tli) continue; /* @@ -701,7 +700,7 @@ do_retention_wal(InstanceState *instanceState, bool dry_run) */ if (tlinfo->oldest_backup) { - if (instance_config.wal_depth >= 0 && !(XLogRecPtrIsInvalid(tlinfo->anchor_lsn))) + if (!(XLogRecPtrIsInvalid(tlinfo->anchor_lsn))) { delete_walfiles_in_tli(instanceState, tlinfo->anchor_lsn, tlinfo, instance_config.xlog_seg_size, dry_run); @@ -714,7 +713,7 @@ do_retention_wal(InstanceState *instanceState, bool dry_run) } else { - if (instance_config.wal_depth >= 0 && !(XLogRecPtrIsInvalid(tlinfo->anchor_lsn))) + if (!(XLogRecPtrIsInvalid(tlinfo->anchor_lsn))) delete_walfiles_in_tli(instanceState, tlinfo->anchor_lsn, tlinfo, instance_config.xlog_seg_size, dry_run); else @@ -942,7 +941,7 @@ delete_walfiles_in_tli(InstanceState *instanceState, XLogRecPtr keep_lsn, timeli join_path_components(wal_fullpath, instanceState->instance_wal_subdir_path, wal_file->file.name); /* save segment from purging */ - if (instance_config.wal_depth >= 0 && wal_file->keep) + if (wal_file->keep) { elog(VERBOSE, "Retain WAL segment \"%s\"", wal_fullpath); continue; @@ -1027,7 +1026,7 @@ do_delete_status(InstanceState *instanceState, InstanceConfig *instance_config, parray *backup_list, *delete_list; const char *pretty_status; int n_deleted = 0, n_found = 0; - size_t size_to_delete = 0; + int64 size_to_delete = 0; char size_to_delete_pretty[20]; pgBackup *backup; From 0ae30afe0aa24d970ffd1eb0ca3d3ee6ca32de3d Mon Sep 17 00:00:00 2001 From: japinli Date: Thu, 21 Apr 2022 20:25:28 +0800 Subject: [PATCH 254/525] Fix formattor for ftello --- src/data.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/data.c b/src/data.c index ec42813a6..052e17486 100644 --- a/src/data.c +++ b/src/data.c @@ -2030,10 +2030,10 @@ get_page_header(FILE *in, const char *fullpath, BackupPageHeader* bph, return false; /* EOF found */ else if (read_len != 0 && feof(in)) elog(ERROR, - "Odd size page found at offset %lu of \"%s\"", + "Odd size page found at offset %ld of \"%s\"", ftello(in), fullpath); else - elog(ERROR, "Cannot read header at offset %lu of \"%s\": %s", + elog(ERROR, "Cannot read header at offset %ld of \"%s\": %s", ftello(in), fullpath, strerror(errno)); } From 94fd54ab6367fceb58d5f643d761ea48cf58ea05 Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Fri, 29 Apr 2022 21:15:35 +0500 Subject: [PATCH 255/525] Fix test pgpro560.CheckSystemID.test_pgpro560_control_file_loss. 
File /global/pg_control is not removed permanently --- tests/pgpro560.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tests/pgpro560.py b/tests/pgpro560.py index 53c7914a2..7e10fef6a 100644 --- a/tests/pgpro560.py +++ b/tests/pgpro560.py @@ -32,15 +32,16 @@ def test_pgpro560_control_file_loss(self): node.slow_start() file = os.path.join(node.base_dir, 'data', 'global', 'pg_control') - os.remove(file) + # Do not delete this file permanently + os.rename(file, os.path.join(node.base_dir, 'data', 'global', 'pg_control_copy')) try: self.backup_node(backup_dir, 'node', node, options=['--stream']) # we should die here because exception is what we expect to happen self.assertEqual( - 1, 0, - "Expecting Error because pg_control was deleted.\n " - "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) + 1, 0, + "Expecting Error because pg_control was deleted.\n " + "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) except ProbackupException as e: self.assertTrue( 'ERROR: Could not open file' in e.message and @@ -49,6 +50,8 @@ def test_pgpro560_control_file_loss(self): repr(e.message), self.cmd)) # Clean after yourself + # Return this file to avoid a Postgres failure + os.rename(os.path.join(node.base_dir, 'data', 'global', 'pg_control_copy'), file) self.del_test_dir(module_name, fname) def test_pgpro560_systemid_mismatch(self): From 141e96a0e6cdaac8b1e41b871254bdb60005a368 Mon Sep 17 00:00:00 2001 From: Elena Indrupskaya Date: Mon, 23 May 2022 15:07:27 +0300 Subject: [PATCH 256/525] [DOC] [PBCKP-128] [skip travis] Describe catchup dry-run flag --- doc/pgprobackup.xml | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 86063b843..cb615fb17 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -3563,6 +3563,14 @@ pg_probackup catchup -b catchup_mode --source-pgdata= of threads with the option: pg_probackup catchup -b catchup_mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream --threads=num_threads + + + + Before cloning/synchronising a PostgreSQL instance, you can run the + catchup command with the flag + to estimate the size of data files to be transferred, but make no changes on disk: + +pg_probackup catchup -b catchup_mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream --dry-run @@ -3576,7 +3584,7 @@ pg_probackup catchup --source-pgdata=/master-pgdata --destination-pgdata=/replic Another example shows how you can add a new remote standby server with the PostgreSQL data directory /replica-pgdata by running the catchup command in the FULL mode on four parallel threads: - + pg_probackup catchup --source-pgdata=/master-pgdata --destination-pgdata=/replica-pgdata -p 5432 -d postgres -U remote-postgres-user --stream --backup-mode=FULL --remote-host=remote-hostname --remote-user=remote-unix-username -j 4 @@ -4482,7 +4490,7 @@ pg_probackup archive-get -B backup_dir --instance catchup_mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir -[--help] [-j | --threads=num_threads] [--stream] +[--help] [-j | --threads=num_threads] [--stream] [--dry-run] [--temp-slot] [-P | --perm-slot] [-S | --slot=slot_name] [--exclude-path=PATHNAME] [-T OLDDIR=NEWDIR] @@ -4571,6 +4579,19 @@ pg_probackup catchup -b catchup_mode + + + + + Displays the total size of the files to be transferred by catchup. 
+ This flag initiates a trial run of catchup, which does + not actually create, delete or move files on disk. WAL streaming is skipped with . + This flag also allows you to check that + all the options are correct and cloning/synchronising is ready to run. + + + + =path_prefix =path_prefix @@ -4591,17 +4612,6 @@ pg_probackup catchup -b catchup_mode - - - - - Copies the instance in STREAM WAL delivery mode, - including all the necessary WAL files by streaming them from - the instance server via replication protocol. - - - - From 4b2df86d6961937e062c54bb7fd5a4cdf96c1f58 Mon Sep 17 00:00:00 2001 From: Vyacheslav Makarov <50846161+MakSl@users.noreply.github.com> Date: Mon, 23 May 2022 20:13:18 +0300 Subject: [PATCH 257/525] PBCKP-97: added localization of messages * PBCKP-97: Adding localization files Added localization of messages. Fixed some bugs. Added the --enable-nls tag for tests. Added a test to check the localization of messages. Co-authored-by: Vyacheslav Makarov --- .travis.yml | 1 + README.md | 14 + nls.mk | 6 + po/ru.po | 1880 +++++++++++++++++++++++++++++ src/help.c | 4 +- src/pg_probackup.c | 1 + tests/Readme.md | 2 +- tests/expected/option_help.out | 2 +- tests/expected/option_help_ru.out | 184 +++ tests/option.py | 11 + travis/run_tests.sh | 2 +- 11 files changed, 2102 insertions(+), 5 deletions(-) create mode 100644 nls.mk create mode 100644 po/ru.po create mode 100644 tests/expected/option_help_ru.out diff --git a/.travis.yml b/.travis.yml index 663330918..8e325c64f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -41,6 +41,7 @@ env: # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=delta # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=locking # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=merge +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=option # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=page # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=ptrack # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=replica diff --git a/README.md b/README.md index 060883a28..5da8d199e 100644 --- a/README.md +++ b/README.md @@ -224,3 +224,17 @@ Postgres Professional, Moscow, Russia. ## Credits `pg_probackup` utility is based on `pg_arman`, that was originally written by NTT and then developed and maintained by Michael Paquier. + + +### Localization files (*.po) + +Description of how to add new translation languages. +1. Add a flag --enable-nls in configure. +2. Build postgres. +3. Adding to nls.mk in folder pg_probackup required files in GETTEXT_FILES. +4. In folder pg_probackup do 'make update-po'. +5. As a result, the progname.pot file will be created. Copy the content and add it to the file with the desired language. +6. Adding to nls.mk in folder pg_probackup required language in AVAIL_LANGUAGES. 
+ +For more information, follow the link below: +https://postgrespro.ru/docs/postgresql/12/nls-translator diff --git a/nls.mk b/nls.mk new file mode 100644 index 000000000..981c1c4fe --- /dev/null +++ b/nls.mk @@ -0,0 +1,6 @@ +# contrib/pg_probackup/nls.mk +CATALOG_NAME = pg_probackup +AVAIL_LANGUAGES = ru +GETTEXT_FILES = src/help.c +GETTEXT_TRIGGERS = $(FRONTEND_COMMON_GETTEXT_TRIGGERS) +GETTEXT_FLAGS = $(FRONTEND_COMMON_GETTEXT_FLAGS) diff --git a/po/ru.po b/po/ru.po new file mode 100644 index 000000000..1263675c2 --- /dev/null +++ b/po/ru.po @@ -0,0 +1,1880 @@ +# Russian message translation file for pg_probackup +# Copyright (C) 2022 PostgreSQL Global Development Group +# This file is distributed under the same license as the pg_probackup (PostgreSQL) package. +# Vyacheslav Makarov , 2022. +msgid "" +msgstr "" +"Project-Id-Version: pg_probackup (PostgreSQL)\n" +"Report-Msgid-Bugs-To: bugs@postgrespro.ru\n" +"POT-Creation-Date: 2022-04-08 11:33+0300\n" +"PO-Revision-Date: 2022-MO-DA HO:MI+ZONE\n" +"Last-Translator: Vyacheslav Makarov \n" +"Language-Team: Russian \n" +"Language: ru\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" +"%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" + +#: src/help.c:84 +#, c-format +msgid "" +"\n" +"%s - utility to manage backup/recovery of PostgreSQL database.\n" +msgstr "" +"\n" +"%s - утилита для управления резервным копированием/восстановлением базы данных PostgreSQL.\n" + +#: src/help.c:86 +#, c-format +msgid "" +"\n" +" %s help [COMMAND]\n" +msgstr "" + +#: src/help.c:88 +#, c-format +msgid "" +"\n" +" %s version\n" +msgstr "" + +#: src/help.c:90 +#, c-format +msgid "" +"\n" +" %s init -B backup-path\n" +msgstr "" + +#: src/help.c:92 +#, c-format +msgid "" +"\n" +" %s set-config -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:93 src/help.c:791 +#, c-format +msgid " [-D pgdata-path]\n" +msgstr "" + +#: src/help.c:94 src/help.c:130 src/help.c:218 +#, c-format +msgid " [--external-dirs=external-directories-paths]\n" +msgstr "" + +#: src/help.c:95 src/help.c:132 src/help.c:305 src/help.c:731 src/help.c:794 +#, c-format +msgid " [--log-level-console=log-level-console]\n" +msgstr "" + +#: src/help.c:96 src/help.c:133 src/help.c:306 src/help.c:732 src/help.c:795 +#, c-format +msgid " [--log-level-file=log-level-file]\n" +msgstr "" + +#: src/help.c:97 src/help.c:134 src/help.c:307 src/help.c:733 src/help.c:796 +#, c-format +msgid " [--log-filename=log-filename]\n" +msgstr "" + +#: src/help.c:98 src/help.c:135 src/help.c:308 src/help.c:734 src/help.c:797 +#, c-format +msgid " [--error-log-filename=error-log-filename]\n" +msgstr "" + +#: src/help.c:99 src/help.c:136 src/help.c:309 src/help.c:735 src/help.c:798 +#, c-format +msgid " [--log-directory=log-directory]\n" +msgstr "" + +#: src/help.c:100 src/help.c:137 src/help.c:310 src/help.c:736 src/help.c:799 +#, c-format +msgid " [--log-rotation-size=log-rotation-size]\n" +msgstr "" + +#: src/help.c:101 src/help.c:800 +#, c-format +msgid " [--log-rotation-age=log-rotation-age]\n" +msgstr "" + +#: src/help.c:102 src/help.c:140 src/help.c:203 src/help.c:313 src/help.c:674 +#: src/help.c:801 +#, c-format +msgid " [--retention-redundancy=retention-redundancy]\n" +msgstr "" + +#: src/help.c:103 src/help.c:141 src/help.c:204 src/help.c:314 src/help.c:675 +#: src/help.c:802 +#, c-format +msgid " [--retention-window=retention-window]\n" +msgstr 
"" + +#: src/help.c:104 src/help.c:142 src/help.c:205 src/help.c:315 src/help.c:676 +#: src/help.c:803 +#, c-format +msgid " [--wal-depth=wal-depth]\n" +msgstr "" + +#: src/help.c:105 src/help.c:144 src/help.c:235 src/help.c:317 src/help.c:804 +#: src/help.c:948 +#, c-format +msgid " [--compress-algorithm=compress-algorithm]\n" +msgstr "" + +#: src/help.c:106 src/help.c:145 src/help.c:236 src/help.c:318 src/help.c:805 +#: src/help.c:949 +#, c-format +msgid " [--compress-level=compress-level]\n" +msgstr "" + +#: src/help.c:107 src/help.c:232 src/help.c:806 src/help.c:945 +#, c-format +msgid " [--archive-timeout=timeout]\n" +msgstr "" + +#: src/help.c:108 src/help.c:147 src/help.c:259 src/help.c:320 src/help.c:807 +#: src/help.c:1045 +#, c-format +msgid " [-d dbname] [-h host] [-p port] [-U username]\n" +msgstr "" + +#: src/help.c:109 src/help.c:149 src/help.c:174 src/help.c:219 src/help.c:237 +#: src/help.c:247 src/help.c:261 src/help.c:322 src/help.c:449 src/help.c:808 +#: src/help.c:906 src/help.c:950 src/help.c:994 src/help.c:1047 +#, c-format +msgid " [--remote-proto] [--remote-host]\n" +msgstr "" + +#: src/help.c:110 src/help.c:150 src/help.c:175 src/help.c:220 src/help.c:238 +#: src/help.c:248 src/help.c:262 src/help.c:323 src/help.c:450 src/help.c:809 +#: src/help.c:907 src/help.c:951 src/help.c:995 src/help.c:1048 +#, c-format +msgid " [--remote-port] [--remote-path] [--remote-user]\n" +msgstr "" + +#: src/help.c:111 src/help.c:151 src/help.c:176 src/help.c:221 src/help.c:239 +#: src/help.c:249 src/help.c:263 src/help.c:324 src/help.c:451 src/help.c:1049 +#, c-format +msgid " [--ssh-options]\n" +msgstr "" + +#: src/help.c:112 +#, c-format +msgid " [--restore-command=cmdline] [--archive-host=destination]\n" +msgstr "" + +#: src/help.c:113 src/help.c:178 +#, c-format +msgid " [--archive-port=port] [--archive-user=username]\n" +msgstr "" + +#: src/help.c:114 src/help.c:119 src/help.c:123 src/help.c:153 src/help.c:179 +#: src/help.c:188 src/help.c:194 src/help.c:209 src/help.c:214 src/help.c:222 +#: src/help.c:226 src/help.c:240 src/help.c:250 src/help.c:264 +#, c-format +msgid " [--help]\n" +msgstr "" + +#: src/help.c:116 +#, c-format +msgid "" +"\n" +" %s set-backup -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:117 +#, c-format +msgid " -i backup-id [--ttl=interval] [--expire-time=timestamp]\n" +msgstr "" + +#: src/help.c:118 +#, c-format +msgid " [--note=text]\n" +msgstr "" + +#: src/help.c:121 +#, c-format +msgid "" +"\n" +" %s show-config -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:122 +#, c-format +msgid " [--format=format]\n" +msgstr "" + +#: src/help.c:125 +#, c-format +msgid "" +"\n" +" %s backup -B backup-path -b backup-mode --instance=instance_name\n" +msgstr "" + +#: src/help.c:126 src/help.c:299 +#, c-format +msgid " [-D pgdata-path] [-C]\n" +msgstr "" + +#: src/help.c:127 src/help.c:300 +#, c-format +msgid " [--stream [-S slot-name] [--temp-slot]]\n" +msgstr "" + +#: src/help.c:128 src/help.c:301 +#, c-format +msgid " [--backup-pg-log] [-j num-threads] [--progress]\n" +msgstr "" + +#: src/help.c:129 src/help.c:168 src/help.c:302 src/help.c:433 +#, c-format +msgid " [--no-validate] [--skip-block-validation]\n" +msgstr "" + +#: src/help.c:131 src/help.c:304 +#, c-format +msgid " [--no-sync]\n" +msgstr "" + +#: src/help.c:138 src/help.c:311 +#, c-format +msgid " [--log-rotation-age=log-rotation-age] [--no-color]\n" +msgstr "" + +#: src/help.c:139 src/help.c:312 +#, c-format +msgid " [--delete-expired] [--delete-wal] 
[--merge-expired]\n" +msgstr "" + +#: src/help.c:143 src/help.c:316 +#, c-format +msgid " [--compress]\n" +msgstr "" + +#: src/help.c:146 src/help.c:319 +#, c-format +msgid " [--archive-timeout=archive-timeout]\n" +msgstr "" + +#: src/help.c:148 src/help.c:260 src/help.c:321 src/help.c:1046 +#, c-format +msgid " [-w --no-password] [-W --password]\n" +msgstr "" + +#: src/help.c:152 +#, c-format +msgid " [--ttl=interval] [--expire-time=timestamp] [--note=text]\n" +msgstr "" + +#: src/help.c:156 +#, c-format +msgid "" +"\n" +" %s restore -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:157 src/help.c:431 +#, c-format +msgid " [-D pgdata-path] [-i backup-id] [-j num-threads]\n" +msgstr "" + +#: src/help.c:158 src/help.c:183 src/help.c:439 src/help.c:552 +#, c-format +msgid " [--recovery-target-time=time|--recovery-target-xid=xid\n" +msgstr "" + +#: src/help.c:159 src/help.c:184 src/help.c:440 src/help.c:553 +#, c-format +msgid " |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]\n" +msgstr "" + +#: src/help.c:160 src/help.c:185 src/help.c:441 src/help.c:554 +#, c-format +msgid " [--recovery-target-timeline=timeline]\n" +msgstr "" + +#: src/help.c:161 src/help.c:442 +#, c-format +msgid " [--recovery-target=immediate|latest]\n" +msgstr "" + +#: src/help.c:162 src/help.c:186 src/help.c:443 src/help.c:555 +#, c-format +msgid " [--recovery-target-name=target-name]\n" +msgstr "" + +#: src/help.c:163 src/help.c:444 +#, c-format +msgid " [--recovery-target-action=pause|promote|shutdown]\n" +msgstr "" + +#: src/help.c:164 src/help.c:445 src/help.c:793 +#, c-format +msgid " [--restore-command=cmdline]\n" +msgstr "" + +#: src/help.c:165 +#, c-format +msgid " [-R | --restore-as-replica] [--force]\n" +msgstr "" + +#: src/help.c:166 src/help.c:447 +#, c-format +msgid " [--primary-conninfo=primary_conninfo]\n" +msgstr "" + +#: src/help.c:167 src/help.c:448 +#, c-format +msgid " [-S | --primary-slot-name=slotname]\n" +msgstr "" + +#: src/help.c:169 +#, c-format +msgid " [-T OLDDIR=NEWDIR] [--progress]\n" +msgstr "" + +#: src/help.c:170 src/help.c:435 +#, c-format +msgid " [--external-mapping=OLDDIR=NEWDIR]\n" +msgstr "" + +#: src/help.c:171 +#, c-format +msgid " [--skip-external-dirs] [--no-sync]\n" +msgstr "" + +#: src/help.c:172 src/help.c:437 +#, c-format +msgid " [-I | --incremental-mode=none|checksum|lsn]\n" +msgstr "" + +#: src/help.c:173 +#, c-format +msgid " [--db-include | --db-exclude]\n" +msgstr "" + +#: src/help.c:177 +#, c-format +msgid " [--archive-host=hostname]\n" +msgstr "" + +#: src/help.c:181 +#, c-format +msgid "" +"\n" +" %s validate -B backup-path [--instance=instance_name]\n" +msgstr "" + +#: src/help.c:182 src/help.c:551 +#, c-format +msgid " [-i backup-id] [--progress] [-j num-threads]\n" +msgstr "" + +#: src/help.c:187 +#, c-format +msgid " [--skip-block-validation]\n" +msgstr "" + +#: src/help.c:190 +#, c-format +msgid "" +"\n" +" %s checkdb [-B backup-path] [--instance=instance_name]\n" +msgstr "" + +#: src/help.c:191 +#, c-format +msgid " [-D pgdata-path] [--progress] [-j num-threads]\n" +msgstr "" + +#: src/help.c:192 src/help.c:603 +#, c-format +msgid " [--amcheck] [--skip-block-validation]\n" +msgstr "" + +#: src/help.c:193 +#, c-format +msgid " [--heapallindexed] [--checkunique]\n" +msgstr "" + +#: src/help.c:196 +#, c-format +msgid "" +"\n" +" %s show -B backup-path\n" +msgstr "" + +#: src/help.c:197 src/help.c:657 +#, c-format +msgid " [--instance=instance_name [-i backup-id]]\n" +msgstr "" + +#: src/help.c:198 +#, c-format +msgid " 
[--format=format] [--archive]\n" +msgstr "" + +#: src/help.c:199 +#, c-format +msgid " [--no-color] [--help]\n" +msgstr "" + +#: src/help.c:201 +#, c-format +msgid "" +"\n" +" %s delete -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:202 src/help.c:673 +#, c-format +msgid " [-j num-threads] [--progress]\n" +msgstr "" + +#: src/help.c:206 +#, c-format +msgid " [-i backup-id | --delete-expired | --merge-expired | --status=backup_status]\n" +msgstr "" + +#: src/help.c:207 +#, c-format +msgid " [--delete-wal]\n" +msgstr "" + +#: src/help.c:208 +#, c-format +msgid " [--dry-run] [--no-validate] [--no-sync]\n" +msgstr "" + +#: src/help.c:211 +#, c-format +msgid "" +"\n" +" %s merge -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:212 +#, c-format +msgid " -i backup-id [--progress] [-j num-threads]\n" +msgstr "" + +#: src/help.c:213 src/help.c:730 +#, c-format +msgid " [--no-validate] [--no-sync]\n" +msgstr "" + +#: src/help.c:216 +#, c-format +msgid "" +"\n" +" %s add-instance -B backup-path -D pgdata-path\n" +msgstr "" + +#: src/help.c:217 src/help.c:225 src/help.c:904 +#, c-format +msgid " --instance=instance_name\n" +msgstr "" + +#: src/help.c:224 +#, c-format +msgid "" +"\n" +" %s del-instance -B backup-path\n" +msgstr "" + +#: src/help.c:228 +#, c-format +msgid "" +"\n" +" %s archive-push -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:229 src/help.c:244 src/help.c:942 src/help.c:990 +#, c-format +msgid " --wal-file-name=wal-file-name\n" +msgstr "" + +#: src/help.c:230 src/help.c:943 src/help.c:991 +#, c-format +msgid " [--wal-file-path=wal-file-path]\n" +msgstr "" + +#: src/help.c:231 src/help.c:245 src/help.c:944 src/help.c:992 +#, c-format +msgid " [-j num-threads] [--batch-size=batch_size]\n" +msgstr "" + +#: src/help.c:233 src/help.c:946 +#, c-format +msgid " [--no-ready-rename] [--no-sync]\n" +msgstr "" + +#: src/help.c:234 src/help.c:947 +#, c-format +msgid " [--overwrite] [--compress]\n" +msgstr "" + +#: src/help.c:242 +#, c-format +msgid "" +"\n" +" %s archive-get -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:243 +#, c-format +msgid " --wal-file-path=wal-file-path\n" +msgstr "" + +#: src/help.c:246 src/help.c:993 +#, c-format +msgid " [--no-validate-wal]\n" +msgstr "" + +#: src/help.c:252 +#, c-format +msgid "" +"\n" +" %s catchup -b catchup-mode\n" +msgstr "" + +#: src/help.c:253 src/help.c:1039 +#, c-format +msgid " --source-pgdata=path_to_pgdata_on_remote_server\n" +msgstr "" + +#: src/help.c:254 src/help.c:1040 +#, c-format +msgid " --destination-pgdata=path_to_local_dir\n" +msgstr "" + +#: src/help.c:255 +#, c-format +msgid " [--stream [-S slot-name] [--temp-slot | --perm-slot]]\n" +msgstr "" + +#: src/help.c:256 src/help.c:1042 +#, c-format +msgid " [-j num-threads]\n" +msgstr "" + +#: src/help.c:257 src/help.c:434 src/help.c:1043 +#, c-format +msgid " [-T OLDDIR=NEWDIR]\n" +msgstr "" + +#: src/help.c:258 src/help.c:1044 +#, c-format +msgid " [--exclude-path=path_prefix]\n" +msgstr "" + +#: src/help.c:270 +#, c-format +msgid "Read the website for details <%s>.\n" +msgstr "Подробнее читайте на сайте <%s>.\n" + +#: src/help.c:272 +#, c-format +msgid "Report bugs to <%s>.\n" +msgstr "Сообщайте об ошибках в <%s>.\n" + +#: src/help.c:279 +#, c-format +msgid "" +"\n" +"Unknown command. Try pg_probackup help\n" +"\n" +msgstr "" +"\n" +"Неизвестная команда. 
Попробуйте pg_probackup help\n" +"\n" + +#: src/help.c:285 +#, c-format +msgid "" +"\n" +"This command is intended for internal use\n" +"\n" +msgstr "" + +#: src/help.c:291 +#, c-format +msgid "" +"\n" +"%s init -B backup-path\n" +"\n" +msgstr "" + +#: src/help.c:292 +#, c-format +msgid "" +" -B, --backup-path=backup-path location of the backup storage area\n" +"\n" +msgstr "" + +#: src/help.c:298 +#, c-format +msgid "" +"\n" +"%s backup -B backup-path -b backup-mode --instance=instance_name\n" +msgstr "" + +#: src/help.c:303 src/help.c:792 +#, c-format +msgid " [-E external-directories-paths]\n" +msgstr "" + +#: src/help.c:325 +#, c-format +msgid "" +" [--ttl=interval] [--expire-time=timestamp] [--note=text]\n" +"\n" +msgstr "" + +#: src/help.c:327 src/help.c:455 src/help.c:558 src/help.c:606 src/help.c:660 +#: src/help.c:679 src/help.c:739 src/help.c:812 src/help.c:895 src/help.c:910 +#: src/help.c:934 src/help.c:954 src/help.c:998 +#, c-format +msgid " -B, --backup-path=backup-path location of the backup storage area\n" +msgstr "" + +#: src/help.c:328 +#, c-format +msgid " -b, --backup-mode=backup-mode backup mode=FULL|PAGE|DELTA|PTRACK\n" +msgstr "" + +#: src/help.c:329 src/help.c:456 src/help.c:559 src/help.c:607 src/help.c:680 +#: src/help.c:740 src/help.c:813 src/help.c:896 +#, c-format +msgid " --instance=instance_name name of the instance\n" +msgstr "" + +#: src/help.c:330 src/help.c:458 src/help.c:608 src/help.c:814 src/help.c:911 +#, c-format +msgid " -D, --pgdata=pgdata-path location of the database storage area\n" +msgstr "" + +#: src/help.c:331 +#, c-format +msgid " -C, --smooth-checkpoint do smooth checkpoint before backup\n" +msgstr "" + +#: src/help.c:332 +#, c-format +msgid " --stream stream the transaction log and include it in the backup\n" +msgstr "" + +#: src/help.c:333 src/help.c:1054 +#, c-format +msgid " -S, --slot=SLOTNAME replication slot to use\n" +msgstr "" + +#: src/help.c:334 src/help.c:1055 +#, c-format +msgid " --temp-slot use temporary replication slot\n" +msgstr "" + +#: src/help.c:335 +#, c-format +msgid " --backup-pg-log backup of '%s' directory\n" +msgstr "" + +#: src/help.c:336 src/help.c:460 src/help.c:563 src/help.c:611 src/help.c:682 +#: src/help.c:743 src/help.c:960 src/help.c:1004 src/help.c:1058 +#, c-format +msgid " -j, --threads=NUM number of parallel threads\n" +msgstr "" + +#: src/help.c:337 src/help.c:462 src/help.c:562 src/help.c:610 src/help.c:683 +#: src/help.c:744 +#, c-format +msgid " --progress show progress\n" +msgstr "" + +#: src/help.c:338 +#, c-format +msgid " --no-validate disable validation after backup\n" +msgstr "" + +#: src/help.c:339 src/help.c:466 src/help.c:573 +#, c-format +msgid " --skip-block-validation set to validate only file-level checksum\n" +msgstr "" + +#: src/help.c:340 src/help.c:815 src/help.c:914 +#, c-format +msgid " -E --external-dirs=external-directories-paths\n" +msgstr "" + +#: src/help.c:341 src/help.c:816 src/help.c:915 +#, c-format +msgid " backup some directories not from pgdata \n" +msgstr "" + +#: src/help.c:342 src/help.c:817 src/help.c:916 +#, c-format +msgid " (example: --external-dirs=/tmp/dir1:/tmp/dir2)\n" +msgstr "" + +#: src/help.c:343 +#, c-format +msgid " --no-sync do not sync backed up files to disk\n" +msgstr "" + +#: src/help.c:344 +#, c-format +msgid " --note=text add note to backup\n" +msgstr "" + +#: src/help.c:345 src/help.c:784 +#, c-format +msgid " (example: --note='backup before app update to v13.1')\n" +msgstr "" + +#: src/help.c:347 src/help.c:508 src/help.c:575 src/help.c:622 
src/help.c:702 +#: src/help.c:748 src/help.c:820 +#, c-format +msgid "" +"\n" +" Logging options:\n" +msgstr "" + +#: src/help.c:348 src/help.c:509 src/help.c:576 src/help.c:623 src/help.c:703 +#: src/help.c:749 src/help.c:821 +#, c-format +msgid " --log-level-console=log-level-console\n" +msgstr "" + +#: src/help.c:349 src/help.c:510 src/help.c:577 src/help.c:624 src/help.c:704 +#: src/help.c:750 src/help.c:822 +#, c-format +msgid " level for console logging (default: info)\n" +msgstr "" + +#: src/help.c:350 src/help.c:353 src/help.c:511 src/help.c:514 src/help.c:578 +#: src/help.c:581 src/help.c:625 src/help.c:628 src/help.c:705 src/help.c:708 +#: src/help.c:751 src/help.c:754 src/help.c:823 src/help.c:826 +#, c-format +msgid " available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n" +msgstr "" + +#: src/help.c:351 src/help.c:512 src/help.c:579 src/help.c:626 src/help.c:706 +#: src/help.c:752 src/help.c:824 +#, c-format +msgid " --log-level-file=log-level-file\n" +msgstr "" + +#: src/help.c:352 src/help.c:513 src/help.c:580 src/help.c:627 src/help.c:707 +#: src/help.c:753 src/help.c:825 +#, c-format +msgid " level for file logging (default: off)\n" +msgstr "" + +#: src/help.c:354 src/help.c:515 src/help.c:582 src/help.c:629 src/help.c:709 +#: src/help.c:755 src/help.c:827 +#, c-format +msgid " --log-filename=log-filename\n" +msgstr "" + +#: src/help.c:355 src/help.c:516 src/help.c:583 src/help.c:630 src/help.c:710 +#: src/help.c:756 src/help.c:828 +#, c-format +msgid " filename for file logging (default: 'pg_probackup.log')\n" +msgstr "" + +#: src/help.c:356 src/help.c:517 src/help.c:584 src/help.c:711 src/help.c:757 +#: src/help.c:829 +#, c-format +msgid " support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n" +msgstr "" + +#: src/help.c:357 src/help.c:518 src/help.c:585 src/help.c:632 src/help.c:712 +#: src/help.c:758 src/help.c:830 +#, c-format +msgid " --error-log-filename=error-log-filename\n" +msgstr "" + +#: src/help.c:358 src/help.c:519 src/help.c:586 src/help.c:633 src/help.c:713 +#: src/help.c:759 src/help.c:831 +#, c-format +msgid " filename for error logging (default: none)\n" +msgstr "" + +#: src/help.c:359 src/help.c:520 src/help.c:587 src/help.c:634 src/help.c:714 +#: src/help.c:760 src/help.c:832 +#, c-format +msgid " --log-directory=log-directory\n" +msgstr "" + +#: src/help.c:360 src/help.c:521 src/help.c:588 src/help.c:635 src/help.c:715 +#: src/help.c:761 src/help.c:833 +#, c-format +msgid " directory for file logging (default: BACKUP_PATH/log)\n" +msgstr "" + +#: src/help.c:361 src/help.c:522 src/help.c:589 src/help.c:636 src/help.c:716 +#: src/help.c:762 src/help.c:834 +#, c-format +msgid " --log-rotation-size=log-rotation-size\n" +msgstr "" + +#: src/help.c:362 src/help.c:523 src/help.c:590 src/help.c:637 src/help.c:717 +#: src/help.c:763 src/help.c:835 +#, c-format +msgid " rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n" +msgstr "" + +#: src/help.c:363 src/help.c:524 src/help.c:591 src/help.c:638 src/help.c:718 +#: src/help.c:764 src/help.c:836 +#, c-format +msgid " available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n" +msgstr "" + +#: src/help.c:364 src/help.c:525 src/help.c:592 src/help.c:639 src/help.c:719 +#: src/help.c:765 src/help.c:837 +#, c-format +msgid " --log-rotation-age=log-rotation-age\n" +msgstr "" + +#: src/help.c:365 src/help.c:526 src/help.c:593 src/help.c:640 src/help.c:720 +#: src/help.c:766 src/help.c:838 +#, c-format +msgid " rotate logfile if its age exceeds this value; 0 
disables; (default: 0)\n" +msgstr "" + +#: src/help.c:366 src/help.c:527 src/help.c:594 src/help.c:641 src/help.c:721 +#: src/help.c:767 src/help.c:839 +#, c-format +msgid " available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n" +msgstr "" + +#: src/help.c:367 src/help.c:528 src/help.c:642 +#, c-format +msgid " --no-color disable the coloring of error and warning console messages\n" +msgstr "" + +#: src/help.c:369 src/help.c:687 src/help.c:841 +#, c-format +msgid "" +"\n" +" Retention options:\n" +msgstr "" + +#: src/help.c:370 src/help.c:688 +#, c-format +msgid " --delete-expired delete backups expired according to current\n" +msgstr "" + +#: src/help.c:371 src/help.c:373 +#, c-format +msgid " retention policy after successful backup completion\n" +msgstr "" + +#: src/help.c:372 src/help.c:690 +#, c-format +msgid " --merge-expired merge backups expired according to current\n" +msgstr "" + +#: src/help.c:374 src/help.c:692 +#, c-format +msgid " --delete-wal remove redundant files in WAL archive\n" +msgstr "" + +#: src/help.c:375 src/help.c:693 src/help.c:842 +#, c-format +msgid " --retention-redundancy=retention-redundancy\n" +msgstr "" + +#: src/help.c:376 src/help.c:694 src/help.c:843 +#, c-format +msgid " number of full backups to keep; 0 disables; (default: 0)\n" +msgstr "" + +#: src/help.c:377 src/help.c:695 src/help.c:844 +#, c-format +msgid " --retention-window=retention-window\n" +msgstr "" + +#: src/help.c:378 src/help.c:696 src/help.c:845 +#, c-format +msgid " number of days of recoverability; 0 disables; (default: 0)\n" +msgstr "" + +#: src/help.c:379 src/help.c:697 +#, c-format +msgid " --wal-depth=wal-depth number of latest valid backups per timeline that must\n" +msgstr "" + +#: src/help.c:380 src/help.c:698 +#, c-format +msgid " retain the ability to perform PITR; 0 disables; (default: 0)\n" +msgstr "" + +#: src/help.c:381 src/help.c:699 +#, c-format +msgid " --dry-run perform a trial run without any changes\n" +msgstr "" + +#: src/help.c:383 +#, c-format +msgid "" +"\n" +" Pinning options:\n" +msgstr "" + +#: src/help.c:384 src/help.c:778 +#, c-format +msgid " --ttl=interval pin backup for specified amount of time; 0 unpin\n" +msgstr "" + +#: src/help.c:385 src/help.c:779 +#, c-format +msgid " available units: 'ms', 's', 'min', 'h', 'd' (default: s)\n" +msgstr "" + +#: src/help.c:386 src/help.c:780 +#, c-format +msgid " (example: --ttl=20d)\n" +msgstr "" + +#: src/help.c:387 src/help.c:781 +#, c-format +msgid " --expire-time=time pin backup until specified time stamp\n" +msgstr "" + +#: src/help.c:388 src/help.c:782 +#, c-format +msgid " (example: --expire-time='2024-01-01 00:00:00+03')\n" +msgstr "" + +#: src/help.c:390 src/help.c:849 src/help.c:967 +#, c-format +msgid "" +"\n" +" Compression options:\n" +msgstr "" + +#: src/help.c:391 src/help.c:850 src/help.c:968 +#, c-format +msgid " --compress alias for --compress-algorithm='zlib' and --compress-level=1\n" +msgstr "" + +#: src/help.c:392 src/help.c:851 src/help.c:969 +#, c-format +msgid " --compress-algorithm=compress-algorithm\n" +msgstr "" + +#: src/help.c:393 +#, c-format +msgid " available options: 'zlib', 'pglz', 'none' (default: none)\n" +msgstr "" + +#: src/help.c:394 src/help.c:853 src/help.c:971 +#, c-format +msgid " --compress-level=compress-level\n" +msgstr "" + +#: src/help.c:395 src/help.c:854 src/help.c:972 +#, c-format +msgid " level of compression [0-9] (default: 1)\n" +msgstr "" + +#: src/help.c:397 src/help.c:856 +#, c-format +msgid "" +"\n" +" Archive options:\n" +msgstr "" + +#: 
src/help.c:398 src/help.c:857 +#, c-format +msgid " --archive-timeout=timeout wait timeout for WAL segment archiving (default: 5min)\n" +msgstr "" + +#: src/help.c:400 src/help.c:644 src/help.c:859 src/help.c:1066 +#, c-format +msgid "" +"\n" +" Connection options:\n" +msgstr "" + +#: src/help.c:401 src/help.c:645 src/help.c:860 src/help.c:1067 +#, c-format +msgid " -U, --pguser=USERNAME user name to connect as (default: current local user)\n" +msgstr "" + +#: src/help.c:402 src/help.c:646 src/help.c:861 src/help.c:1068 +#, c-format +msgid " -d, --pgdatabase=DBNAME database to connect (default: username)\n" +msgstr "" + +#: src/help.c:403 src/help.c:647 src/help.c:862 src/help.c:1069 +#, c-format +msgid " -h, --pghost=HOSTNAME database server host or socket directory(default: 'local socket')\n" +msgstr "" + +#: src/help.c:404 src/help.c:648 src/help.c:863 src/help.c:1070 +#, c-format +msgid " -p, --pgport=PORT database server port (default: 5432)\n" +msgstr "" + +#: src/help.c:405 src/help.c:649 src/help.c:1071 +#, c-format +msgid " -w, --no-password never prompt for password\n" +msgstr "" + +#: src/help.c:406 +#, c-format +msgid " -W, --password force password prompt\n" +msgstr "" + +#: src/help.c:408 src/help.c:530 src/help.c:865 src/help.c:917 src/help.c:974 +#: src/help.c:1009 src/help.c:1074 +#, c-format +msgid "" +"\n" +" Remote options:\n" +msgstr "" + +#: src/help.c:409 src/help.c:531 src/help.c:866 src/help.c:918 src/help.c:975 +#: src/help.c:1010 src/help.c:1075 +#, c-format +msgid " --remote-proto=protocol remote protocol to use\n" +msgstr "" + +#: src/help.c:410 src/help.c:532 src/help.c:867 src/help.c:919 src/help.c:976 +#: src/help.c:1011 src/help.c:1076 +#, c-format +msgid " available options: 'ssh', 'none' (default: ssh)\n" +msgstr "" + +#: src/help.c:411 src/help.c:533 src/help.c:868 src/help.c:920 +#, c-format +msgid " --remote-host=destination remote host address or hostname\n" +msgstr "" + +#: src/help.c:412 src/help.c:534 src/help.c:869 src/help.c:921 src/help.c:978 +#: src/help.c:1013 src/help.c:1078 +#, c-format +msgid " --remote-port=port remote host port (default: 22)\n" +msgstr "" + +#: src/help.c:413 src/help.c:535 src/help.c:870 src/help.c:922 src/help.c:979 +#: src/help.c:1014 src/help.c:1079 +#, c-format +msgid " --remote-path=path path to directory with pg_probackup binary on remote host\n" +msgstr "" + +#: src/help.c:414 src/help.c:536 src/help.c:871 src/help.c:923 src/help.c:980 +#: src/help.c:1015 src/help.c:1080 +#, c-format +msgid " (default: current binary path)\n" +msgstr "" + +#: src/help.c:415 src/help.c:537 src/help.c:872 src/help.c:924 src/help.c:981 +#: src/help.c:1016 src/help.c:1081 +#, c-format +msgid " --remote-user=username user name for ssh connection (default: current user)\n" +msgstr "" + +#: src/help.c:416 src/help.c:538 src/help.c:873 src/help.c:925 src/help.c:982 +#: src/help.c:1017 src/help.c:1082 +#, c-format +msgid " --ssh-options=ssh_options additional ssh options (default: none)\n" +msgstr "" + +#: src/help.c:417 src/help.c:539 src/help.c:874 +#, c-format +msgid " (example: --ssh-options='-c cipher_spec -F configfile')\n" +msgstr "" + +#: src/help.c:419 src/help.c:881 +#, c-format +msgid "" +"\n" +" Replica options:\n" +msgstr "" + +#: src/help.c:420 src/help.c:882 +#, c-format +msgid " --master-user=user_name user name to connect to master (deprecated)\n" +msgstr "" + +#: src/help.c:421 src/help.c:883 +#, c-format +msgid " --master-db=db_name database to connect to master (deprecated)\n" +msgstr "" + +#: src/help.c:422 
src/help.c:884 +#, c-format +msgid " --master-host=host_name database server host of master (deprecated)\n" +msgstr "" + +#: src/help.c:423 src/help.c:885 +#, c-format +msgid " --master-port=port database server port of master (deprecated)\n" +msgstr "" + +#: src/help.c:424 src/help.c:886 +#, c-format +msgid "" +" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (deprecated)\n" +"\n" +msgstr "" + +#: src/help.c:430 +#, c-format +msgid "" +"\n" +"%s restore -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:432 +#, c-format +msgid " [--progress] [--force] [--no-sync]\n" +msgstr "" + +#: src/help.c:436 +#, c-format +msgid " [--skip-external-dirs]\n" +msgstr "" + +#: src/help.c:438 +#, c-format +msgid " [--db-include dbname | --db-exclude dbname]\n" +msgstr "" + +#: src/help.c:446 +#, c-format +msgid " [-R | --restore-as-replica]\n" +msgstr "" + +#: src/help.c:452 +#, c-format +msgid " [--archive-host=hostname] [--archive-port=port]\n" +msgstr "" + +#: src/help.c:453 +#, c-format +msgid "" +" [--archive-user=username]\n" +"\n" +msgstr "" + +#: src/help.c:459 +#, c-format +msgid " -i, --backup-id=backup-id backup to restore\n" +msgstr "" + +#: src/help.c:463 +#, c-format +msgid " --force ignore invalid status of the restored backup\n" +msgstr "" + +#: src/help.c:464 +#, c-format +msgid " --no-sync do not sync restored files to disk\n" +msgstr "" + +#: src/help.c:465 +#, c-format +msgid " --no-validate disable backup validation during restore\n" +msgstr "" + +#: src/help.c:468 src/help.c:1060 +#, c-format +msgid " -T, --tablespace-mapping=OLDDIR=NEWDIR\n" +msgstr "" + +#: src/help.c:469 src/help.c:1061 +#, c-format +msgid " relocate the tablespace from directory OLDDIR to NEWDIR\n" +msgstr "" + +#: src/help.c:470 +#, c-format +msgid " --external-mapping=OLDDIR=NEWDIR\n" +msgstr "" + +#: src/help.c:471 +#, c-format +msgid " relocate the external directory from OLDDIR to NEWDIR\n" +msgstr "" + +#: src/help.c:472 +#, c-format +msgid " --skip-external-dirs do not restore all external directories\n" +msgstr "" + +#: src/help.c:474 +#, c-format +msgid "" +"\n" +" Incremental restore options:\n" +msgstr "" + +#: src/help.c:475 +#, c-format +msgid " -I, --incremental-mode=none|checksum|lsn\n" +msgstr "" + +#: src/help.c:476 +#, c-format +msgid " reuse valid pages available in PGDATA if they have not changed\n" +msgstr "" + +#: src/help.c:477 +#, c-format +msgid " (default: none)\n" +msgstr "" + +#: src/help.c:479 +#, c-format +msgid "" +"\n" +" Partial restore options:\n" +msgstr "" + +#: src/help.c:480 +#, c-format +msgid " --db-include dbname restore only specified databases\n" +msgstr "" + +#: src/help.c:481 +#, c-format +msgid " --db-exclude dbname do not restore specified databases\n" +msgstr "" + +#: src/help.c:483 +#, c-format +msgid "" +"\n" +" Recovery options:\n" +msgstr "" + +#: src/help.c:484 src/help.c:564 +#, c-format +msgid " --recovery-target-time=time time stamp up to which recovery will proceed\n" +msgstr "" + +#: src/help.c:485 src/help.c:565 +#, c-format +msgid " --recovery-target-xid=xid transaction ID up to which recovery will proceed\n" +msgstr "" + +#: src/help.c:486 src/help.c:566 +#, c-format +msgid " --recovery-target-lsn=lsn LSN of the write-ahead log location up to which recovery will proceed\n" +msgstr "" + +#: src/help.c:487 src/help.c:567 +#, c-format +msgid " --recovery-target-inclusive=boolean\n" +msgstr "" + +#: src/help.c:488 src/help.c:568 +#, c-format +msgid " whether we stop just after the recovery target\n" 
+msgstr "" + +#: src/help.c:489 src/help.c:569 +#, c-format +msgid " --recovery-target-timeline=timeline\n" +msgstr "" + +#: src/help.c:490 src/help.c:570 +#, c-format +msgid " recovering into a particular timeline\n" +msgstr "" + +#: src/help.c:491 +#, c-format +msgid " --recovery-target=immediate|latest\n" +msgstr "" + +#: src/help.c:492 +#, c-format +msgid " end recovery as soon as a consistent state is reached or as late as possible\n" +msgstr "" + +#: src/help.c:493 src/help.c:571 +#, c-format +msgid " --recovery-target-name=target-name\n" +msgstr "" + +#: src/help.c:494 src/help.c:572 +#, c-format +msgid " the named restore point to which recovery will proceed\n" +msgstr "" + +#: src/help.c:495 +#, c-format +msgid " --recovery-target-action=pause|promote|shutdown\n" +msgstr "" + +#: src/help.c:496 +#, c-format +msgid " action the server should take once the recovery target is reached\n" +msgstr "" + +#: src/help.c:497 +#, c-format +msgid " (default: pause)\n" +msgstr "" + +#: src/help.c:498 src/help.c:818 +#, c-format +msgid " --restore-command=cmdline command to use as 'restore_command' in recovery.conf; 'none' disables\n" +msgstr "" + +#: src/help.c:500 +#, c-format +msgid "" +"\n" +" Standby options:\n" +msgstr "" + +#: src/help.c:501 +#, c-format +msgid " -R, --restore-as-replica write a minimal recovery.conf in the output directory\n" +msgstr "" + +#: src/help.c:502 +#, c-format +msgid " to ease setting up a standby server\n" +msgstr "" + +#: src/help.c:503 +#, c-format +msgid " --primary-conninfo=primary_conninfo\n" +msgstr "" + +#: src/help.c:504 +#, c-format +msgid " connection string to be used for establishing connection\n" +msgstr "" + +#: src/help.c:505 +#, c-format +msgid " with the primary server\n" +msgstr "" + +#: src/help.c:506 +#, c-format +msgid " -S, --primary-slot-name=slotname replication slot to be used for WAL streaming from the primary server\n" +msgstr "" + +#: src/help.c:541 src/help.c:876 +#, c-format +msgid "" +"\n" +" Remote WAL archive options:\n" +msgstr "" + +#: src/help.c:542 src/help.c:877 +#, c-format +msgid " --archive-host=destination address or hostname for ssh connection to archive host\n" +msgstr "" + +#: src/help.c:543 src/help.c:878 +#, c-format +msgid " --archive-port=port port for ssh connection to archive host (default: 22)\n" +msgstr "" + +#: src/help.c:544 +#, c-format +msgid "" +" --archive-user=username user name for ssh connection to archive host (default: PostgreSQL user)\n" +"\n" +msgstr "" + +#: src/help.c:550 +#, c-format +msgid "" +"\n" +"%s validate -B backup-path [--instance=instance_name]\n" +msgstr "" + +#: src/help.c:556 +#, c-format +msgid "" +" [--skip-block-validation]\n" +"\n" +msgstr "" + +#: src/help.c:560 +#, c-format +msgid " -i, --backup-id=backup-id backup to validate\n" +msgstr "" + +#: src/help.c:595 src/help.c:722 src/help.c:768 +#, c-format +msgid "" +" --no-color disable the coloring of error and warning console messages\n" +"\n" +msgstr "" + +#: src/help.c:601 +#, c-format +msgid "" +"\n" +"%s checkdb [-B backup-path] [--instance=instance_name]\n" +msgstr "" + +#: src/help.c:602 +#, c-format +msgid " [-D pgdata-path] [-j num-threads] [--progress]\n" +msgstr "" + +#: src/help.c:604 +#, c-format +msgid "" +" [--heapallindexed] [--checkunique]\n" +"\n" +msgstr "" + +#: src/help.c:612 +#, c-format +msgid " --skip-block-validation skip file-level checking\n" +msgstr "" + +#: src/help.c:613 src/help.c:618 src/help.c:620 +#, c-format +msgid " can be used only with '--amcheck' option\n" +msgstr "" + +#: 
src/help.c:614 +#, c-format +msgid " --amcheck in addition to file-level block checking\n" +msgstr "" + +#: src/help.c:615 +#, c-format +msgid " check btree indexes via function 'bt_index_check()'\n" +msgstr "" + +#: src/help.c:616 +#, c-format +msgid " using 'amcheck' or 'amcheck_next' extensions\n" +msgstr "" + +#: src/help.c:617 +#, c-format +msgid " --heapallindexed also check that heap is indexed\n" +msgstr "" + +#: src/help.c:619 +#, c-format +msgid " --checkunique also check unique constraints\n" +msgstr "" + +#: src/help.c:631 +#, c-format +msgid " support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log\n" +msgstr "" + +#: src/help.c:650 src/help.c:1072 +#, c-format +msgid "" +" -W, --password force password prompt\n" +"\n" +msgstr "" + +#: src/help.c:656 +#, c-format +msgid "" +"\n" +"%s show -B backup-path\n" +msgstr "" + +#: src/help.c:658 +#, c-format +msgid "" +" [--format=format] [--archive]\n" +"\n" +msgstr "" + +#: src/help.c:661 +#, c-format +msgid " --instance=instance_name show info about specific instance\n" +msgstr "" + +#: src/help.c:662 +#, c-format +msgid " -i, --backup-id=backup-id show info about specific backups\n" +msgstr "" + +#: src/help.c:663 +#, c-format +msgid " --archive show WAL archive information\n" +msgstr "" + +#: src/help.c:664 +#, c-format +msgid " --format=format show format=PLAIN|JSON\n" +msgstr "" + +#: src/help.c:665 +#, c-format +msgid "" +" --no-color disable the coloring for plain format\n" +"\n" +msgstr "" + +#: src/help.c:671 +#, c-format +msgid "" +"\n" +"%s delete -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:672 +#, c-format +msgid " [-i backup-id | --delete-expired | --merge-expired] [--delete-wal]\n" +msgstr "" + +#: src/help.c:677 +#, c-format +msgid "" +" [--no-validate] [--no-sync]\n" +"\n" +msgstr "" + +#: src/help.c:681 +#, c-format +msgid " -i, --backup-id=backup-id backup to delete\n" +msgstr "" + +#: src/help.c:684 src/help.c:745 +#, c-format +msgid " --no-validate disable validation during retention merge\n" +msgstr "" + +#: src/help.c:685 src/help.c:746 +#, c-format +msgid " --no-sync do not sync merged files to disk\n" +msgstr "" + +#: src/help.c:689 src/help.c:691 +#, c-format +msgid " retention policy\n" +msgstr "" + +#: src/help.c:700 +#, c-format +msgid " --status=backup_status delete all backups with specified status\n" +msgstr "" + +#: src/help.c:728 +#, c-format +msgid "" +"\n" +"%s merge -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:729 +#, c-format +msgid " -i backup-id [-j num-threads] [--progress]\n" +msgstr "" + +#: src/help.c:737 +#, c-format +msgid "" +" [--log-rotation-age=log-rotation-age]\n" +"\n" +msgstr "" + +#: src/help.c:741 +#, c-format +msgid " -i, --backup-id=backup-id backup to merge\n" +msgstr "" + +#: src/help.c:774 +#, c-format +msgid "" +"\n" +"%s set-backup -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:775 +#, c-format +msgid " -i backup-id\n" +msgstr "" + +#: src/help.c:776 +#, c-format +msgid "" +" [--ttl=interval] [--expire-time=time] [--note=text]\n" +"\n" +msgstr "" + +#: src/help.c:783 +#, c-format +msgid " --note=text add note to backup; 'none' to remove note\n" +msgstr "" + +#: src/help.c:790 +#, c-format +msgid "" +"\n" +"%s set-config -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:810 src/help.c:908 src/help.c:952 src/help.c:996 +#, c-format +msgid "" +" [--ssh-options]\n" +"\n" +msgstr "" + +#: src/help.c:846 +#, c-format +msgid " --wal-depth=wal-depth number of latest 
valid backups with ability to perform\n" +msgstr "" + +#: src/help.c:847 +#, c-format +msgid " the point in time recovery; disables; (default: 0)\n" +msgstr "" + +#: src/help.c:852 src/help.c:970 +#, c-format +msgid " available options: 'zlib','pglz','none' (default: 'none')\n" +msgstr "" + +#: src/help.c:879 +#, c-format +msgid " --archive-user=username user name for ssh connection to archive host (default: PostgreSQL user)\n" +msgstr "" + +#: src/help.c:892 +#, c-format +msgid "" +"\n" +"%s show-config -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:893 +#, c-format +msgid "" +" [--format=format]\n" +"\n" +msgstr "" + +#: src/help.c:897 +#, c-format +msgid "" +" --format=format show format=PLAIN|JSON\n" +"\n" +msgstr "" + +#: src/help.c:903 +#, c-format +msgid "" +"\n" +"%s add-instance -B backup-path -D pgdata-path\n" +msgstr "" + +#: src/help.c:905 +#, c-format +msgid " [-E external-directory-path]\n" +msgstr "" + +#: src/help.c:912 +#, c-format +msgid " --instance=instance_name name of the new instance\n" +msgstr "" + +#: src/help.c:926 src/help.c:983 src/help.c:1018 src/help.c:1083 +#, c-format +msgid "" +" (example: --ssh-options='-c cipher_spec -F configfile')\n" +"\n" +msgstr "" + +#: src/help.c:932 +#, c-format +msgid "" +"\n" +"%s del-instance -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:935 +#, c-format +msgid "" +" --instance=instance_name name of the instance to delete\n" +"\n" +msgstr "" + +#: src/help.c:941 +#, c-format +msgid "" +"\n" +"%s archive-push -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:955 src/help.c:999 +#, c-format +msgid " --instance=instance_name name of the instance to delete\n" +msgstr "" + +#: src/help.c:956 src/help.c:1002 +#, c-format +msgid " --wal-file-name=wal-file-name\n" +msgstr "" + +#: src/help.c:957 +#, c-format +msgid " name of the file to copy into WAL archive\n" +msgstr "" + +#: src/help.c:958 src/help.c:1000 +#, c-format +msgid " --wal-file-path=wal-file-path\n" +msgstr "" + +#: src/help.c:959 +#, c-format +msgid " relative destination path of the WAL archive\n" +msgstr "" + +#: src/help.c:961 +#, c-format +msgid " --batch-size=NUM number of files to be copied\n" +msgstr "" + +#: src/help.c:962 +#, c-format +msgid " --archive-timeout=timeout wait timeout before discarding stale temp file(default: 5min)\n" +msgstr "" + +#: src/help.c:963 +#, c-format +msgid " --no-ready-rename do not rename '.ready' files in 'archive_status' directory\n" +msgstr "" + +#: src/help.c:964 +#, c-format +msgid " --no-sync do not sync WAL file to disk\n" +msgstr "" + +#: src/help.c:965 +#, c-format +msgid " --overwrite overwrite archived WAL file\n" +msgstr "" + +#: src/help.c:977 src/help.c:1012 src/help.c:1077 +#, c-format +msgid " --remote-host=hostname remote host address or hostname\n" +msgstr "" + +#: src/help.c:989 +#, c-format +msgid "" +"\n" +"%s archive-get -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:1001 +#, c-format +msgid " relative destination path name of the WAL file on the server\n" +msgstr "" + +#: src/help.c:1003 +#, c-format +msgid " name of the WAL file to retrieve from the archive\n" +msgstr "" + +#: src/help.c:1005 +#, c-format +msgid " --batch-size=NUM number of files to be prefetched\n" +msgstr "" + +#: src/help.c:1006 +#, c-format +msgid " --prefetch-dir=path location of the store area for prefetched WAL files\n" +msgstr "" + +#: src/help.c:1007 +#, c-format +msgid " --no-validate-wal skip validation of prefetched WAL file before using 
it\n" +msgstr "" + +#: src/help.c:1024 +#, c-format +msgid "" +"\n" +"%s help [command]\n" +msgstr "" + +#: src/help.c:1025 +#, c-format +msgid "" +"%s command --help\n" +"\n" +msgstr "" + +#: src/help.c:1031 +#, c-format +msgid "" +"\n" +"%s version\n" +msgstr "" + +#: src/help.c:1032 +#, c-format +msgid "" +"%s --version\n" +"\n" +msgstr "" + +#: src/help.c:1038 +#, c-format +msgid "" +"\n" +"%s catchup -b catchup-mode\n" +msgstr "" + +#: src/help.c:1041 +#, c-format +msgid " [--stream [-S slot-name]] [--temp-slot | --perm-slot]\n" +msgstr "" + +#: src/help.c:1050 +#, c-format +msgid "" +" [--help]\n" +"\n" +msgstr "" + +#: src/help.c:1052 +#, c-format +msgid " -b, --backup-mode=catchup-mode catchup mode=FULL|DELTA|PTRACK\n" +msgstr "" + +#: src/help.c:1053 +#, c-format +msgid " --stream stream the transaction log (only supported mode)\n" +msgstr "" + +#: src/help.c:1056 +#, c-format +msgid " -P --perm-slot create permanent replication slot\n" +msgstr "" + +#: src/help.c:1062 +#, c-format +msgid " -x, --exclude-path=path_prefix files with path_prefix (relative to pgdata) will be\n" +msgstr "" + +#: src/help.c:1063 +#, c-format +msgid " excluded from catchup (can be used multiple times)\n" +msgstr "" + +#: src/help.c:1064 +#, c-format +msgid " Dangerous option! Use at your own risk!\n" +msgstr "" diff --git a/src/help.c b/src/help.c index a494ab209..8ebe734a3 100644 --- a/src/help.c +++ b/src/help.c @@ -267,9 +267,9 @@ help_pg_probackup(void) { printf("\n"); if (PROGRAM_URL) - printf("Read the website for details. <%s>\n", PROGRAM_URL); + printf(_("Read the website for details <%s>.\n"), PROGRAM_URL); if (PROGRAM_EMAIL) - printf("Report bugs to <%s>.\n", PROGRAM_EMAIL); + printf(_("Report bugs to <%s>.\n"), PROGRAM_EMAIL); } } diff --git a/src/pg_probackup.c b/src/pg_probackup.c index c5ed13175..b9b3af0b9 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -308,6 +308,7 @@ main(int argc, char *argv[]) init_config(&instance_config, instance_name); PROGRAM_NAME = get_progname(argv[0]); + set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_probackup")); PROGRAM_FULL_PATH = palloc0(MAXPGPATH); /* Get current time */ diff --git a/tests/Readme.md b/tests/Readme.md index 668552c94..ed1b22e03 100644 --- a/tests/Readme.md +++ b/tests/Readme.md @@ -31,7 +31,7 @@ Remote backup depends on key authentication to local machine via ssh as current export PGPROBACKUP_SSH_REMOTE=ON Run tests that are relied on advanced debugging features. For this mode, pg_probackup should be compiled without optimizations. For example: -CFLAGS="-O0" ./configure --prefix=/path/to/prefix --enable-debug --enable-cassert --enable-depend --enable-tap-tests +CFLAGS="-O0" ./configure --prefix=/path/to/prefix --enable-debug --enable-cassert --enable-depend --enable-tap-tests --enable-nls export PGPROBACKUP_GDB=ON diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index a8b4a64b3..00b50d10c 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -180,5 +180,5 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--ssh-options] [--help] -Read the website for details. +Read the website for details . Report bugs to . diff --git a/tests/expected/option_help_ru.out b/tests/expected/option_help_ru.out new file mode 100644 index 000000000..ee8da9a1c --- /dev/null +++ b/tests/expected/option_help_ru.out @@ -0,0 +1,184 @@ + +pg_probackup - утилита для управления резервным копированием/восстановлением базы данных PostgreSQL. 
+ + pg_probackup help [COMMAND] + + pg_probackup version + + pg_probackup init -B backup-path + + pg_probackup set-config -B backup-path --instance=instance_name + [-D pgdata-path] + [--external-dirs=external-directories-paths] + [--log-level-console=log-level-console] + [--log-level-file=log-level-file] + [--log-filename=log-filename] + [--error-log-filename=error-log-filename] + [--log-directory=log-directory] + [--log-rotation-size=log-rotation-size] + [--log-rotation-age=log-rotation-age] + [--retention-redundancy=retention-redundancy] + [--retention-window=retention-window] + [--wal-depth=wal-depth] + [--compress-algorithm=compress-algorithm] + [--compress-level=compress-level] + [--archive-timeout=timeout] + [-d dbname] [-h host] [-p port] [-U username] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--restore-command=cmdline] [--archive-host=destination] + [--archive-port=port] [--archive-user=username] + [--help] + + pg_probackup set-backup -B backup-path --instance=instance_name + -i backup-id [--ttl=interval] [--expire-time=timestamp] + [--note=text] + [--help] + + pg_probackup show-config -B backup-path --instance=instance_name + [--format=format] + [--help] + + pg_probackup backup -B backup-path -b backup-mode --instance=instance_name + [-D pgdata-path] [-C] + [--stream [-S slot-name] [--temp-slot]] + [--backup-pg-log] [-j num-threads] [--progress] + [--no-validate] [--skip-block-validation] + [--external-dirs=external-directories-paths] + [--no-sync] + [--log-level-console=log-level-console] + [--log-level-file=log-level-file] + [--log-filename=log-filename] + [--error-log-filename=error-log-filename] + [--log-directory=log-directory] + [--log-rotation-size=log-rotation-size] + [--log-rotation-age=log-rotation-age] [--no-color] + [--delete-expired] [--delete-wal] [--merge-expired] + [--retention-redundancy=retention-redundancy] + [--retention-window=retention-window] + [--wal-depth=wal-depth] + [--compress] + [--compress-algorithm=compress-algorithm] + [--compress-level=compress-level] + [--archive-timeout=archive-timeout] + [-d dbname] [-h host] [-p port] [-U username] + [-w --no-password] [-W --password] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--ttl=interval] [--expire-time=timestamp] [--note=text] + [--help] + + pg_probackup restore -B backup-path --instance=instance_name + [-D pgdata-path] [-i backup-id] [-j num-threads] + [--recovery-target-time=time|--recovery-target-xid=xid + |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] + [--recovery-target-timeline=timeline] + [--recovery-target=immediate|latest] + [--recovery-target-name=target-name] + [--recovery-target-action=pause|promote|shutdown] + [--restore-command=cmdline] + [-R | --restore-as-replica] [--force] + [--primary-conninfo=primary_conninfo] + [-S | --primary-slot-name=slotname] + [--no-validate] [--skip-block-validation] + [-T OLDDIR=NEWDIR] [--progress] + [--external-mapping=OLDDIR=NEWDIR] + [--skip-external-dirs] [--no-sync] + [-I | --incremental-mode=none|checksum|lsn] + [--db-include | --db-exclude] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--archive-host=hostname] + [--archive-port=port] [--archive-user=username] + [--help] + + pg_probackup validate -B backup-path [--instance=instance_name] + [-i backup-id] [--progress] [-j num-threads] + [--recovery-target-time=time|--recovery-target-xid=xid + 
|--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] + [--recovery-target-timeline=timeline] + [--recovery-target-name=target-name] + [--skip-block-validation] + [--help] + + pg_probackup checkdb [-B backup-path] [--instance=instance_name] + [-D pgdata-path] [--progress] [-j num-threads] + [--amcheck] [--skip-block-validation] + [--heapallindexed] [--checkunique] + [--help] + + pg_probackup show -B backup-path + [--instance=instance_name [-i backup-id]] + [--format=format] [--archive] + [--no-color] [--help] + + pg_probackup delete -B backup-path --instance=instance_name + [-j num-threads] [--progress] + [--retention-redundancy=retention-redundancy] + [--retention-window=retention-window] + [--wal-depth=wal-depth] + [-i backup-id | --delete-expired | --merge-expired | --status=backup_status] + [--delete-wal] + [--dry-run] [--no-validate] [--no-sync] + [--help] + + pg_probackup merge -B backup-path --instance=instance_name + -i backup-id [--progress] [-j num-threads] + [--no-validate] [--no-sync] + [--help] + + pg_probackup add-instance -B backup-path -D pgdata-path + --instance=instance_name + [--external-dirs=external-directories-paths] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--help] + + pg_probackup del-instance -B backup-path + --instance=instance_name + [--help] + + pg_probackup archive-push -B backup-path --instance=instance_name + --wal-file-name=wal-file-name + [--wal-file-path=wal-file-path] + [-j num-threads] [--batch-size=batch_size] + [--archive-timeout=timeout] + [--no-ready-rename] [--no-sync] + [--overwrite] [--compress] + [--compress-algorithm=compress-algorithm] + [--compress-level=compress-level] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--help] + + pg_probackup archive-get -B backup-path --instance=instance_name + --wal-file-path=wal-file-path + --wal-file-name=wal-file-name + [-j num-threads] [--batch-size=batch_size] + [--no-validate-wal] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--help] + + pg_probackup catchup -b catchup-mode + --source-pgdata=path_to_pgdata_on_remote_server + --destination-pgdata=path_to_local_dir + [--stream [-S slot-name] [--temp-slot | --perm-slot]] + [-j num-threads] + [-T OLDDIR=NEWDIR] + [--exclude-path=path_prefix] + [-d dbname] [-h host] [-p port] [-U username] + [-w --no-password] [-W --password] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--help] + +Подробнее читайте на сайте . +Сообщайте об ошибках в . 
diff --git a/tests/option.py b/tests/option.py index 023a0c2c6..b57d7ef43 100644 --- a/tests/option.py +++ b/tests/option.py @@ -1,6 +1,7 @@ import unittest import os from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +import locale module_name = 'option' @@ -226,3 +227,13 @@ def test_options_5(self): # Clean after yourself self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_help_6(self): + """help options""" + self.test_env['LC_ALL'] = 'ru_RU.utf-8' + with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out: + self.assertEqual( + self.run_pb(["--help"]), + help_out.read().decode("utf-8") + ) diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 52b05105b..a62ad4de7 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -47,7 +47,7 @@ cd postgres # Go to postgres dir if [ "$PG_PROBACKUP_PTRACK" = "ON" ]; then git apply -3 ../ptrack/patches/${PTRACK_PATCH_PG_BRANCH}-ptrack-core.diff fi -CFLAGS="-O0" ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests +CFLAGS="-O0" ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests --enable-nls make -s -j$(nproc) install #make -s -j$(nproc) -C 'src/common' install #make -s -j$(nproc) -C 'src/port' install From 68b77a06bca055c24ef00dee1896409a0beb923b Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Wed, 25 May 2022 14:45:25 +0300 Subject: [PATCH 258/525] [PBCKP-150] Reading buffer is flushed each time we verify the checksum. (#487) The race condition is covered with a unit-test, the buffer is flushed now so each of 300 reads requests the data from the disc. --- .travis.yml | 1 + src/data.c | 2 ++ tests/Readme.md | 2 ++ tests/__init__.py | 8 ++++- tests/time_consuming.py | 76 +++++++++++++++++++++++++++++++++++++++++ 5 files changed, 88 insertions(+), 1 deletion(-) create mode 100644 tests/time_consuming.py diff --git a/.travis.yml b/.travis.yml index 8e325c64f..26b2bc4e2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -47,6 +47,7 @@ env: # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=replica # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=retention # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=restore +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=time_consuming jobs: allow_failures: diff --git a/src/data.c b/src/data.c index 052e17486..e5a551127 100644 --- a/src/data.c +++ b/src/data.c @@ -349,6 +349,8 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, Assert(false); } } + /* avoid re-reading once buffered data, flushing on further attempts, see PBCKP-150 */ + fflush(in); } /* diff --git a/tests/Readme.md b/tests/Readme.md index ed1b22e03..500ed7c7a 100644 --- a/tests/Readme.md +++ b/tests/Readme.md @@ -41,6 +41,8 @@ Run suit of basic simple tests: Run ptrack tests: export PG_PROBACKUP_PTRACK=ON +Run long (time consuming) tests: + export PG_PROBACKUP_LONG=ON Usage: sudo echo 0 > /proc/sys/kernel/yama/ptrace_scope diff --git a/tests/__init__.py b/tests/__init__.py index 55d6ea9be..79537ad78 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -7,7 +7,7 @@ compression, page, ptrack, archive, exclude, cfs_backup, cfs_restore, \ cfs_validate_backup, auth_test, time_stamp, logging, \ locking, remote, external, config, checkdb, set_backup, incr_restore, \ - catchup, CVE_2018_1058 + catchup, CVE_2018_1058, time_consuming def 
load_tests(loader, tests, pattern): @@ -21,6 +21,12 @@ def load_tests(loader, tests, pattern): if os.environ['PG_PROBACKUP_PTRACK'] == 'ON': suite.addTests(loader.loadTestsFromModule(ptrack)) + # PG_PROBACKUP_LONG section for tests that are long + # by design e.g. they contain loops, sleeps and so on + if 'PG_PROBACKUP_LONG' in os.environ: + if os.environ['PG_PROBACKUP_LONG'] == 'ON': + suite.addTests(loader.loadTestsFromModule(time_consuming)) + # suite.addTests(loader.loadTestsFromModule(auth_test)) suite.addTests(loader.loadTestsFromModule(archive)) suite.addTests(loader.loadTestsFromModule(backup)) diff --git a/tests/time_consuming.py b/tests/time_consuming.py new file mode 100644 index 000000000..396ab716e --- /dev/null +++ b/tests/time_consuming.py @@ -0,0 +1,76 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest +import subprocess +from time import sleep + +module_name = 'time_consuming' + +class TimeConsumingTests(ProbackupTest, unittest.TestCase): + def test_pbckp150(self): + """ + https://jira.postgrespro.ru/browse/PBCKP-150 + create a node filled with pgbench + create FULL backup followed by PTRACK backup + run pgbench, vacuum VERBOSE FULL and ptrack backups in parallel + """ + # init node + fname = self.id().split('.')[3] + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + node.append_conf('postgresql.conf', + """ + max_connections = 100 + wal_keep_size = 16000 + ptrack.map_size = 1 + shared_preload_libraries='ptrack' + log_statement = 'none' + fsync = off + log_checkpoints = on + autovacuum = off + """) + + # init probackup and add an instance + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + + # run the node and init ptrack + node.slow_start() + node.safe_psql("postgres", "CREATE EXTENSION ptrack") + # populate it with pgbench + node.pgbench_init(scale=5) + + # FULL backup followed by PTRACK backup + self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) + + # run ordinary pgbench scenario to imitate some activity and another pgbench for vacuuming in parallel + nBenchDuration = 30 + pgbench = node.pgbench(options=['-c', '20', '-j', '8', '-T', str(nBenchDuration)]) + with open('/tmp/pbckp150vacuum.sql', 'w') as f: + f.write('VACUUM (FULL) pgbench_accounts, pgbench_tellers, pgbench_history; SELECT pg_sleep(1);\n') + pgbenchval = node.pgbench(options=['-c', '1', '-f', '/tmp/pbckp150vacuum.sql', '-T', str(nBenchDuration)]) + + # several PTRACK backups + for i in range(nBenchDuration): + print("[{}] backing up PTRACK diff...".format(i+1)) + self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream', '--log-level-console', 'VERBOSE']) + sleep(0.1) + # if the activity pgbench has finished, stop backing up + if pgbench.poll() is not None: + break + + pgbench.kill() + pgbenchval.kill() + pgbench.wait() + pgbenchval.wait() + + backups = self.show_pb(backup_dir, 'node') + for b in backups: + self.assertEqual("OK", b['status']) + + # Clean after yourself + self.del_test_dir(module_name, fname) From 0b5b37e8930e75793b23e0829d2f57cc5a13a34d Mon Sep 17 00:00:00 2001 From: asavchkov <79832668+asavchkov@users.noreply.github.com> Date: Thu, 26 May 2022 19:53:01 +0700 Subject: [PATCH 259/525] Add a workflow to build and test 
probackup on Windows (#484) * Add a workflow to build and test probackup on Windows * [PBCKP-149] fix test_basic_validate_nullified_heap_page_backup for windows Co-authored-by: Alexey Savchkov Co-authored-by: Mikhail A. Kulagin --- .github/workflows/build.yml | 94 +++++++++++++++++++++++++++++++++ gen_probackup_project.pl | 12 ++--- tests/helpers/ptrack_helpers.py | 6 +-- tests/validate.py | 5 +- 4 files changed, 104 insertions(+), 13 deletions(-) create mode 100644 .github/workflows/build.yml diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 000000000..ab1a5888d --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,94 @@ +name: Build Probackup + +on: + push: + branches: + - "**" + # Runs triggered by pull requests are disabled to prevent executing potentially unsafe code from public pull requests + # pull_request: + # branches: + # - main + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +jobs: + + build-win2019: + + runs-on: + - windows-2019 + + env: + zlib_dir: C:\dep\zlib + + steps: + + - uses: actions/checkout@v2 + + - name: Install pacman packages + run: | + $env:PATH += ";C:\msys64\usr\bin" + pacman -S --noconfirm --needed bison flex + + - name: Make zlib + run: | + git clone -b v1.2.11 --depth 1 https://github.com/madler/zlib.git + cd zlib + cmake -DCMAKE_INSTALL_PREFIX:PATH=C:\dep\zlib -G "Visual Studio 16 2019" . + cmake --build . --config Release --target ALL_BUILD + cmake --build . --config Release --target INSTALL + copy C:\dep\zlib\lib\zlibstatic.lib C:\dep\zlib\lib\zdll.lib + copy C:\dep\zlib\bin\zlib.dll C:\dep\zlib\lib + + - name: Get Postgres sources + run: git clone -b REL_14_STABLE https://github.com/postgres/postgres.git + + # Copy ptrack to contrib to build the ptrack extension + # Convert line breaks in the patch file to LF otherwise the patch doesn't apply + - name: Get Ptrack sources + run: | + git clone -b master --depth 1 https://github.com/postgrespro/ptrack.git + Copy-Item -Path ptrack -Destination postgres\contrib -Recurse + (Get-Content ptrack\patches\REL_14_STABLE-ptrack-core.diff -Raw).Replace("`r`n","`n") | Set-Content ptrack\patches\REL_14_STABLE-ptrack-core.diff -Force -NoNewline + cd postgres + git apply -3 ../ptrack/patches/REL_14_STABLE-ptrack-core.diff + + - name: Build Postgres + run: | + $env:PATH += ";C:\msys64\usr\bin" + cd postgres\src\tools\msvc + (Get-Content config_default.pl) -Replace "zlib *=>(.*?)(?=,? 
*#)", "zlib => '${{ env.zlib_dir }}'" | Set-Content config.pl + cmd.exe /s /c "`"C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat`" amd64 && .\build.bat" + + - name: Build Probackup + run: cmd.exe /s /c "`"C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat`" amd64 && perl .\gen_probackup_project.pl `"${{ github.workspace }}`"\postgres" + + - name: Install Postgres + run: | + cd postgres + src\tools\msvc\install.bat postgres_install + + - name: Install Testgres + run: | + git clone -b no-port-for --single-branch --depth 1 https://github.com/postgrespro/testgres.git + cd testgres + python setup.py install + + # Grant the Github runner user full control of the workspace for initdb to successfully process the data folder + - name: Test Probackup + run: | + icacls.exe "${{ github.workspace }}" /grant "${env:USERNAME}:(OI)(CI)F" + $env:PATH += ";${{ github.workspace }}\postgres\postgres_install\lib;${{ env.zlib_dir }}\lib" + $Env:LC_MESSAGES = "English" + $Env:PG_CONFIG = "${{ github.workspace }}\postgres\postgres_install\bin\pg_config.exe" + $Env:PGPROBACKUPBIN = "${{ github.workspace }}\postgres\Release\pg_probackup\pg_probackup.exe" + $Env:PG_PROBACKUP_PTRACK = "ON" + If (!$Env:MODE -Or $Env:MODE -Eq "basic") { + $Env:PG_PROBACKUP_TEST_BASIC = "ON" + python -m unittest -v tests + python -m unittest -v tests.init + } else { + python -m unittest -v tests.$Env:MODE + } + diff --git a/gen_probackup_project.pl b/gen_probackup_project.pl index c24db1228..8143b7d0d 100644 --- a/gen_probackup_project.pl +++ b/gen_probackup_project.pl @@ -13,11 +13,11 @@ BEGIN { $pgsrc = shift @ARGV; if($pgsrc eq "--help"){ - print STDERR "Usage $0 pg-source-dir \n"; - print STDERR "Like this: \n"; - print STDERR "$0 C:/PgProject/postgresql.10dev/postgrespro \n"; - print STDERR "May be need input this before: \n"; - print STDERR "CALL \"C:\\Program Files (x86)\\Microsoft Visual Studio 12.0\\VC\\vcvarsall\" amd64\n"; + print STDERR "Usage $0 pg-source-dir\n"; + print STDERR "Like this:\n"; + print STDERR "$0 C:/PgProject/postgresql.10dev/postgrespro\n"; + print STDERR "May need to run this first:\n"; + print STDERR "CALL \"C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat\" amd64\n"; exit 1; } } @@ -133,7 +133,7 @@ sub build_pgprobackup unless (-d 'src/tools/msvc' && -d 'src'); # my $vsVersion = DetermineVisualStudioVersion(); - my $vsVersion = '12.00'; + my $vsVersion = '16.00'; $solution = CreateSolution($vsVersion, $config); diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 3b14b7170..ffb87c5ec 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -89,11 +89,7 @@ def dir_files(base_dir): def is_enterprise(): # pg_config --help - if os.name == 'posix': - cmd = [os.environ['PG_CONFIG'], '--help'] - - elif os.name == 'nt': - cmd = [[os.environ['PG_CONFIG']], ['--help']] + cmd = [os.environ['PG_CONFIG'], '--help'] p = subprocess.Popen( cmd, diff --git a/tests/validate.py b/tests/validate.py index 0b04d92fe..e62826388 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -2,6 +2,7 @@ import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException from datetime import datetime, timedelta +from pathlib import Path import subprocess from sys import exit import time @@ -58,7 +59,7 @@ def test_basic_validate_nullified_heap_page_backup(self): with open(log_file_path) as 
f: log_content = f.read() self.assertIn( - 'File: "{0}" blknum 1, empty page'.format(file), + 'File: "{0}" blknum 1, empty page'.format(Path(file).as_posix()), log_content, 'Failed to detect nullified block') @@ -4247,4 +4248,4 @@ def test_no_validate_tablespace_map(self): # 715 MAXALIGN(header.compressed_size), in); # 716 if (read_len != MAXALIGN(header.compressed_size)) # -> 717 elog(ERROR, "cannot read block %u of \"%s\" read %lu of %d", -# 718 blknum, file->path, read_len, header.compressed_size); \ No newline at end of file +# 718 blknum, file->path, read_len, header.compressed_size); From 7be2e738a923bd65026cd7c95150d5a67d0ec228 Mon Sep 17 00:00:00 2001 From: avaness Date: Fri, 27 May 2022 18:56:38 +0300 Subject: [PATCH 260/525] PBCKP-145: added check of unlogged table is restored as empty table (#490) --- tests/exclude.py | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/tests/exclude.py b/tests/exclude.py index b98a483d0..2c4925881 100644 --- a/tests/exclude.py +++ b/tests/exclude.py @@ -203,8 +203,10 @@ def test_exclude_unlogged_tables_1(self): # @unittest.skip("skip") def test_exclude_unlogged_tables_2(self): """ - make node, create unlogged, take FULL, check - that unlogged was not backed up + 1. make node, create unlogged, take FULL, DELTA, PAGE, + check that unlogged table files was not backed up + 2. restore FULL, DELTA, PAGE to empty db, + ensure unlogged table exist and is epmty """ fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -220,6 +222,8 @@ def test_exclude_unlogged_tables_2(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() + backup_ids = [] + for backup_type in ['full', 'delta', 'page']: if backup_type == 'full': @@ -231,14 +235,16 @@ def test_exclude_unlogged_tables_2(self): 'postgres', 'insert into test select generate_series(0,20050000)::text') - rel_path = node.safe_psql( + rel_path = node.execute( 'postgres', - "select pg_relation_filepath('test')").decode('utf-8').rstrip() + "select pg_relation_filepath('test')")[0][0] backup_id = self.backup_node( backup_dir, 'node', node, backup_type=backup_type, options=['--stream']) + backup_ids.append(backup_id) + filelist = self.get_backup_filelist( backup_dir, 'node', backup_id) @@ -258,9 +264,25 @@ def test_exclude_unlogged_tables_2(self): rel_path + '.3', filelist, "Unlogged table was not excluded") + # ensure restoring retrieves back only empty unlogged table + for backup_id in backup_ids: + node.stop() + node.cleanup() + + self.restore_node(backup_dir, 'node', node, backup_id=backup_id) + + node.slow_start() + + self.assertEqual( + node.execute( + 'postgres', + 'select count(*) from test')[0][0], + 0) + # Clean after yourself self.del_test_dir(module_name, fname) + # @unittest.skip("skip") def test_exclude_log_dir(self): """ From 02aef65853aa04fc6611b82904d8dfbfe59fdecd Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Tue, 31 May 2022 11:21:59 +0500 Subject: [PATCH 261/525] Fix is_enterprise checking in ptrack_helpers.py --- tests/helpers/ptrack_helpers.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 3b14b7170..a4ec7c9cf 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -90,17 +90,17 @@ def dir_files(base_dir): def is_enterprise(): # pg_config --help if os.name == 'posix': - cmd = [os.environ['PG_CONFIG'], '--help'] + cmd = [os.environ['PG_CONFIG'], 
'--pgpro-edition'] elif os.name == 'nt': - cmd = [[os.environ['PG_CONFIG']], ['--help']] + cmd = [[os.environ['PG_CONFIG']], ['--pgpro-edition']] p = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) - if b'postgrespro.ru' in p.communicate()[0]: + if b'enterprise' in p.communicate()[0]: return True else: return False From 55a74902fde5c811c02b0ec2e64d8be89762e76a Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Tue, 31 May 2022 12:49:20 +0500 Subject: [PATCH 262/525] Fix test_checkdb_with_least_privileges. Add GRANT EXECUTE on function pgpro_edition for amcheck indexes --- tests/checkdb.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/checkdb.py b/tests/checkdb.py index 9b7adcd71..e066c5777 100644 --- a/tests/checkdb.py +++ b/tests/checkdb.py @@ -726,6 +726,9 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup;' + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; ' ) if ProbackupTest.enterprise: # amcheck-1.1 @@ -766,6 +769,7 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; ' ) # checkunique parameter if ProbackupTest.enterprise: @@ -804,6 +808,7 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anycompatiblearray, anycompatible) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; ' ) # checkunique parameter if ProbackupTest.enterprise: @@ -811,11 +816,6 @@ def test_checkdb_with_least_privileges(self): "backupdb", "GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool, bool) TO backup") - if ProbackupTest.enterprise: - node.safe_psql( - "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup") - # checkdb try: self.checkdb_node( From 5f2283c8deac88ea49ea6223a3aa72e2cf462eb5 Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Fri, 27 May 2022 14:00:10 +0500 Subject: [PATCH 263/525] Add backup start time as parameter for do_backup --- src/backup.c | 4 ++-- src/catalog.c | 35 +++++++++++++++++++++++++++-------- src/pg_probackup.c | 5 ++++- src/pg_probackup.h | 4 ++-- 4 files changed, 35 insertions(+), 13 deletions(-) diff --git a/src/backup.c b/src/backup.c index c575865c4..e8477da4c 100644 --- a/src/backup.c +++ b/src/backup.c @@ -695,7 +695,7 @@ pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo) */ int do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, - bool no_validate, bool no_sync, bool backup_logs) + bool no_validate, bool no_sync, bool backup_logs, time_t start_time) { PGconn *backup_conn = NULL; PGNodeInfo nodeInfo; @@ -710,7 +710,7 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, current.external_dir_str = instance_config.external_dir_str; /* Create 
backup directory and BACKUP_CONTROL_FILE */ - pgBackupCreateDir(¤t, instanceState->instance_backup_subdir_path); + pgBackupCreateDir(¤t, instanceState, start_time); if (!instance_config.pgdata) elog(ERROR, "required parameter not specified: PGDATA " diff --git a/src/catalog.c b/src/catalog.c index b4ed8c189..516ee0ff8 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -23,7 +23,7 @@ static pgBackup* get_closest_backup(timelineInfo *tlinfo); static pgBackup* get_oldest_backup(timelineInfo *tlinfo); static const char *backupModes[] = {"", "PAGE", "PTRACK", "DELTA", "FULL"}; static pgBackup *readBackupControlFile(const char *path); -static time_t create_backup_dir(pgBackup *backup, const char *backup_instance_path); +static void create_backup_dir(pgBackup *backup, const char *backup_instance_path); static bool backup_lock_exit_hook_registered = false; static parray *locks = NULL; @@ -1420,10 +1420,12 @@ get_multi_timeline_parent(parray *backup_list, parray *tli_list, */ void -pgBackupCreateDir(pgBackup *backup, const char *backup_instance_path) +pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_time) { int i; parray *subdirs = parray_new(); + parray * backups; + pgBackup *target_backup; parray_append(subdirs, pg_strdup(DATABASE_DIR)); @@ -1444,7 +1446,26 @@ pgBackupCreateDir(pgBackup *backup, const char *backup_instance_path) free_dir_list(external_list); } - backup->backup_id = create_backup_dir(backup, backup_instance_path); + /* Get list of all backups*/ + backups = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID); + if (parray_num(backups) > 0) + { + target_backup = (pgBackup *) parray_get(backups, 0); + if (start_time > target_backup->backup_id) + { + backup->backup_id = start_time; + create_backup_dir(backup, instanceState->instance_backup_subdir_path); + } + else + { + elog(ERROR, "Cannot create directory for older backup"); + } + } + else + { + backup->backup_id = start_time; + create_backup_dir(backup, instanceState->instance_backup_subdir_path); + } if (backup->backup_id == 0) elog(ERROR, "Cannot create backup directory: %s", strerror(errno)); @@ -1471,7 +1492,7 @@ pgBackupCreateDir(pgBackup *backup, const char *backup_instance_path) * Create root directory for backup, * update pgBackup.root_dir if directory creation was a success */ -time_t +void create_backup_dir(pgBackup *backup, const char *backup_instance_path) { int attempts = 10; @@ -1480,9 +1501,8 @@ create_backup_dir(pgBackup *backup, const char *backup_instance_path) { int rc; char path[MAXPGPATH]; - time_t backup_id = time(NULL); - join_path_components(path, backup_instance_path, base36enc(backup_id)); + join_path_components(path, backup_instance_path, base36enc(backup->backup_id)); /* TODO: add wrapper for remote mode */ rc = dir_create_dir(path, DIR_PERMISSION, true); @@ -1490,7 +1510,7 @@ create_backup_dir(pgBackup *backup, const char *backup_instance_path) if (rc == 0) { backup->root_dir = pgut_strdup(path); - return backup_id; + return; } else { @@ -1499,7 +1519,6 @@ create_backup_dir(pgBackup *backup, const char *backup_instance_path) } } - return 0; } /* diff --git a/src/pg_probackup.c b/src/pg_probackup.c index b9b3af0b9..8d45d6e7f 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -939,6 +939,9 @@ main(int argc, char *argv[]) return do_init(catalogState); case BACKUP_CMD: { + time_t start_time; + time(&start_time); + current.stream = stream_wal; /* sanity */ @@ -947,7 +950,7 @@ main(int argc, char *argv[]) "(-b, --backup-mode)"); return do_backup(instanceState, 
set_backup_params, - no_validate, no_sync, backup_logs); + no_validate, no_sync, backup_logs, start_time); } case CATCHUP_CMD: return do_catchup(catchup_source_pgdata, catchup_destination_pgdata, num_threads, !no_sync, diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 4cd65980c..e4159f4ab 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -840,7 +840,7 @@ extern char** commands_args; /* in backup.c */ extern int do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, - bool no_validate, bool no_sync, bool backup_logs); + bool no_validate, bool no_sync, bool backup_logs, time_t start_time); extern void do_checkdb(bool need_amcheck, ConnectionOptions conn_opt, char *pgdata); extern BackupMode parse_backup_mode(const char *value); @@ -981,7 +981,7 @@ extern void write_backup_filelist(pgBackup *backup, parray *files, const char *root, parray *external_list, bool sync); -extern void pgBackupCreateDir(pgBackup *backup, const char *backup_instance_path); +extern void pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_time); extern void pgNodeInit(PGNodeInfo *node); extern void pgBackupInit(pgBackup *backup); extern void pgBackupFree(void *backup); From 3c74ebf2f9a4eb22b9a9dfb955d3379e5c217f48 Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Tue, 31 May 2022 18:03:31 +0500 Subject: [PATCH 264/525] Add --start-time option for backup --- src/pg_probackup.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 8d45d6e7f..15f2542b0 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -78,6 +78,7 @@ pid_t my_pid = 0; __thread int my_thread_num = 1; bool progress = false; bool no_sync = false; +time_t start_time = 0; #if PG_VERSION_NUM >= 100000 char *replication_slot = NULL; bool temp_slot = false; @@ -200,6 +201,7 @@ static ConfigOption cmd_options[] = { 's', 'i', "backup-id", &backup_id_string, SOURCE_CMD_STRICT }, { 'b', 133, "no-sync", &no_sync, SOURCE_CMD_STRICT }, { 'b', 134, "no-color", &no_color, SOURCE_CMD_STRICT }, + { 'U', 241, "start-time", &start_time, SOURCE_CMD_STRICT }, /* backup options */ { 'b', 180, "backup-pg-log", &backup_logs, SOURCE_CMD_STRICT }, { 'f', 'b', "backup-mode", opt_backup_mode, SOURCE_CMD_STRICT }, @@ -939,10 +941,9 @@ main(int argc, char *argv[]) return do_init(catalogState); case BACKUP_CMD: { - time_t start_time; - time(&start_time); - current.stream = stream_wal; + if (start_time == 0) + start_time = current_time; /* sanity */ if (current.backup_mode == BACKUP_MODE_INVALID) From c81c54be4cac6f900e6b73df06788f349eecb3af Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Tue, 31 May 2022 18:35:54 +0500 Subject: [PATCH 265/525] Add --start-time option into help message --- src/help.c | 3 +++ tests/expected/option_help.out | 1 + 2 files changed, 4 insertions(+) diff --git a/src/help.c b/src/help.c index 8ebe734a3..7a1a1c580 100644 --- a/src/help.c +++ b/src/help.c @@ -150,6 +150,7 @@ help_pg_probackup(void) printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n")); printf(_(" [--ttl=interval] [--expire-time=timestamp] [--note=text]\n")); + printf(_(" [--start-time]\n")); printf(_(" [--help]\n")); @@ -323,6 +324,7 @@ help_backup(void) printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n")); printf(_(" [--ttl=interval] [--expire-time=timestamp] [--note=text]\n\n")); + printf(_(" [--start-time]\n")); printf(_(" -B, --backup-path=backup-path location of 
the backup storage area\n")); printf(_(" -b, --backup-mode=backup-mode backup mode=FULL|PAGE|DELTA|PTRACK\n")); @@ -343,6 +345,7 @@ help_backup(void) printf(_(" --no-sync do not sync backed up files to disk\n")); printf(_(" --note=text add note to backup\n")); printf(_(" (example: --note='backup before app update to v13.1')\n")); + printf(_(" --start-time set time of starting backup as a parameter for naming backup\n")); printf(_("\n Logging options:\n")); printf(_(" --log-level-console=log-level-console\n")); diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index 00b50d10c..9026b99b3 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -68,6 +68,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--remote-port] [--remote-path] [--remote-user] [--ssh-options] [--ttl=interval] [--expire-time=timestamp] [--note=text] + [--start-time] [--help] pg_probackup restore -B backup-path --instance=instance_name From 884e8b09f315a7ff9b53bea6f8395b44d0ed22f2 Mon Sep 17 00:00:00 2001 From: dlepikhova <43872363+dlepikhova@users.noreply.github.com> Date: Wed, 1 Jun 2022 12:49:09 +0500 Subject: [PATCH 266/525] [pbckp-128] dry-run option for catchup (#477) * Added dry-run option for catchup. Run catchup without affect on the files and WAL --- src/catchup.c | 84 +++++++++++------- src/help.c | 4 + tests/catchup.py | 154 +++++++++++++++++++++++++++++++++ tests/expected/option_help.out | 1 + travis/run_tests.sh | 9 ++ 5 files changed, 220 insertions(+), 32 deletions(-) diff --git a/src/catchup.c b/src/catchup.c index 1b8f8084d..3c522afb7 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -2,7 +2,7 @@ * * catchup.c: sync DB cluster * - * Copyright (c) 2021, Postgres Professional + * Copyright (c) 2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -507,16 +507,20 @@ catchup_multithreaded_copy(int num_threads, /* Run threads */ thread_interrupted = false; threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads); - for (i = 0; i < num_threads; i++) + if (!dry_run) { - elog(VERBOSE, "Start thread num: %i", i); - pthread_create(&threads[i], NULL, &catchup_thread_runner, &(threads_args[i])); + for (i = 0; i < num_threads; i++) + { + elog(VERBOSE, "Start thread num: %i", i); + pthread_create(&threads[i], NULL, &catchup_thread_runner, &(threads_args[i])); + } } /* Wait threads */ for (i = 0; i < num_threads; i++) { - pthread_join(threads[i], NULL); + if (!dry_run) + pthread_join(threads[i], NULL); all_threads_successful &= threads_args[i].completed; transfered_bytes_result += threads_args[i].transfered_bytes; } @@ -706,9 +710,14 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* Start stream replication */ join_path_components(dest_xlog_path, dest_pgdata, PG_XLOG_DIR); - fio_mkdir(dest_xlog_path, DIR_PERMISSION, FIO_LOCAL_HOST); - start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt, - current.start_lsn, current.tli, false); + if (!dry_run) + { + fio_mkdir(dest_xlog_path, DIR_PERMISSION, FIO_LOCAL_HOST); + start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt, + current.start_lsn, current.tli, false); + } + else + elog(INFO, "WAL streaming skipping with --dry-run option"); source_filelist = parray_new(); @@ -779,9 +788,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* Build the page map from ptrack information */ make_pagemap_from_ptrack_2(source_filelist, 
source_conn, - source_node_info.ptrack_schema, - source_node_info.ptrack_version_num, - dest_redo.lsn); + source_node_info.ptrack_schema, + source_node_info.ptrack_version_num, + dest_redo.lsn); time(&end_time); elog(INFO, "Pagemap successfully extracted, time elapsed: %.0f sec", difftime(end_time, start_time)); @@ -820,9 +829,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, char dirpath[MAXPGPATH]; join_path_components(dirpath, dest_pgdata, file->rel_path); - elog(VERBOSE, "Create directory '%s'", dirpath); - fio_mkdir(dirpath, DIR_PERMISSION, FIO_LOCAL_HOST); + if (!dry_run) + fio_mkdir(dirpath, DIR_PERMISSION, FIO_LOCAL_HOST); } else { @@ -853,15 +862,18 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"", linked_path, to_path); - /* create tablespace directory */ - if (fio_mkdir(linked_path, file->mode, FIO_LOCAL_HOST) != 0) - elog(ERROR, "Could not create tablespace directory \"%s\": %s", - linked_path, strerror(errno)); - - /* create link to linked_path */ - if (fio_symlink(linked_path, to_path, true, FIO_LOCAL_HOST) < 0) - elog(ERROR, "Could not create symbolic link \"%s\" -> \"%s\": %s", - linked_path, to_path, strerror(errno)); + if (!dry_run) + { + /* create tablespace directory */ + if (fio_mkdir(linked_path, file->mode, FIO_LOCAL_HOST) != 0) + elog(ERROR, "Could not create tablespace directory \"%s\": %s", + linked_path, strerror(errno)); + + /* create link to linked_path */ + if (fio_symlink(linked_path, to_path, true, FIO_LOCAL_HOST) < 0) + elog(ERROR, "Could not create symbolic link \"%s\" -> \"%s\": %s", + linked_path, to_path, strerror(errno)); + } } } @@ -930,7 +942,10 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, char fullpath[MAXPGPATH]; join_path_components(fullpath, dest_pgdata, file->rel_path); - fio_delete(file->mode, fullpath, FIO_LOCAL_HOST); + if (!dry_run) + { + fio_delete(file->mode, fullpath, FIO_LOCAL_HOST); + } elog(VERBOSE, "Deleted file \"%s\"", fullpath); /* shrink dest pgdata list */ @@ -961,7 +976,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, catchup_isok = transfered_datafiles_bytes != -1; /* at last copy control file */ - if (catchup_isok) + if (catchup_isok && !dry_run) { char from_fullpath[MAXPGPATH]; char to_fullpath[MAXPGPATH]; @@ -972,7 +987,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, transfered_datafiles_bytes += source_pg_control_file->size; } - if (!catchup_isok) + if (!catchup_isok && !dry_run) { char pretty_time[20]; char pretty_transfered_data_bytes[20]; @@ -1010,14 +1025,18 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, pg_free(stop_backup_query_text); } - wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, ¤t); + if (!dry_run) + wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, ¤t); #if PG_VERSION_NUM >= 90600 /* Write backup_label */ Assert(stop_backup_result.backup_label_content != NULL); - pg_stop_backup_write_file_helper(dest_pgdata, PG_BACKUP_LABEL_FILE, "backup label", - stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, - NULL); + if (!dry_run) + { + pg_stop_backup_write_file_helper(dest_pgdata, PG_BACKUP_LABEL_FILE, "backup label", + stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, + NULL); + } 
free(stop_backup_result.backup_label_content); stop_backup_result.backup_label_content = NULL; stop_backup_result.backup_label_content_len = 0; @@ -1040,6 +1059,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, #endif /* wait for end of wal streaming and calculate wal size transfered */ + if (!dry_run) { parray *wal_files_list = NULL; wal_files_list = parray_new(); @@ -1091,17 +1111,17 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, } /* Sync all copied files unless '--no-sync' flag is used */ - if (sync_dest_files) + if (sync_dest_files && !dry_run) catchup_sync_destination_files(dest_pgdata, FIO_LOCAL_HOST, source_filelist, source_pg_control_file); else elog(WARNING, "Files are not synced to disk"); /* Cleanup */ - if (dest_filelist) + if (dest_filelist && !dry_run) { parray_walk(dest_filelist, pgFileFree); - parray_free(dest_filelist); } + parray_free(dest_filelist); parray_walk(source_filelist, pgFileFree); parray_free(source_filelist); pgFileFree(source_pg_control_file); diff --git a/src/help.c b/src/help.c index 8ebe734a3..b22fa912e 100644 --- a/src/help.c +++ b/src/help.c @@ -261,6 +261,7 @@ help_pg_probackup(void) printf(_(" [--remote-proto] [--remote-host]\n")); printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n")); + printf(_(" [--dry-run]\n")); printf(_(" [--help]\n")); if ((PROGRAM_URL || PROGRAM_EMAIL)) @@ -1047,6 +1048,7 @@ help_catchup(void) printf(_(" [--remote-proto] [--remote-host]\n")); printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n")); + printf(_(" [--dry-run]\n")); printf(_(" [--help]\n\n")); printf(_(" -b, --backup-mode=catchup-mode catchup mode=FULL|DELTA|PTRACK\n")); @@ -1081,4 +1083,6 @@ help_catchup(void) printf(_(" --remote-user=username user name for ssh connection (default: current user)\n")); printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n\n")); + + printf(_(" --dry-run perform a trial run without any changes\n\n")); } diff --git a/tests/catchup.py b/tests/catchup.py index 8441deaaf..a83755c54 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -1455,3 +1455,157 @@ def test_config_exclusion(self): dst_pg.stop() #self.assertEqual(1, 0, 'Stop test') self.del_test_dir(module_name, self.fname) + +######################################### +# --dry-run +######################################### + def test_dry_run_catchup_full(self): + """ + Test dry-run option for full catchup + """ + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True + ) + src_pg.slow_start() + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) + pgbench.wait() + + # save the condition before dry-run + content_before = self.pgdata_content(dst_pg.data_dir) + + # do full catchup + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--dry-run'] + ) + + # compare data dirs before and after catchup + self.compare_pgdata( + content_before, + self.pgdata_content(dst_pg.data_dir) + ) + + # Cleanup + src_pg.stop() + self.del_test_dir(module_name, 
self.fname) + + def test_dry_run_catchup_ptrack(self): + """ + Test dry-run option for catchup in incremental ptrack mode + """ + if not self.ptrack: + return unittest.skip('Skipped because ptrack support is disabled') + + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + ptrack_enable = True, + initdb_params = ['--data-checksums'] + ) + src_pg.slow_start() + src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack") + + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) + pgbench.wait() + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + self.set_replica(src_pg, dst_pg) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start(replica = True) + dst_pg.stop() + + # save the condition before dry-run + content_before = self.pgdata_content(dst_pg.data_dir) + + # do incremental catchup + self.catchup_node( + backup_mode = 'PTRACK', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--dry-run'] + ) + + # compare data dirs before and after cathup + self.compare_pgdata( + content_before, + self.pgdata_content(dst_pg.data_dir) + ) + + # Cleanup + src_pg.stop() + self.del_test_dir(module_name, self.fname) + + def test_dry_run_catchup_delta(self): + """ + Test dry-run option for catchup in incremental delta mode + """ + + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + initdb_params = ['--data-checksums'], + pg_options = { 'wal_log_hints': 'on' } + ) + src_pg.slow_start() + + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) + pgbench.wait() + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + self.set_replica(src_pg, dst_pg) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start(replica = True) + dst_pg.stop() + + # save the condition before dry-run + content_before = self.pgdata_content(dst_pg.data_dir) + + # do delta catchup + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', "--dry-run"] + ) + + # compare data dirs before and after cathup + self.compare_pgdata( + content_before, + self.pgdata_content(dst_pg.data_dir) + ) + + # Cleanup + src_pg.stop() + self.del_test_dir(module_name, self.fname) + diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index 00b50d10c..8a1de1f67 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -178,6 +178,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. 
[--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] + [--dry-run] [--help] Read the website for details . diff --git a/travis/run_tests.sh b/travis/run_tests.sh index a62ad4de7..37614f970 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -100,11 +100,20 @@ source pyenv/bin/activate pip3 install testgres echo "############### Testing:" +echo PG_PROBACKUP_PARANOIA=${PG_PROBACKUP_PARANOIA} +echo ARCHIVE_COMPRESSION=${ARCHIVE_COMPRESSION} +echo PGPROBACKUPBIN_OLD=${PGPROBACKUPBIN_OLD} +echo PGPROBACKUPBIN=${PGPROBACKUPBIN} +echo PGPROBACKUP_SSH_REMOTE=${PGPROBACKUP_SSH_REMOTE} +echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB} +echo PG_PROBACKUP_PTRACK=${PG_PROBACKUP_PTRACK} if [ "$MODE" = "basic" ]; then export PG_PROBACKUP_TEST_BASIC=ON + echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} python3 -m unittest -v tests python3 -m unittest -v tests.init else + echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} python3 -m unittest -v tests.$MODE fi From 8bb0a618fb5608af098f12d04285e572054ad194 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Wed, 1 Jun 2022 10:59:19 +0300 Subject: [PATCH 267/525] Version 2.5.6 --- src/pg_probackup.h | 2 +- tests/expected/option_version.out | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 4cd65980c..2c4c61036 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -338,7 +338,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.5" +#define PROGRAM_VERSION "2.5.6" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20501 diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index 29cd93f45..96f0f3446 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.5.5 \ No newline at end of file +pg_probackup 2.5.6 From 1b75b4ed62c6f2f8d2912bbd61efa9d7d3f27360 Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Fri, 3 Jun 2022 13:45:50 +0500 Subject: [PATCH 268/525] Add tests for --start-time option on the one and few nodes --- tests/backup.py | 371 +++++++++++++++++++++++++++++++- tests/helpers/ptrack_helpers.py | 5 +- 2 files changed, 374 insertions(+), 2 deletions(-) diff --git a/tests/backup.py b/tests/backup.py index 682409015..58fb4238c 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -1,6 +1,6 @@ import unittest import os -from time import sleep +from time import sleep, time from .helpers.ptrack_helpers import ProbackupTest, ProbackupException import shutil from distutils.dir_util import copy_tree @@ -3400,3 +3400,372 @@ def test_pg_stop_backup_missing_permissions(self): # Clean after yourself self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_start_time(self): + + fname = self.id().split('.')[3] + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + startTime = int(time()) + self.backup_node( + backup_dir, 'node', node, backup_type="full", + 
options=['--stream', '--start-time', str(startTime)]) + + # DELTA backup + startTime = int(time()) + self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=['--stream', '--start-time', str(startTime)]) + + # PAGE backup + startTime = int(time()) + self.backup_node( + backup_dir, 'node', node, backup_type="page", + options=['--stream', '--start-time', str(startTime)]) + + if self.ptrack and node.major_version > 11: + node.safe_psql( + "postgres", + "create extension ptrack") + + # PTRACK backup + startTime = int(time()) + self.backup_node( + backup_dir, 'node', node, backup_type="ptrack", + options=['--stream', '--start-time', str(startTime)]) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_start_time_incorrect_time(self): + + fname = self.id().split('.')[3] + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + startTime = int(time()) + #backup with correct start time + self.backup_node( + backup_dir, 'node', node, + options=['--stream', '--start-time', str(startTime)]) + #backups with incorrect start time + try: + self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=['--stream', '--start-time', str(startTime-10000)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because start time for new backup must be newer " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertRegex( + e.message, + "ERROR: Cannot create directory for older backup", + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + try: + self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=['--stream', '--start-time', str(startTime-10000)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because start time for new backup must be newer " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertRegex( + e.message, + "ERROR: Cannot create directory for older backup", + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + try: + self.backup_node( + backup_dir, 'node', node, backup_type="page", + options=['--stream', '--start-time', str(startTime-10000)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because start time for new backup must be newer " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertRegex( + e.message, + "ERROR: Cannot create directory for older backup", + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + if self.ptrack and node.major_version > 11: + node.safe_psql( + "postgres", + "create extension ptrack") + + try: + self.backup_node( + backup_dir, 'node', node, backup_type="page", + options=['--stream', '--start-time', str(startTime-10000)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because start time for new 
backup must be newer " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertRegex( + e.message, + "ERROR: Cannot create directory for older backup", + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_start_time_few_nodes(self): + + fname = self.id().split('.')[3] + node1 = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node1'), + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + backup_dir1 = os.path.join(self.tmp_path, module_name, fname, 'backup1') + self.init_pb(backup_dir1) + self.add_instance(backup_dir1, 'node1', node1) + self.set_archiving(backup_dir1, 'node1', node1) + node1.slow_start() + + node2 = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node2'), + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + backup_dir2 = os.path.join(self.tmp_path, module_name, fname, 'backup2') + self.init_pb(backup_dir2) + self.add_instance(backup_dir2, 'node2', node2) + self.set_archiving(backup_dir2, 'node2', node2) + node2.slow_start() + + # FULL backup + startTime = int(time()) + self.backup_node( + backup_dir1, 'node1', node1, backup_type="full", + options=['--stream', '--start-time', str(startTime)]) + self.backup_node( + backup_dir2, 'node2', node2, backup_type="full", + options=['--stream', '--start-time', str(startTime)]) + + show_backup1 = self.show_pb(backup_dir1, 'node1')[0] + show_backup2 = self.show_pb(backup_dir2, 'node2')[0] + self.assertEqual(show_backup1['id'], show_backup2['id']) + + # DELTA backup + startTime = int(time()) + self.backup_node( + backup_dir1, 'node1', node1, backup_type="delta", + options=['--stream', '--start-time', str(startTime)]) + self.backup_node( + backup_dir2, 'node2', node2, backup_type="delta", + options=['--stream', '--start-time', str(startTime)]) + show_backup1 = self.show_pb(backup_dir1, 'node1')[1] + show_backup2 = self.show_pb(backup_dir2, 'node2')[1] + self.assertEqual(show_backup1['id'], show_backup2['id']) + + # PAGE backup + startTime = int(time()) + self.backup_node( + backup_dir1, 'node1', node1, backup_type="page", + options=['--stream', '--start-time', str(startTime)]) + self.backup_node( + backup_dir2, 'node2', node2, backup_type="page", + options=['--stream', '--start-time', str(startTime)]) + show_backup1 = self.show_pb(backup_dir1, 'node1')[2] + show_backup2 = self.show_pb(backup_dir2, 'node2')[2] + self.assertEqual(show_backup1['id'], show_backup2['id']) + + # PTRACK backup + startTime = int(time()) + if self.ptrack and node1.major_version > 11: + node1.safe_psql( + "postgres", + "create extension ptrack") + self.backup_node( + backup_dir1, 'node1', node1, backup_type="ptrack", + options=['--stream', '--start-time', str(startTime)]) + + if self.ptrack and node2.major_version > 11: + node2.safe_psql( + "postgres", + "create extension ptrack") + self.backup_node( + backup_dir2, 'node2', node2, backup_type="ptrack", + options=['--stream', '--start-time', str(startTime)]) + show_backup1 = self.show_pb(backup_dir1, 'node1')[3] + show_backup2 = self.show_pb(backup_dir2, 'node2')[3] + self.assertEqual(show_backup1['id'], show_backup2['id']) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_start_time_few_nodes_incorrect_time(self): + + fname = self.id().split('.')[3] + node1 = self.make_simple_node( + 
base_dir=os.path.join(module_name, fname, 'node1'), + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + backup_dir1 = os.path.join(self.tmp_path, module_name, fname, 'backup1') + self.init_pb(backup_dir1) + self.add_instance(backup_dir1, 'node1', node1) + self.set_archiving(backup_dir1, 'node1', node1) + node1.slow_start() + + node2 = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node2'), + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + backup_dir2 = os.path.join(self.tmp_path, module_name, fname, 'backup2') + self.init_pb(backup_dir2) + self.add_instance(backup_dir2, 'node2', node2) + self.set_archiving(backup_dir2, 'node2', node2) + node2.slow_start() + + # FULL backup + startTime = int(time()) + self.backup_node( + backup_dir1, 'node1', node1, backup_type="full", + options=['--stream', '--start-time', str(startTime)]) + self.backup_node( + backup_dir2, 'node2', node2, backup_type="full", + options=['--stream', '--start-time', str(startTime-10000)]) + + show_backup1 = self.show_pb(backup_dir1, 'node1')[0] + show_backup2 = self.show_pb(backup_dir2, 'node2')[0] + self.assertGreater(show_backup1['id'], show_backup2['id']) + + # DELTA backup + startTime = int(time()) + self.backup_node( + backup_dir1, 'node1', node1, backup_type="delta", + options=['--stream', '--start-time', str(startTime)]) + # make backup with start time definitelly earlier, than existing + try: + self.backup_node( + backup_dir2, 'node2', node2, backup_type="delta", + options=['--stream', '--start-time', str(10000)]) + self.assertEqual( + 1, 0, + "Expecting Error because start time for new backup must be newer " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertRegex( + e.message, + "ERROR: Cannot create directory for older backup", + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + show_backup1 = self.show_pb(backup_dir1, 'node1')[1] + show_backup2 = self.show_pb(backup_dir2, 'node2')[0] + self.assertGreater(show_backup1['id'], show_backup2['id']) + + # PAGE backup + startTime = int(time()) + self.backup_node( + backup_dir1, 'node1', node1, backup_type="page", + options=['--stream', '--start-time', str(startTime)]) + # make backup with start time definitelly earlier, than existing + try: + self.backup_node( + backup_dir2, 'node2', node2, backup_type="page", + options=['--stream', '--start-time', str(10000)]) + self.assertEqual( + 1, 0, + "Expecting Error because start time for new backup must be newer " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertRegex( + e.message, + "ERROR: Cannot create directory for older backup", + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + show_backup1 = self.show_pb(backup_dir1, 'node1')[2] + show_backup2 = self.show_pb(backup_dir2, 'node2')[0] + self.assertGreater(show_backup1['id'], show_backup2['id']) + + # PTRACK backup + startTime = int(time()) + if self.ptrack and node1.major_version > 11: + node1.safe_psql( + "postgres", + "create extension ptrack") + self.backup_node( + backup_dir1, 'node1', node1, backup_type="ptrack", + options=['--stream', '--start-time', str(startTime)]) + + if self.ptrack and node2.major_version > 11: + node2.safe_psql( + "postgres", + "create extension ptrack") + # make backup with start time definitelly earlier, than existing + try: + self.backup_node( + backup_dir2, 'node2', node2, 
backup_type="ptrack", + options=['--stream', '--start-time', str(10000)]) + self.assertEqual( + 1, 0, + "Expecting Error because start time for new backup must be newer " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertRegex( + e.message, + "ERROR: Cannot create directory for older backup", + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + # FULL backup + startTime = int(time()) + self.backup_node( + backup_dir1, 'node1', node1, backup_type="full", + options=['--stream', '--start-time', str(startTime)]) + self.backup_node( + backup_dir2, 'node2', node2, backup_type="full", + options=['--stream', '--start-time', str(startTime)]) + + show_backup1 = self.show_pb(backup_dir1, 'node1')[4] + show_backup2 = self.show_pb(backup_dir2, 'node2')[1] + self.assertEqual(show_backup1['id'], show_backup2['id']) + + # Clean after yourself + self.del_test_dir(module_name, fname) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index ffb87c5ec..5f1d40ab3 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -938,7 +938,7 @@ def backup_node( backup_type='full', datname=False, options=[], asynchronous=False, gdb=False, old_binary=False, return_id=True, no_remote=False, - env=None + env=None, startTime=None ): if not node and not data_dir: print('You must provide ether node or data_dir for backup') @@ -971,6 +971,9 @@ def backup_node( if not old_binary: cmd_list += ['--no-sync'] + if startTime: + cmd_list += ['--start-time', startTime] + return self.run_pb(cmd_list + options, asynchronous, gdb, old_binary, return_id, env=env) def checkdb_node( From 41855701c7033ff358de8b56822ce607dd3303c9 Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Wed, 8 Jun 2022 00:38:51 +0300 Subject: [PATCH 269/525] [PBCKP-153] Added waldir option for location for the write-ahead log directory (-X, --waldir=WALDIR) --- src/dir.c | 30 +++++++++++++++++++++++++++++- src/help.c | 6 ++++++ src/merge.c | 2 +- src/pg_probackup.c | 17 +++++++++++++++++ src/pg_probackup.h | 5 ++++- src/restore.c | 2 +- 6 files changed, 58 insertions(+), 4 deletions(-) diff --git a/src/dir.c b/src/dir.c index 4ebe0939b..ac794cee4 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1036,13 +1036,20 @@ opt_externaldir_map(ConfigOption *opt, const char *arg) */ void create_data_directories(parray *dest_files, const char *data_dir, const char *backup_dir, - bool extract_tablespaces, bool incremental, fio_location location) + bool extract_tablespaces, bool incremental, fio_location location, + const char* waldir_path) { int i; parray *links = NULL; mode_t pg_tablespace_mode = DIR_PERMISSION; char to_path[MAXPGPATH]; + if (waldir_path && !dir_is_empty(waldir_path, location)) + { + elog(ERROR, "WAL directory location is not empty: \"%s\"", waldir_path); + } + + /* get tablespace map */ if (extract_tablespaces) { @@ -1107,6 +1114,27 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba /* skip external directory content */ if (dir->external_dir_num != 0) continue; + /* Create WAL directory and symlink if waldir_path is setting */ + if (waldir_path && strcmp(dir->rel_path, PG_XLOG_DIR) == 0) { + /* get full path to PG_XLOG_DIR */ + + join_path_components(to_path, data_dir, PG_XLOG_DIR); + + elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"", + waldir_path, to_path); + + /* create tablespace directory from waldir_path*/ + fio_mkdir(waldir_path, pg_tablespace_mode, 
location); + + /* create link to linked_path */ + if (fio_symlink(waldir_path, to_path, incremental, location) < 0) + elog(ERROR, "Could not create symbolic link \"%s\": %s", + to_path, strerror(errno)); + + continue; + + + } /* tablespace_map exists */ if (links) diff --git a/src/help.c b/src/help.c index b22fa912e..85894759e 100644 --- a/src/help.c +++ b/src/help.c @@ -169,6 +169,7 @@ help_pg_probackup(void) printf(_(" [-T OLDDIR=NEWDIR] [--progress]\n")); printf(_(" [--external-mapping=OLDDIR=NEWDIR]\n")); printf(_(" [--skip-external-dirs] [--no-sync]\n")); + printf(_(" [-X WALDIR | --waldir=WALDIR]\n")); printf(_(" [-I | --incremental-mode=none|checksum|lsn]\n")); printf(_(" [--db-include | --db-exclude]\n")); printf(_(" [--remote-proto] [--remote-host]\n")); @@ -435,6 +436,7 @@ help_restore(void) printf(_(" [-T OLDDIR=NEWDIR]\n")); printf(_(" [--external-mapping=OLDDIR=NEWDIR]\n")); printf(_(" [--skip-external-dirs]\n")); + printf(_(" [-X WALDIR | --waldir=WALDIR]\n")); printf(_(" [-I | --incremental-mode=none|checksum|lsn]\n")); printf(_(" [--db-include dbname | --db-exclude dbname]\n")); printf(_(" [--recovery-target-time=time|--recovery-target-xid=xid\n")); @@ -472,6 +474,10 @@ help_restore(void) printf(_(" relocate the external directory from OLDDIR to NEWDIR\n")); printf(_(" --skip-external-dirs do not restore all external directories\n")); + + printf(_(" -X, --waldir=WALDIR location for the write-ahead log directory\n")); + + printf(_("\n Incremental restore options:\n")); printf(_(" -I, --incremental-mode=none|checksum|lsn\n")); printf(_(" reuse valid pages available in PGDATA if they have not changed\n")); diff --git a/src/merge.c b/src/merge.c index ff39c2510..1ce92bb42 100644 --- a/src/merge.c +++ b/src/merge.c @@ -614,7 +614,7 @@ merge_chain(InstanceState *instanceState, /* Create directories */ create_data_directories(dest_backup->files, full_database_dir, - dest_backup->root_dir, false, false, FIO_BACKUP_HOST); + dest_backup->root_dir, false, false, FIO_BACKUP_HOST, NULL); /* External directories stuff */ if (dest_backup->external_dir_str) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index b9b3af0b9..2c8100b83 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -122,6 +122,7 @@ static parray *datname_include_list = NULL; /* arrays for --exclude-path's */ static parray *exclude_absolute_paths_list = NULL; static parray *exclude_relative_paths_list = NULL; +static char* waldir_path = NULL; /* checkdb options */ bool need_amcheck = false; @@ -238,6 +239,7 @@ static ConfigOption cmd_options[] = { 's', 160, "primary-conninfo", &primary_conninfo, SOURCE_CMD_STRICT }, { 's', 'S', "primary-slot-name",&replication_slot, SOURCE_CMD_STRICT }, { 'f', 'I', "incremental-mode", opt_incr_restore_mode, SOURCE_CMD_STRICT }, + { 's', 'X', "waldir", &waldir_path, SOURCE_CMD_STRICT }, /* checkdb options */ { 'b', 195, "amcheck", &need_amcheck, SOURCE_CMD_STRICT }, { 'b', 196, "heapallindexed", &heapallindexed, SOURCE_CMD_STRICT }, @@ -754,6 +756,21 @@ main(int argc, char *argv[]) restore_params->partial_restore_type = INCLUDE; restore_params->partial_db_list = datname_include_list; } + + if (waldir_path) + { + /* clean up xlog directory name, check it's absolute */ + canonicalize_path(waldir_path); + if (!is_absolute_path(waldir_path)) + { + elog(ERROR, "WAL directory location must be an absolute path"); + } + if (strlen(waldir_path) > MAXPGPATH) + elog(ERROR, "Value specified to --waldir is too long"); + + } + restore_params->waldir = waldir_path; + } /* diff --git 
a/src/pg_probackup.h b/src/pg_probackup.h index 2c4c61036..13650be8b 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -566,6 +566,8 @@ typedef struct pgRestoreParams /* options for partial restore */ PartialRestoreType partial_restore_type; parray *partial_db_list; + + char* waldir; } pgRestoreParams; /* Options needed for set-backup command */ @@ -1022,7 +1024,8 @@ extern void create_data_directories(parray *dest_files, const char *backup_dir, bool extract_tablespaces, bool incremental, - fio_location location); + fio_location location, + const char *waldir_path); extern void read_tablespace_map(parray *links, const char *backup_dir); extern void opt_tablespace_map(ConfigOption *opt, const char *arg); diff --git a/src/restore.c b/src/restore.c index d8d808a4e..fbf0c0398 100644 --- a/src/restore.c +++ b/src/restore.c @@ -801,7 +801,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, create_data_directories(dest_files, instance_config.pgdata, dest_backup->root_dir, backup_has_tblspc, params->incremental_mode != INCR_NONE, - FIO_DB_HOST); + FIO_DB_HOST, params->waldir); /* * Restore dest_backup external directories. From 48a2c835d1c12353e23e08b901beaf39695773f9 Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Wed, 8 Jun 2022 17:40:49 +0300 Subject: [PATCH 270/525] [PBCKP-153] Added a test for the waldir option for the restore command --- tests/restore.py | 56 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/tests/restore.py b/tests/restore.py index bbdadeb23..668cff4fd 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -3916,3 +3916,59 @@ def test_restore_issue_313(self): # Clean after yourself self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_restore_with_waldir(self): + """recovery using tablespace-mapping option and page backup""" + fname = self.id().split('.')[3] + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + + with node.connect("postgres") as con: + con.execute( + "CREATE TABLE tbl AS SELECT * " + "FROM generate_series(0,3) AS integer") + con.commit() + + # Full backup + backup_id = self.backup_node(backup_dir, 'node', node) + + node.stop() + node.cleanup() + + # Create waldir + waldir_path = os.path.join(node.base_dir, "waldir") + os.makedirs(waldir_path) + + # Test recovery from latest + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, + options=[ + "-X", "%s" % (waldir_path)]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + + count = node.execute("postgres", "SELECT count(*) FROM tbl") + self.assertEqual(count[0][0], 4) + + # check pg_wal is symlink + if node.major_version >= 10: + wal_path=os.path.join(node.data_dir, "pg_wal") + else: + wal_path=os.path.join(node.data_dir, "pg_xlog") + + self.assertEqual(os.path.islink(wal_path), True) + + # Clean after yourself + self.del_test_dir(module_name, fname) From e72feb6813fa4862dbad12c657119c7bcfefe12b Mon Sep 17 00:00:00 2001 From: Sokolov Yura Date: Thu, 16 Jun 2022 09:26:02 +0300 Subject: [PATCH 271/525] rapid agent close + disable ssh control master. 
(#493) --- src/utils/file.c | 5 ++++- src/utils/remote.c | 3 +++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/utils/file.c b/src/utils/file.c index 7d1df554b..7103c8f1d 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -489,8 +489,10 @@ fio_disconnect(void) Assert(hdr.cop == FIO_DISCONNECTED); SYS_CHECK(close(fio_stdin)); SYS_CHECK(close(fio_stdout)); + SYS_CHECK(close(fio_stderr)); fio_stdin = 0; fio_stdout = 0; + fio_stderr = 0; wait_ssh(); } } @@ -3403,7 +3405,8 @@ fio_communicate(int in, int out) case FIO_DISCONNECT: hdr.cop = FIO_DISCONNECTED; IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - break; + free(buf); + return; case FIO_GET_ASYNC_ERROR: fio_get_async_error_impl(out); break; diff --git a/src/utils/remote.c b/src/utils/remote.c index 2bfd24d1e..046ebd818 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -147,6 +147,9 @@ bool launch_agent(void) ssh_argv[ssh_argc++] = "-o"; ssh_argv[ssh_argc++] = "Compression=no"; + ssh_argv[ssh_argc++] = "-o"; + ssh_argv[ssh_argc++] = "ControlMaster=no"; + ssh_argv[ssh_argc++] = "-o"; ssh_argv[ssh_argc++] = "LogLevel=error"; From acc8edcd62d399d972e9dab8df8ecd85dbeb0fa2 Mon Sep 17 00:00:00 2001 From: avaness Date: Thu, 16 Jun 2022 11:46:19 +0300 Subject: [PATCH 272/525] minor hotfix for OptionTest.test_help_6, OptionTest.test_version_2 and tests/Readme.md FAQ (#494) Co-authored-by: Ivan Lazarev --- tests/Readme.md | 31 ++++++++++++++++++++++++++++++- tests/expected/option_help_ru.out | 1 + tests/option.py | 2 +- 3 files changed, 32 insertions(+), 2 deletions(-) diff --git a/tests/Readme.md b/tests/Readme.md index 500ed7c7a..f980b6aef 100644 --- a/tests/Readme.md +++ b/tests/Readme.md @@ -1,4 +1,4 @@ -[see wiki](https://confluence.postgrespro.ru/display/DEV/pg_probackup) +****[see wiki](https://confluence.postgrespro.ru/display/DEV/pg_probackup) ``` Note: For now these tests work on Linux and "kinda" work on Windows @@ -50,3 +50,32 @@ Usage: export PG_CONFIG=/path/to/pg_config python -m unittest [-v] tests[.specific_module][.class.test] ``` + +### Troubleshooting FAQ + +#### python test failures +1. Test failure reason like +``` +testgres.exceptions.QueryException ERROR: could not open extension control file "/home/avaness/postgres/postgres.build/share/extension/amcheck.control": No such file or directory +``` + +*Solution*: you have no `/contrib/` extensions installed + +```commandline +cd +make world install +``` + +2. Test failure + +``` +FAIL: test_help_6 (tests.option.OptionTest) +``` + +*Solution*: you didn't configure postgres build with `--enable-nls` + +```commandline +cd +make distclean + --enable-nls +``` diff --git a/tests/expected/option_help_ru.out b/tests/expected/option_help_ru.out index ee8da9a1c..68afb82f8 100644 --- a/tests/expected/option_help_ru.out +++ b/tests/expected/option_help_ru.out @@ -178,6 +178,7 @@ pg_probackup - утилита для управления резервным к [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] + [--dry-run] [--help] Подробнее читайте на сайте . 
diff --git a/tests/option.py b/tests/option.py index b57d7ef43..23aa97c84 100644 --- a/tests/option.py +++ b/tests/option.py @@ -24,7 +24,7 @@ def test_version_2(self): """help options""" with open(os.path.join(self.dir_path, "expected/option_version.out"), "rb") as version_out: self.assertIn( - version_out.read().decode("utf-8"), + version_out.read().decode("utf-8").strip(), self.run_pb(["--version"]) ) From e11ca786b1a466aff773ebec5fae0b88692d140d Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Thu, 16 Jun 2022 12:02:27 +0300 Subject: [PATCH 273/525] [PBCKP-153] Changed expected/option_help.out and option_help_ru.out files for the tests.option.OptionTest.test_help_1 and help_6 --- tests/expected/option_help.out | 1 + tests/expected/option_help_ru.out | 2 ++ 2 files changed, 3 insertions(+) diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index 8a1de1f67..659164250 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -86,6 +86,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [-T OLDDIR=NEWDIR] [--progress] [--external-mapping=OLDDIR=NEWDIR] [--skip-external-dirs] [--no-sync] + [-X WALDIR | --waldir=WALDIR] [-I | --incremental-mode=none|checksum|lsn] [--db-include | --db-exclude] [--remote-proto] [--remote-host] diff --git a/tests/expected/option_help_ru.out b/tests/expected/option_help_ru.out index ee8da9a1c..2e90eb297 100644 --- a/tests/expected/option_help_ru.out +++ b/tests/expected/option_help_ru.out @@ -86,6 +86,7 @@ pg_probackup - утилита для управления резервным к [-T OLDDIR=NEWDIR] [--progress] [--external-mapping=OLDDIR=NEWDIR] [--skip-external-dirs] [--no-sync] + [-X WALDIR | --waldir=WALDIR] [-I | --incremental-mode=none|checksum|lsn] [--db-include | --db-exclude] [--remote-proto] [--remote-host] @@ -178,6 +179,7 @@ pg_probackup - утилита для управления резервным к [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] + [--dry-run] [--help] Подробнее читайте на сайте . 
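
Note: the preceding patches introduce and document the new -X / --waldir restore option. A minimal usage sketch (the backup catalog path, instance name, data directory, and WAL directory below are illustrative only, not taken from the patches; the WAL location must be an absolute path):

    pg_probackup restore -B /mnt/backups --instance node -D /var/lib/pgsql/data -X /mnt/fast_ssd/pg_wal

The restored cluster then keeps its write-ahead log in the given directory, with pg_wal created as a symlink to it, which is what test_restore_with_waldir verifies.
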
From 55238d572fb4d463ed3336ffa083df5260bdd2ee Mon Sep 17 00:00:00 2001
From: Sofia Kopikova
Date: Mon, 20 Jun 2022 13:44:42 +0300
Subject: [PATCH 274/525] [PBCKP-120] skip partitioned indexes for checkdb --amcheck

Tags: pg_probackup
---
 src/checkdb.c    | 10 +++++++---
 tests/checkdb.py |  9 +++++++++
 2 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/src/checkdb.c b/src/checkdb.c
index 177fc3cc7..1133a7b5d 100644
--- a/src/checkdb.c
+++ b/src/checkdb.c
@@ -461,7 +461,9 @@ get_index_list(const char *dbname, bool first_db_with_amcheck,
 				"LEFT JOIN pg_catalog.pg_class cls ON idx.indexrelid=cls.oid "
 				"LEFT JOIN pg_catalog.pg_namespace nmspc ON cls.relnamespace=nmspc.oid "
 				"LEFT JOIN pg_catalog.pg_am am ON cls.relam=am.oid "
-				"WHERE am.amname='btree' AND cls.relpersistence != 't' "
+				"WHERE am.amname='btree' "
+				"AND cls.relpersistence != 't' "
+				"AND cls.relkind != 'I' "
 				"ORDER BY nmspc.nspname DESC",
 				0, NULL);
 	}
@@ -473,8 +475,10 @@ get_index_list(const char *dbname, bool first_db_with_amcheck,
 				"LEFT JOIN pg_catalog.pg_class cls ON idx.indexrelid=cls.oid "
 				"LEFT JOIN pg_catalog.pg_namespace nmspc ON cls.relnamespace=nmspc.oid "
 				"LEFT JOIN pg_catalog.pg_am am ON cls.relam=am.oid "
-				"WHERE am.amname='btree' AND cls.relpersistence != 't' AND "
-				"(cls.reltablespace IN "
+				"WHERE am.amname='btree' "
+				"AND cls.relpersistence != 't' "
+				"AND cls.relkind != 'I' "
+				"AND (cls.reltablespace IN "
 				"(SELECT oid from pg_catalog.pg_tablespace where spcname <> 'pg_global') "
 				"OR cls.reltablespace = 0) "
 				"ORDER BY nmspc.nspname DESC",
diff --git a/tests/checkdb.py b/tests/checkdb.py
index 9b7adcd71..4608366be 100644
--- a/tests/checkdb.py
+++ b/tests/checkdb.py
@@ -36,6 +36,15 @@ def test_checkdb_amcheck_only_sanity(self):
         node.safe_psql(
             "postgres",
             "create index on t_heap(id)")
+
+        node.safe_psql(
+            "postgres",
+            "create table idxpart (a int) "
+            "partition by range (a)")
+
+        node.safe_psql(
+            "postgres",
+            "create index on idxpart(a)")

         try:
             node.safe_psql(

From 7e16642b663ccf66507b5fa7a270c7063db44633 Mon Sep 17 00:00:00 2001
From: avaness
Date: Wed, 22 Jun 2022 12:54:20 +0300
Subject: [PATCH 275/525] [PBCKP-165] get_control_value() int64 buffer vulnerability fix (#496)

* [PBCKP-165] get_control_value() int64 buffer vulnerability fix
- added output buffer size limit check
- split into get_control_value_str() & get_control_value_int64() api
- included <assert.h> for windows build

Co-authored-by: Ivan Lazarev
---
 src/catalog.c      |  32 ++++++------
 src/dir.c          | 125 +++++++++++++++++++++++++--------------------
 src/pg_probackup.h |   5 +-
 3 files changed, 90 insertions(+), 72 deletions(-)

diff --git a/src/catalog.c b/src/catalog.c
index b4ed8c189..9d817913e 100644
--- a/src/catalog.c
+++ b/src/catalog.c
@@ -1084,15 +1084,15 @@ get_backup_filelist(pgBackup *backup, bool strict)
 		COMP_FILE_CRC32(true, content_crc, buf, strlen(buf));

-		get_control_value(buf, "path", path, NULL, true);
-		get_control_value(buf, "size", NULL, &write_size, true);
-		get_control_value(buf, "mode", NULL, &mode, true);
-		get_control_value(buf, "is_datafile", NULL, &is_datafile, true);
-		get_control_value(buf, "is_cfs", NULL, &is_cfs, false);
-		get_control_value(buf, "crc", NULL, &crc, true);
-		get_control_value(buf, "compress_alg", compress_alg_string, NULL, false);
-		get_control_value(buf, "external_dir_num", NULL, &external_dir_num, false);
-		get_control_value(buf, "dbOid", NULL, &dbOid, false);
+		get_control_value_str(buf, "path", path, sizeof(path),true);
+		get_control_value_int64(buf, "size", &write_size, true);
get_control_value_int64(buf, "mode", &mode, true); + get_control_value_int64(buf, "is_datafile", &is_datafile, true); + get_control_value_int64(buf, "is_cfs", &is_cfs, false); + get_control_value_int64(buf, "crc", &crc, true); + get_control_value_str(buf, "compress_alg", compress_alg_string, sizeof(compress_alg_string), false); + get_control_value_int64(buf, "external_dir_num", &external_dir_num, false); + get_control_value_int64(buf, "dbOid", &dbOid, false); file = pgFileInit(path); file->write_size = (int64) write_size; @@ -1107,28 +1107,28 @@ get_backup_filelist(pgBackup *backup, bool strict) /* * Optional fields */ - if (get_control_value(buf, "linked", linked, NULL, false) && linked[0]) + if (get_control_value_str(buf, "linked", linked, sizeof(linked), false) && linked[0]) { file->linked = pgut_strdup(linked); canonicalize_path(file->linked); } - if (get_control_value(buf, "segno", NULL, &segno, false)) + if (get_control_value_int64(buf, "segno", &segno, false)) file->segno = (int) segno; - if (get_control_value(buf, "n_blocks", NULL, &n_blocks, false)) + if (get_control_value_int64(buf, "n_blocks", &n_blocks, false)) file->n_blocks = (int) n_blocks; - if (get_control_value(buf, "n_headers", NULL, &n_headers, false)) + if (get_control_value_int64(buf, "n_headers", &n_headers, false)) file->n_headers = (int) n_headers; - if (get_control_value(buf, "hdr_crc", NULL, &hdr_crc, false)) + if (get_control_value_int64(buf, "hdr_crc", &hdr_crc, false)) file->hdr_crc = (pg_crc32) hdr_crc; - if (get_control_value(buf, "hdr_off", NULL, &hdr_off, false)) + if (get_control_value_int64(buf, "hdr_off", &hdr_off, false)) file->hdr_off = hdr_off; - if (get_control_value(buf, "hdr_size", NULL, &hdr_size, false)) + if (get_control_value_int64(buf, "hdr_size", &hdr_size, false)) file->hdr_size = (int) hdr_size; parray_append(files, file); diff --git a/src/dir.c b/src/dir.c index 4ebe0939b..e76122ae7 100644 --- a/src/dir.c +++ b/src/dir.c @@ -8,6 +8,7 @@ *------------------------------------------------------------------------- */ +#include #include "pg_probackup.h" #include "utils/file.h" @@ -130,6 +131,9 @@ static void opt_path_map(ConfigOption *opt, const char *arg, TablespaceList *list, const char *type); static void cleanup_tablespace(const char *path); +static void control_string_bad_format(const char* str); + + /* Tablespace mapping */ static TablespaceList tablespace_dirs = {NULL, NULL}; /* Extra directories mapping */ @@ -1467,7 +1471,7 @@ get_external_remap(char *current_dir) return current_dir; } -/* Parsing states for get_control_value() */ +/* Parsing states for get_control_value_str() */ #define CONTROL_WAIT_NAME 1 #define CONTROL_INNAME 2 #define CONTROL_WAIT_COLON 3 @@ -1481,26 +1485,62 @@ get_external_remap(char *current_dir) * The line has the following format: * {"name1":"value1", "name2":"value2"} * - * The value will be returned to "value_str" as string if it is not NULL. If it - * is NULL the value will be returned to "value_int64" as int64. + * The value will be returned in "value_int64" as int64. + * + * Returns true if the value was found in the line and parsed. 
+ */ +bool +get_control_value_int64(const char *str, const char *name, int64 *value_int64, bool is_mandatory) +{ + + char buf_int64[32]; + + assert(value_int64); + + /* Set default value */ + *value_int64 = 0; + + if (!get_control_value_str(str, name, buf_int64, sizeof(buf_int64), is_mandatory)) + return false; + + if (!parse_int64(buf_int64, value_int64, 0)) + { + /* We assume that too big value is -1 */ + if (errno == ERANGE) + *value_int64 = BYTES_INVALID; + else + control_string_bad_format(str); + return false; + } + + return true; +} + +/* + * Get value from json-like line "str" of backup_content.control file. + * + * The line has the following format: + * {"name1":"value1", "name2":"value2"} + * + * The value will be returned to "value_str" as string. * * Returns true if the value was found in the line. */ + bool -get_control_value(const char *str, const char *name, - char *value_str, int64 *value_int64, bool is_mandatory) +get_control_value_str(const char *str, const char *name, + char *value_str, size_t value_str_size, bool is_mandatory) { int state = CONTROL_WAIT_NAME; char *name_ptr = (char *) name; char *buf = (char *) str; - char buf_int64[32], /* Buffer for "value_int64" */ - *buf_int64_ptr = buf_int64; + char *const value_str_start = value_str; - /* Set default values */ - if (value_str) - *value_str = '\0'; - else if (value_int64) - *value_int64 = 0; + assert(value_str); + assert(value_str_size > 0); + + /* Set default value */ + *value_str = '\0'; while (*buf) { @@ -1510,7 +1550,7 @@ get_control_value(const char *str, const char *name, if (*buf == '"') state = CONTROL_INNAME; else if (IsAlpha(*buf)) - goto bad_format; + control_string_bad_format(str); break; case CONTROL_INNAME: /* Found target field. Parse value. */ @@ -1529,57 +1569,32 @@ get_control_value(const char *str, const char *name, if (*buf == ':') state = CONTROL_WAIT_VALUE; else if (!IsSpace(*buf)) - goto bad_format; + control_string_bad_format(str); break; case CONTROL_WAIT_VALUE: if (*buf == '"') { state = CONTROL_INVALUE; - buf_int64_ptr = buf_int64; } else if (IsAlpha(*buf)) - goto bad_format; + control_string_bad_format(str); break; case CONTROL_INVALUE: /* Value was parsed, exit */ if (*buf == '"') { - if (value_str) - { - *value_str = '\0'; - } - else if (value_int64) - { - /* Length of buf_uint64 should not be greater than 31 */ - if (buf_int64_ptr - buf_int64 >= 32) - elog(ERROR, "field \"%s\" is out of range in the line %s of the file %s", - name, str, DATABASE_FILE_LIST); - - *buf_int64_ptr = '\0'; - if (!parse_int64(buf_int64, value_int64, 0)) - { - /* We assume that too big value is -1 */ - if (errno == ERANGE) - *value_int64 = BYTES_INVALID; - else - goto bad_format; - } - } - + *value_str = '\0'; return true; } else { - if (value_str) - { - *value_str = *buf; - value_str++; - } - else - { - *buf_int64_ptr = *buf; - buf_int64_ptr++; + /* verify if value_str not exceeds value_str_size limits */ + if (value_str - value_str_start >= value_str_size - 1) { + elog(ERROR, "field \"%s\" is out of range in the line %s of the file %s", + name, str, DATABASE_FILE_LIST); } + *value_str = *buf; + value_str++; } break; case CONTROL_WAIT_NEXT_NAME: @@ -1596,18 +1611,20 @@ get_control_value(const char *str, const char *name, /* There is no close quotes */ if (state == CONTROL_INNAME || state == CONTROL_INVALUE) - goto bad_format; + control_string_bad_format(str); /* Did not find target field */ if (is_mandatory) elog(ERROR, "field \"%s\" is not found in the line %s of the file %s", name, str, DATABASE_FILE_LIST); 
 	return false;
+}

-bad_format:
-	elog(ERROR, "%s file has invalid format in line %s",
-		 DATABASE_FILE_LIST, str);
-	return false;	/* Make compiler happy */
+static void
+control_string_bad_format(const char* str)
+{
+	elog(ERROR, "%s file has invalid format in line %s",
+		 DATABASE_FILE_LIST, str);
 }

 /*
@@ -1841,8 +1858,8 @@ read_database_map(pgBackup *backup)
 		db_map_entry *db_entry = (db_map_entry *) pgut_malloc(sizeof(db_map_entry));

-		get_control_value(buf, "dbOid", NULL, &dbOid, true);
-		get_control_value(buf, "datname", datname, NULL, true);
+		get_control_value_int64(buf, "dbOid", &dbOid, true);
+		get_control_value_str(buf, "datname", datname, sizeof(datname), true);

 		db_entry->dbOid = dbOid;
 		db_entry->datname = pgut_strdup(datname);
diff --git a/src/pg_probackup.h b/src/pg_probackup.h
index 2c4c61036..7eb62466f 100644
--- a/src/pg_probackup.h
+++ b/src/pg_probackup.h
@@ -1010,8 +1010,9 @@ extern CompressAlg parse_compress_alg(const char *arg);
 extern const char* deparse_compress_alg(int alg);

 /* in dir.c */
-extern bool get_control_value(const char *str, const char *name,
-							  char *value_str, int64 *value_int64, bool is_mandatory);
+extern bool get_control_value_int64(const char *str, const char *name, int64 *value_int64, bool is_mandatory);
+extern bool get_control_value_str(const char *str, const char *name,
+								  char *value_str, size_t value_str_size, bool is_mandatory);
 extern void dir_list_file(parray *files, const char *root, bool exclude,
 						  bool follow_symlink, bool add_root, bool backup_logs,
 						  bool skip_hidden, int external_dir_num, fio_location location);

From 039e3c86786737264366b9a8bfcc675e10afeec4 Mon Sep 17 00:00:00 2001
From: "d.lepikhova"
Date: Wed, 22 Jun 2022 14:36:55 +0500
Subject: [PATCH 276/525] Add a check for the enable-nls option in configure

For test_help_6 to work correctly, we need to skip this test if PostgreSQL
was configured without --enable-nls
---
 tests/helpers/ptrack_helpers.py | 14 ++++++++++++++
 tests/option.py                 | 16 ++++++++++------
 2 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py
index ffb87c5ec..f2d316161 100644
--- a/tests/helpers/ptrack_helpers.py
+++ b/tests/helpers/ptrack_helpers.py
@@ -101,6 +101,19 @@ def is_enterprise():
     else:
         return False

+def enable_nls():
+    cmd = [os.environ['PG_CONFIG'], '--configure']
+
+    p = subprocess.Popen(
+        cmd,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE
+    )
+    if b'enable-nls' in p.communicate()[0]:
+        return True
+    else:
+        return False
+

 class ProbackupException(Exception):
     def __init__(self, message, cmd):
@@ -147,6 +160,7 @@ def slow_start(self, replica=False):
 class ProbackupTest(object):
     # Class attributes
     enterprise = is_enterprise()
+    enable_nls = enable_nls()

     def __init__(self, *args, **kwargs):
         super(ProbackupTest, self).__init__(*args, **kwargs)
diff --git a/tests/option.py b/tests/option.py
index 23aa97c84..88e72ffd7 100644
--- a/tests/option.py
+++ b/tests/option.py
@@ -231,9 +231,13 @@ def test_options_5(self):
     # @unittest.skip("skip")
     def test_help_6(self):
         """help options"""
-        self.test_env['LC_ALL'] = 'ru_RU.utf-8'
-        with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out:
-            self.assertEqual(
-                self.run_pb(["--help"]),
-                help_out.read().decode("utf-8")
-            )
+        if ProbackupTest.enable_nls:
+            self.test_env['LC_ALL'] = 'ru_RU.utf-8'
+            with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out:
+                self.assertEqual(
+                    self.run_pb(["--help"]),
+                    help_out.read().decode("utf-8")
+                )
+        else:
+            return unittest.skip(
+                'You need to configure PostgreSQL with the --enable-nls option for this test')

From 61cd6209772c8ac8ec34a80444a074f66650a4bf Mon Sep 17 00:00:00 2001
From: Victor Spirin
Date: Fri, 24 Jun 2022 11:36:56 +0300
Subject: [PATCH 277/525] [PBCKP-153] global variable waldir_path renamed to gl_waldir_path

---
 src/pg_probackup.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/src/pg_probackup.c b/src/pg_probackup.c
index 2c8100b83..193cd9c39 100644
--- a/src/pg_probackup.c
+++ b/src/pg_probackup.c
@@ -122,7 +122,7 @@ static parray *datname_include_list = NULL;
 /* arrays for --exclude-path's */
 static parray *exclude_absolute_paths_list = NULL;
 static parray *exclude_relative_paths_list = NULL;
-static char* waldir_path = NULL;
+static char* gl_waldir_path = NULL;

 /* checkdb options */
 bool need_amcheck = false;
@@ -239,7 +239,7 @@ static ConfigOption cmd_options[] =
 	{ 's', 160, "primary-conninfo",	&primary_conninfo,	SOURCE_CMD_STRICT },
 	{ 's', 'S', "primary-slot-name",&replication_slot,	SOURCE_CMD_STRICT },
 	{ 'f', 'I', "incremental-mode", opt_incr_restore_mode,	SOURCE_CMD_STRICT },
-	{ 's', 'X', "waldir", &waldir_path, SOURCE_CMD_STRICT },
+	{ 's', 'X', "waldir", &gl_waldir_path, SOURCE_CMD_STRICT },
 	/* checkdb options */
 	{ 'b', 195, "amcheck", &need_amcheck, SOURCE_CMD_STRICT },
 	{ 'b', 196, "heapallindexed", &heapallindexed, SOURCE_CMD_STRICT },
@@ -757,19 +757,19 @@ main(int argc, char *argv[])
 			restore_params->partial_db_list = datname_include_list;
 		}

-		if (waldir_path)
+		if (gl_waldir_path)
 		{
 			/* clean up xlog directory name, check it's absolute */
-			canonicalize_path(waldir_path);
-			if (!is_absolute_path(waldir_path))
+			canonicalize_path(gl_waldir_path);
+			if (!is_absolute_path(gl_waldir_path))
 			{
 				elog(ERROR, "WAL directory location must be an absolute path");
 			}
-			if (strlen(waldir_path) > MAXPGPATH)
+			if (strlen(gl_waldir_path) > MAXPGPATH)
 				elog(ERROR, "Value specified to --waldir is too long");

 		}
-		restore_params->waldir = waldir_path;
+		restore_params->waldir = gl_waldir_path;

 	}

From a3ac7d5e7a8d6ebeafda692ed1031eb2b34a8ab4 Mon Sep 17 00:00:00 2001
From: "d.lepikhova"
Date: Fri, 24 Jun 2022 17:12:26 +0500
Subject: [PATCH 278/525] Add grants for pgpro_edition

---
 tests/backup.py  | 15 +++++++++------
 tests/checkdb.py |  1 +
 tests/ptrack.py  |  6 ++----
 tests/restore.py |  5 ++---
 4 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/tests/backup.py b/tests/backup.py
index 682409015..b7fc4a924 100644
--- a/tests/backup.py
+++ b/tests/backup.py
@@ -1915,6 +1915,7 @@ def test_backup_with_least_privileges_role(self):
                 "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; "
                 "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                 "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
+                "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;"
             )
         # >= 10
         else:
@@ -1953,6 +1954,7 @@ def test_backup_with_least_privileges_role(self):
                 "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                 "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                 "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
+                "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;"
             )

         if self.ptrack:
@@ -1966,9 +1968,6 @@ def test_backup_with_least_privileges_role(self):
                 "GRANT EXECUTE ON FUNCTION ptrack.ptrack_init_lsn() TO backup;")

         if ProbackupTest.enterprise:
-            node.safe_psql(
-                "backupdb",
-                "GRANT EXECUTE ON FUNCTION 
pg_catalog.pgpro_edition() TO backup") node.safe_psql( "backupdb", @@ -3052,7 +3051,9 @@ def test_missing_replication_permission(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" + ) # >= 10 else: node.safe_psql( @@ -3075,12 +3076,12 @@ def test_missing_replication_permission(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" ) if ProbackupTest.enterprise: node.safe_psql( "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") sleep(2) @@ -3185,6 +3186,7 @@ def test_missing_replication_permission_1(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" ) # >= 10 else: @@ -3208,12 +3210,13 @@ def test_missing_replication_permission_1(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" + ) if ProbackupTest.enterprise: node.safe_psql( "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") replica.promote() diff --git a/tests/checkdb.py b/tests/checkdb.py index e066c5777..fec2e792c 100644 --- a/tests/checkdb.py +++ b/tests/checkdb.py @@ -698,6 +698,7 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' # 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; ' ) # PG 10 elif self.get_version(node) > 100000 and self.get_version(node) < 110000: diff --git a/tests/ptrack.py b/tests/ptrack.py index 5878f0700..5ecc669bb 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -582,6 +582,7 @@ def test_ptrack_unprivileged(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; ' ) # >= 10 else: @@ -618,6 +619,7 @@ def test_ptrack_unprivileged(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION 
pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; ' ) node.safe_psql( @@ -635,10 +637,6 @@ def test_ptrack_unprivileged(self): "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup") if ProbackupTest.enterprise: - node.safe_psql( - "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup") - node.safe_psql( "backupdb", "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") diff --git a/tests/restore.py b/tests/restore.py index bbdadeb23..a9fe869e6 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -3268,6 +3268,7 @@ def test_missing_database_map(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" ) # >= 10 else: @@ -3305,6 +3306,7 @@ def test_missing_database_map(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" ) if self.ptrack: @@ -3319,9 +3321,6 @@ def test_missing_database_map(self): "CREATE EXTENSION ptrack WITH SCHEMA ptrack") if ProbackupTest.enterprise: - node.safe_psql( - "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup") node.safe_psql( "backupdb", From 55d3fa8979ec00eda90e36594a6976ae739d2876 Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Wed, 29 Jun 2022 11:08:05 +0500 Subject: [PATCH 279/525] Rename enable_nls() function in ptrack_helpers.p is_nls_enabled() --- tests/helpers/ptrack_helpers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index f2d316161..b5f1fe5b2 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -101,7 +101,7 @@ def is_enterprise(): else: return False -def enable_nls(): +def is_nls_enabled(): cmd = [os.environ['PG_CONFIG'], '--configure'] p = subprocess.Popen( @@ -160,7 +160,7 @@ def slow_start(self, replica=False): class ProbackupTest(object): # Class attributes enterprise = is_enterprise() - enable_nls = enable_nls() + enable_nls = is_nls_enabled() def __init__(self, *args, **kwargs): super(ProbackupTest, self).__init__(*args, **kwargs) From f544da1ecde143c57bda4205470267bed1a6056e Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Wed, 29 Jun 2022 22:17:31 +0500 Subject: [PATCH 280/525] Shorthand return-expression --- tests/helpers/ptrack_helpers.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index b5f1fe5b2..18fb3fc2e 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -109,10 +109,7 @@ def is_nls_enabled(): stdout=subprocess.PIPE, stderr=subprocess.PIPE ) - if b'enable-nls' in p.communicate()[0]: - return True - else: - return False + return b'enable-nls' in p.communicate()[0] class ProbackupException(Exception): From 32aae17928d165be7a8a19b015b87f8b885bc5dd Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Thu, 30 Jun 2022 02:28:29 +0300 Subject: [PATCH 281/525] [PBCKP-220] minor updates for gdb checks, checking CI tests --- tests/archive.py | 2 ++ 
tests/delta.py | 7 ++----- tests/helpers/ptrack_helpers.py | 17 +++++++++++++++-- tests/pgpro2068.py | 2 ++ tests/ptrack.py | 2 ++ tests/replica.py | 30 ++++++++++-------------------- 6 files changed, 33 insertions(+), 27 deletions(-) diff --git a/tests/archive.py b/tests/archive.py index 22b9d8693..e01b7d37e 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -290,6 +290,8 @@ def test_pgpro434_4(self): Check pg_stop_backup_timeout, libpq-timeout requested. Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7 """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( diff --git a/tests/delta.py b/tests/delta.py index f365b6f9b..82fb714f7 100644 --- a/tests/delta.py +++ b/tests/delta.py @@ -472,11 +472,8 @@ def test_delta_vacuum_full(self): make node, make full and delta stream backups, restore them and check data correctness """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index ffb87c5ec..b8449abe4 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -180,8 +180,8 @@ def __init__(self, *args, **kwargs): self.test_env['LC_MESSAGES'] = 'C' self.test_env['LC_TIME'] = 'C' - self.gdb = 'PGPROBACKUP_GDB' in os.environ and \ - os.environ['PGPROBACKUP_GDB'] == 'ON' + self.gdb = 'PGPROBACKUP_GDB' in self.test_env and \ + self.test_env['PGPROBACKUP_GDB'] == 'ON' self.paranoia = 'PG_PROBACKUP_PARANOIA' in self.test_env and \ self.test_env['PG_PROBACKUP_PARANOIA'] == 'ON' @@ -810,6 +810,7 @@ def run_pb(self, command, asynchronous=False, gdb=False, old_binary=False, retur if self.verbose: print(self.cmd) if gdb: + #TODO REVIEW XXX no self parameter return GDBobj([binary_path] + command, self.verbose) if asynchronous: return subprocess.Popen( @@ -1861,8 +1862,15 @@ def compare_pgdata(self, original_pgdata, restored_pgdata, exclusion_dict = dict self.assertFalse(fail, error_message) def gdb_attach(self, pid): + #TODO REVIEW XXX no self parameter return GDBobj([str(pid)], self.verbose, attach=True) + def _check_gdb_flag_or_skip_test(self): + if not self.gdb: + self.skipTest( + "Specify PGPROBACKUP_GDB and build without " + "optimizations for run this test" + ) class GdbException(Exception): def __init__(self, message=False): @@ -1877,6 +1885,11 @@ def __init__(self, cmd, verbose, attach=False): self.verbose = verbose self.output = '' + # Check gdb flag is set up + # if not self.gdb: + # raise GdbException("No `PGPROBACKUP_GDB=on` is set, " + # "test should call ProbackupTest::check_gdb_flag_or_skip_test() on its start " + # "and be skipped") # Check gdb presense try: gdb_version, _ = subprocess.Popen( diff --git a/tests/pgpro2068.py b/tests/pgpro2068.py index a80d317d4..b76345b89 100644 --- a/tests/pgpro2068.py +++ b/tests/pgpro2068.py @@ -18,6 +18,8 @@ def test_minrecpoint_on_replica(self): """ https://jira.postgrespro.ru/browse/PGPRO-2068 """ + self._check_gdb_flag_or_skip_test() + if not self.gdb: self.skipTest( "Specify PGPROBACKUP_GDB and build without " diff --git a/tests/ptrack.py b/tests/ptrack.py index 5878f0700..08ea90f8d 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -824,6 
+824,8 @@ def test_ptrack_uncommitted_xact(self): def test_ptrack_vacuum_full(self): """make node, make full and ptrack stream backups, restore them and check data correctness""" + self._check_gdb_flag_or_skip_test() + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( base_dir=os.path.join(module_name, self.fname, 'node'), diff --git a/tests/replica.py b/tests/replica.py index 45eed3fb4..ba7076fab 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -634,11 +634,8 @@ def test_replica_promote(self): def test_replica_stop_lsn_null_offset(self): """ """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( @@ -722,11 +719,8 @@ def test_replica_stop_lsn_null_offset(self): def test_replica_stop_lsn_null_offset_next_record(self): """ """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( @@ -828,6 +822,8 @@ def test_replica_stop_lsn_null_offset_next_record(self): def test_archive_replica_null_offset(self): """ """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( @@ -998,11 +994,8 @@ def test_replica_toast(self): make archive master, take full and page archive backups from master, set replica, make archive backup from replica """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( @@ -1104,11 +1097,8 @@ def test_replica_toast(self): def test_start_stop_lsn_in_the_same_segno(self): """ """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( From 26939d67c444156bfea5b3701d34bd5495df0e83 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Fri, 1 Jul 2022 03:57:36 +0300 Subject: [PATCH 282/525] [PBCKP-220] removed inheritance GDBObj->ProbackupTest --- tests/helpers/ptrack_helpers.py | 26 ++++++++++++++------------ tests/replica.py | 4 ---- 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index b8449abe4..e69d1213e 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -810,8 +810,7 @@ def run_pb(self, command, asynchronous=False, gdb=False, old_binary=False, retur if self.verbose: print(self.cmd) if gdb: - #TODO REVIEW XXX no self parameter - return GDBobj([binary_path] + command, self.verbose) + return GDBobj([binary_path] + command, self) if asynchronous: return subprocess.Popen( [binary_path] + command, @@ -1862,8 +1861,7 @@ def compare_pgdata(self, original_pgdata, restored_pgdata, exclusion_dict = dict self.assertFalse(fail, error_message) 
def gdb_attach(self, pid): - #TODO REVIEW XXX no self parameter - return GDBobj([str(pid)], self.verbose, attach=True) + return GDBobj([str(pid)], self, attach=True) def _check_gdb_flag_or_skip_test(self): if not self.gdb: @@ -1872,24 +1870,28 @@ def _check_gdb_flag_or_skip_test(self): "optimizations for run this test" ) + class GdbException(Exception): - def __init__(self, message=False): + def __init__(self, message="False"): self.message = message def __str__(self): return '\n ERROR: {0}\n'.format(repr(self.message)) -class GDBobj(ProbackupTest): - def __init__(self, cmd, verbose, attach=False): - self.verbose = verbose +#TODO REVIEW XXX no inheritance needed +# class GDBobj(ProbackupTest): +class GDBobj: + # TODO REVIEW XXX Type specification env:ProbackupTest is only for python3, is it ok? + def __init__(self, cmd, env: ProbackupTest, attach=False): + self.verbose = env.verbose self.output = '' # Check gdb flag is set up - # if not self.gdb: - # raise GdbException("No `PGPROBACKUP_GDB=on` is set, " - # "test should call ProbackupTest::check_gdb_flag_or_skip_test() on its start " - # "and be skipped") + if not env.gdb: + raise GdbException("No `PGPROBACKUP_GDB=on` is set, " + "test should call ProbackupTest::check_gdb_flag_or_skip_test() on its start " + "and be skipped") # Check gdb presense try: gdb_version, _ = subprocess.Popen( diff --git a/tests/replica.py b/tests/replica.py index ba7076fab..85034d501 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -719,7 +719,6 @@ def test_replica_stop_lsn_null_offset(self): def test_replica_stop_lsn_null_offset_next_record(self): """ """ - self._check_gdb_flag_or_skip_test() fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -743,7 +742,6 @@ def test_replica_stop_lsn_null_offset_next_record(self): # freeze bgwriter to get rid of RUNNING XACTS records bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] - gdb_checkpointer = self.gdb_attach(bgwriter_pid) self.backup_node(backup_dir, 'master', master) @@ -1097,7 +1095,6 @@ def test_replica_toast(self): def test_start_stop_lsn_in_the_same_segno(self): """ """ - self._check_gdb_flag_or_skip_test() fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -1121,7 +1118,6 @@ def test_start_stop_lsn_in_the_same_segno(self): # freeze bgwriter to get rid of RUNNING XACTS records bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] - gdb_checkpointer = self.gdb_attach(bgwriter_pid) self.backup_node(backup_dir, 'master', master, options=['--stream']) From 9c6e3ce3f751162cf7ac5405d0cc4ff462324181 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Fri, 1 Jul 2022 13:52:20 +0300 Subject: [PATCH 283/525] [PBCKP-220] all gdb tests fixup --- .travis.yml | 1 + tests/archive.py | 2 ++ tests/backup.py | 18 ++++++++++++++++++ tests/checkdb.py | 2 ++ tests/helpers/ptrack_helpers.py | 3 --- tests/locking.py | 16 ++++++++++++++++ tests/logging.py | 4 ++++ tests/merge.py | 32 ++++++++++++++++++++++++++++++++ tests/replica.py | 1 + tests/restore.py | 4 ++++ tests/retention.py | 12 ++++++++++++ tests/validate.py | 2 ++ 12 files changed, 94 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 26b2bc4e2..bac8a2c0d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -34,6 +34,7 @@ env: - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE + - PG_VERSION=14 PG_BRANCH=REL_14_STABLE 
PTRACK_PATCH_PG_BRANCH=REL_14_STABLE PGPROBACKUP_GDB=ON PG_PROBACKUP_TEST_BASIC=OFF # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=archive # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup diff --git a/tests/archive.py b/tests/archive.py index e01b7d37e..cd8d4404f 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -228,6 +228,8 @@ def test_pgpro434_3(self): Check pg_stop_backup_timeout, needed backup_timeout Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7 """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( diff --git a/tests/backup.py b/tests/backup.py index 682409015..20ac480e0 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -1095,6 +1095,8 @@ def test_tablespace_handling_2(self): # @unittest.skip("skip") def test_drop_rel_during_full_backup(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1244,6 +1246,8 @@ def test_drop_db_during_full_backup(self): # @unittest.skip("skip") def test_drop_rel_during_backup_delta(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1313,6 +1317,8 @@ def test_drop_rel_during_backup_delta(self): # @unittest.skip("skip") def test_drop_rel_during_backup_page(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1445,6 +1451,8 @@ def test_basic_temp_slot_for_stream_backup(self): # @unittest.skip("skip") def test_backup_concurrent_drop_table(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1579,6 +1587,8 @@ def test_pg_11_adjusted_wal_segment_size(self): # @unittest.skip("skip") def test_sigint_handling(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1618,6 +1628,8 @@ def test_sigint_handling(self): # @unittest.skip("skip") def test_sigterm_handling(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1656,6 +1668,8 @@ def test_sigterm_handling(self): # @unittest.skip("skip") def test_sigquit_handling(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -2906,6 +2920,8 @@ def test_incr_backup_filenode_map(self): # @unittest.skip("skip") def test_missing_wal_segment(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -3292,6 +3308,8 @@ def test_basic_backup_default_transaction_read_only(self): # @unittest.skip("skip") def test_backup_atexit(self): """""" + 
self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( diff --git a/tests/checkdb.py b/tests/checkdb.py index 9b7adcd71..68dec14b6 100644 --- a/tests/checkdb.py +++ b/tests/checkdb.py @@ -17,6 +17,8 @@ class CheckdbTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_checkdb_amcheck_only_sanity(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index e69d1213e..e3036d9c4 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1879,10 +1879,7 @@ def __str__(self): return '\n ERROR: {0}\n'.format(repr(self.message)) -#TODO REVIEW XXX no inheritance needed -# class GDBobj(ProbackupTest): class GDBobj: - # TODO REVIEW XXX Type specification env:ProbackupTest is only for python3, is it ok? def __init__(self, cmd, env: ProbackupTest, attach=False): self.verbose = env.verbose self.output = '' diff --git a/tests/locking.py b/tests/locking.py index ef7aa1f25..0fe954cae 100644 --- a/tests/locking.py +++ b/tests/locking.py @@ -17,6 +17,8 @@ def test_locking_running_validate_1(self): run validate, expect it to successfully executed, concurrent RUNNING backup with pid file and active process is legal """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -72,6 +74,8 @@ def test_locking_running_validate_2(self): RUNNING backup with pid file AND without active pid is legal, but his status must be changed to ERROR and pid file is deleted """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -142,6 +146,8 @@ def test_locking_running_validate_2_specific_id(self): RUNNING backup with pid file AND without active pid is legal, but his status must be changed to ERROR and pid file is deleted """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -240,6 +246,8 @@ def test_locking_running_3(self): RUNNING backup without pid file AND without active pid is legal, his status must be changed to ERROR """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -310,6 +318,8 @@ def test_locking_restore_locked(self): Expect restore to sucseed because read-only locks do not conflict """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -352,6 +362,8 @@ def test_concurrent_delete_and_restore(self): Expect restore to fail because validation of intermediate backup is impossible """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -443,6 +455,8 @@ def test_locking_concurren_restore_and_delete(self): and stop it in the middle, delete full backup. Expect it to fail. 
""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -585,6 +599,8 @@ def test_shared_lock(self): """ Make sure that shared lock leaves no files with pids """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), diff --git a/tests/logging.py b/tests/logging.py index 47143cfb7..70ebcf6d1 100644 --- a/tests/logging.py +++ b/tests/logging.py @@ -12,6 +12,10 @@ class LogTest(ProbackupTest, unittest.TestCase): # @unittest.expectedFailure # PGPRO-2154 def test_log_rotation(self): + """ + """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), diff --git a/tests/merge.py b/tests/merge.py index fe0927f49..5f092543c 100644 --- a/tests/merge.py +++ b/tests/merge.py @@ -975,6 +975,8 @@ def test_continue_failed_merge(self): """ Check that failed MERGE can be continued """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1051,6 +1053,8 @@ def test_continue_failed_merge_with_corrupted_delta_backup(self): """ Fail merge via gdb, corrupt DELTA backup, try to continue merge """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1148,6 +1152,8 @@ def test_continue_failed_merge_2(self): """ Check that failed MERGE on delete can be continued """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1219,6 +1225,8 @@ def test_continue_failed_merge_3(self): Check that failed MERGE cannot be continued if intermediate backup is missing. 
""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1409,6 +1417,8 @@ def test_crash_after_opening_backup_control_1(self): check that crashing after opening backup.control for writing will not result in losing backup metadata """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1461,6 +1471,8 @@ def test_crash_after_opening_backup_control_2(self): for writing will not result in losing metadata about backup files TODO: rewrite """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1552,6 +1564,8 @@ def test_losing_file_after_failed_merge(self): for writing will not result in losing metadata about backup files TODO: rewrite """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1639,6 +1653,8 @@ def test_losing_file_after_failed_merge(self): def test_failed_merge_after_delete(self): """ """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1720,6 +1736,8 @@ def test_failed_merge_after_delete(self): def test_failed_merge_after_delete_1(self): """ """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1796,6 +1814,8 @@ def test_failed_merge_after_delete_1(self): def test_failed_merge_after_delete_2(self): """ """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1858,6 +1878,8 @@ def test_failed_merge_after_delete_2(self): def test_failed_merge_after_delete_3(self): """ """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -2281,6 +2303,8 @@ def test_smart_merge(self): def test_idempotent_merge(self): """ """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -2580,6 +2604,8 @@ def test_merge_page_header_map_retry(self): page header map cannot be trusted when running retry """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -2626,6 +2652,8 @@ def test_merge_page_header_map_retry(self): def test_missing_data_file(self): """ """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -2684,6 +2712,8 @@ def test_missing_data_file(self): def test_missing_non_data_file(self): """ """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -2741,6 +2771,8 @@ def test_missing_non_data_file(self): def 
test_merge_remote_mode(self): """ """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( diff --git a/tests/replica.py b/tests/replica.py index 85034d501..0a75ea173 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -719,6 +719,7 @@ def test_replica_stop_lsn_null_offset(self): def test_replica_stop_lsn_null_offset_next_record(self): """ """ + self._check_gdb_flag_or_skip_test() fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') diff --git a/tests/restore.py b/tests/restore.py index bbdadeb23..5a00bc23b 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -2379,6 +2379,8 @@ def test_pg_11_group_access(self): # @unittest.skip("skip") def test_restore_concurrent_drop_table(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -3797,6 +3799,8 @@ def test_truncate_postgresql_auto_conf(self): # @unittest.skip("skip") def test_concurrent_restore(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( diff --git a/tests/retention.py b/tests/retention.py index 19204807b..b0399a239 100644 --- a/tests/retention.py +++ b/tests/retention.py @@ -1499,6 +1499,8 @@ def test_window_error_backups_1(self): FULL -------window """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -1546,6 +1548,8 @@ def test_window_error_backups_2(self): FULL -------window """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -1588,6 +1592,8 @@ def test_window_error_backups_2(self): def test_retention_redundancy_overlapping_chains(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -1636,6 +1642,8 @@ def test_retention_redundancy_overlapping_chains(self): def test_retention_redundancy_overlapping_chains_1(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -1744,6 +1752,8 @@ def test_failed_merge_redundancy_retention(self): """ Check that retention purge works correctly with MERGING backups """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -2536,6 +2546,8 @@ def test_concurrent_running_full_backup(self): """ https://github.com/postgrespro/pg_probackup/issues/328 """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( diff --git a/tests/validate.py b/tests/validate.py index e62826388..41aa9ea23 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -3565,6 +3565,8 @@ def test_corrupt_pg_control_via_resetxlog(self): # @unittest.skip("skip") def test_validation_after_backup(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = 
os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( From 125c9292a6ddd9372263894333b96ebdbb3ac767 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Mon, 4 Jul 2022 02:37:38 +0300 Subject: [PATCH 284/525] [PBCKP-220] ALL tests with PGPROBACKUP=ON on CI --- .travis.yml | 2 +- travis/run_tests.sh | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index bac8a2c0d..e6330c4f5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -34,7 +34,7 @@ env: - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE - - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE PGPROBACKUP_GDB=ON PG_PROBACKUP_TEST_BASIC=OFF + - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE MODE=TMP # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=archive # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 37614f970..c20c95dda 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -112,6 +112,9 @@ if [ "$MODE" = "basic" ]; then echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} python3 -m unittest -v tests python3 -m unittest -v tests.init +elif [ "$MODE" = "TMP" ]; then + echo MODE=TMP + PGPROBACKUP_GDB=ON python3 -m unittest -v tests else echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} python3 -m unittest -v tests.$MODE From 3e8a08edd5f9a20dd3d6f77914cd2b9c745a7980 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Mon, 4 Jul 2022 06:04:17 +0300 Subject: [PATCH 285/525] [PBCKP-220] removed FULL tests, PGPROBACKUP=ON and other flags added on CI --- .gitignore | 1 - .travis.yml | 3 ++- tests/Readme.md | 26 +++++++------------------- tests/checkdb.py | 7 ++----- tests/pgpro2068.py | 5 ----- tests/replica.py | 7 ++----- tests/validate.py | 7 ++----- travis/run_tests.sh | 13 +++++++------ 8 files changed, 22 insertions(+), 47 deletions(-) diff --git a/.gitignore b/.gitignore index c0b4de331..502473605 100644 --- a/.gitignore +++ b/.gitignore @@ -50,7 +50,6 @@ /docker-compose.yml /Dockerfile /Dockerfile.in -/run_tests.sh /make_dockerfile.sh /backup_restore.sh diff --git a/.travis.yml b/.travis.yml index e6330c4f5..d5c9c68b7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -34,7 +34,8 @@ env: - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE - - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE MODE=TMP +# - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE MODE=FULL ENV_FLAGS=PGPROBACKUP_GDB=ON + - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE ENV_FLAGS=PGPROBACKUP_GDB=ON # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=archive # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup diff --git a/tests/Readme.md b/tests/Readme.md index f980b6aef..11c5272f9 100644 --- a/tests/Readme.md +++ b/tests/Readme.md @@ -51,31 +51,19 @@ Usage: python -m unittest [-v] tests[.specific_module][.class.test] ``` -### Troubleshooting FAQ +# Troubleshooting FAQ -#### python test failures -1. 
Test failure reason like +## Python tests failure +### 1. Could not open extension "..." ``` -testgres.exceptions.QueryException ERROR: could not open extension control file "/home/avaness/postgres/postgres.build/share/extension/amcheck.control": No such file or directory +testgres.exceptions.QueryException ERROR: could not open extension control file "/share/extension/amcheck.control": No such file or directory ``` -*Solution*: you have no `/contrib/` extensions installed +#### Solution: -```commandline -cd -make world install -``` - -2. Test failure - -``` -FAIL: test_help_6 (tests.option.OptionTest) -``` - -*Solution*: you didn't configure postgres build with `--enable-nls` +You have no `/contrib/...` extension installed, please do ```commandline cd -make distclean - --enable-nls +make install-world ``` diff --git a/tests/checkdb.py b/tests/checkdb.py index 68dec14b6..5b6dda250 100644 --- a/tests/checkdb.py +++ b/tests/checkdb.py @@ -546,11 +546,8 @@ def test_checkdb_checkunique(self): # @unittest.skip("skip") def test_checkdb_sigint_handling(self): """""" - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( diff --git a/tests/pgpro2068.py b/tests/pgpro2068.py index b76345b89..3baa0ba0b 100644 --- a/tests/pgpro2068.py +++ b/tests/pgpro2068.py @@ -20,11 +20,6 @@ def test_minrecpoint_on_replica(self): """ self._check_gdb_flag_or_skip_test() - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), diff --git a/tests/replica.py b/tests/replica.py index 0a75ea173..acf655aac 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -539,11 +539,8 @@ def test_replica_promote(self): start backup from replica, during backup promote replica check that backup is failed """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( diff --git a/tests/validate.py b/tests/validate.py index 41aa9ea23..22a03c3be 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -1089,11 +1089,8 @@ def test_validate_instance_with_several_corrupt_backups_interrupt(self): """ check that interrupt during validation is handled correctly """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), diff --git a/travis/run_tests.sh b/travis/run_tests.sh index c20c95dda..5af619f97 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -107,17 +107,18 @@ echo PGPROBACKUPBIN=${PGPROBACKUPBIN} echo PGPROBACKUP_SSH_REMOTE=${PGPROBACKUP_SSH_REMOTE} echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB} echo PG_PROBACKUP_PTRACK=${PG_PROBACKUP_PTRACK} +echo ADDITIONAL_ENV_FLAGS=${ENV_FLAGS} if [ "$MODE" = "basic" ]; then export PG_PROBACKUP_TEST_BASIC=ON echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} - python3 -m unittest -v tests - python3 -m unittest -v 
tests.init -elif [ "$MODE" = "TMP" ]; then - echo MODE=TMP - PGPROBACKUP_GDB=ON python3 -m unittest -v tests + ${ADDITIONAL_ENV_FLAGS} python3 -m unittest -v tests + ${ADDITIONAL_ENV_FLAGS} python3 -m unittest -v tests.init +#elif [ "$MODE" = "FULL" ]; then +# echo MODE=FULL +# ${ADDITIONAL_ENV_FLAGS} python3 -m unittest -v tests else echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} - python3 -m unittest -v tests.$MODE + ${ADDITIONAL_ENV_FLAGS} python3 -m unittest -v tests.$MODE fi # Generate *.gcov files From 37244019508ae3396e521f2ed87bb8c2eca9108d Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Mon, 4 Jul 2022 14:37:36 +0300 Subject: [PATCH 286/525] [PBCKP-220] final junk cleanup --- .travis.yml | 2 -- travis/run_tests.sh | 10 +++------- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/.travis.yml b/.travis.yml index d5c9c68b7..26b2bc4e2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -34,8 +34,6 @@ env: - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE -# - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE MODE=FULL ENV_FLAGS=PGPROBACKUP_GDB=ON - - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE ENV_FLAGS=PGPROBACKUP_GDB=ON # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=archive # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 5af619f97..37614f970 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -107,18 +107,14 @@ echo PGPROBACKUPBIN=${PGPROBACKUPBIN} echo PGPROBACKUP_SSH_REMOTE=${PGPROBACKUP_SSH_REMOTE} echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB} echo PG_PROBACKUP_PTRACK=${PG_PROBACKUP_PTRACK} -echo ADDITIONAL_ENV_FLAGS=${ENV_FLAGS} if [ "$MODE" = "basic" ]; then export PG_PROBACKUP_TEST_BASIC=ON echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} - ${ADDITIONAL_ENV_FLAGS} python3 -m unittest -v tests - ${ADDITIONAL_ENV_FLAGS} python3 -m unittest -v tests.init -#elif [ "$MODE" = "FULL" ]; then -# echo MODE=FULL -# ${ADDITIONAL_ENV_FLAGS} python3 -m unittest -v tests + python3 -m unittest -v tests + python3 -m unittest -v tests.init else echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} - ${ADDITIONAL_ENV_FLAGS} python3 -m unittest -v tests.$MODE + python3 -m unittest -v tests.$MODE fi # Generate *.gcov files From 94caeb1793feddc0fe13b5f94615efcd0ca776dd Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Wed, 29 Jun 2022 22:19:31 +0500 Subject: [PATCH 287/525] Shorthand return-expression --- tests/helpers/ptrack_helpers.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index a4ec7c9cf..8da802193 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -100,11 +100,7 @@ def is_enterprise(): stdout=subprocess.PIPE, stderr=subprocess.PIPE ) - if b'enterprise' in p.communicate()[0]: - return True - else: - return False - + return b'postgrespro.ru' in p.communicate()[0] class ProbackupException(Exception): def __init__(self, message, cmd): From 3071cec4f647582eac8bd58ce5624f97e90745c4 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 11 Jul 2022 14:50:22 +0300 Subject: [PATCH 288/525] fix ArchiveTest.test_pgpro434_4 --- tests/archive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/tests/archive.py b/tests/archive.py index cd8d4404f..52fb225e8 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -317,7 +317,7 @@ def test_pgpro434_4(self): gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() - self.set_auto_conf(node, {'archive_command': "'exit 1'"}) + self.set_auto_conf(node, {'archive_command': 'exit 1'}) node.reload() os.environ["PGAPPNAME"] = "foo" From 81c53ea0bbda3251ab527dc950327b86e69e7645 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Tue, 12 Jul 2022 15:04:21 +0300 Subject: [PATCH 289/525] [PBCKP-231] hotfix for python2 --- tests/helpers/ptrack_helpers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index e3036d9c4..de7742749 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1880,7 +1880,7 @@ def __str__(self): class GDBobj: - def __init__(self, cmd, env: ProbackupTest, attach=False): + def __init__(self, cmd, env, attach=False): self.verbose = env.verbose self.output = '' From 65345f20a00cc29c4661298f60b7adfd51392360 Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Mon, 1 Aug 2022 19:59:39 +0500 Subject: [PATCH 290/525] Revert "Add --start-time option into help message" This reverts commit c81c54be4cac6f900e6b73df06788f349eecb3af. --- src/help.c | 3 --- tests/expected/option_help.out | 1 - 2 files changed, 4 deletions(-) diff --git a/src/help.c b/src/help.c index 7a1a1c580..8ebe734a3 100644 --- a/src/help.c +++ b/src/help.c @@ -150,7 +150,6 @@ help_pg_probackup(void) printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n")); printf(_(" [--ttl=interval] [--expire-time=timestamp] [--note=text]\n")); - printf(_(" [--start-time]\n")); printf(_(" [--help]\n")); @@ -324,7 +323,6 @@ help_backup(void) printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n")); printf(_(" [--ttl=interval] [--expire-time=timestamp] [--note=text]\n\n")); - printf(_(" [--start-time]\n")); printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" -b, --backup-mode=backup-mode backup mode=FULL|PAGE|DELTA|PTRACK\n")); @@ -345,7 +343,6 @@ help_backup(void) printf(_(" --no-sync do not sync backed up files to disk\n")); printf(_(" --note=text add note to backup\n")); printf(_(" (example: --note='backup before app update to v13.1')\n")); - printf(_(" --start-time set time of starting backup as a parameter for naming backup\n")); printf(_("\n Logging options:\n")); printf(_(" --log-level-console=log-level-console\n")); diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index 9026b99b3..00b50d10c 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -68,7 +68,6 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. 
[--remote-port] [--remote-path] [--remote-user] [--ssh-options] [--ttl=interval] [--expire-time=timestamp] [--note=text] - [--start-time] [--help] pg_probackup restore -B backup-path --instance=instance_name From 8ce27c9713a1e63825c05a2c2ac7fe4481b69d34 Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Tue, 2 Aug 2022 18:13:11 +0500 Subject: [PATCH 291/525] Add warning about using --start-time option --- src/pg_probackup.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 15f2542b0..2738280c7 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -944,6 +944,11 @@ main(int argc, char *argv[]) current.stream = stream_wal; if (start_time == 0) start_time = current_time; + else + elog(WARNING, "Please do not use the --start-time option to start backup. " + "This is a service option required to work with other extensions. " + "We do not guarantee future support for this flag."); + /* sanity */ if (current.backup_mode == BACKUP_MODE_INVALID) From 6d1d739400501dc3a770ee451def75c032a984c6 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Thu, 4 Aug 2022 16:15:40 +0300 Subject: [PATCH 292/525] [PBCKP-220] hotfix for PtrackTest --- tests/ptrack.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/ptrack.py b/tests/ptrack.py index 08ea90f8d..c101f9361 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -24,6 +24,8 @@ def test_drop_rel_during_backup_ptrack(self): """ drop relation during ptrack backup """ + self._check_gdb_flag_or_skip_test() + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( base_dir=os.path.join(module_name, self.fname, 'node'), @@ -996,6 +998,8 @@ def test_ptrack_get_block(self): make node, make full and ptrack stream backups, restore them and check data correctness """ + self._check_gdb_flag_or_skip_test() + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( base_dir=os.path.join(module_name, self.fname, 'node'), From 2e1950a7ff24b997d2b8c054060ca3a88c4973b5 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Thu, 4 Aug 2022 17:16:17 +0300 Subject: [PATCH 293/525] [PBCKP-129] change catchup logging levels (#473) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [PBCKP-129] change catchup logging level verbosity: INFO – common information LOG – same as INFO + info about files VERBOSE – same as LOG + info about block and SQL queries --- src/archive.c | 16 ++++++++-------- src/backup.c | 35 +++++++++++++++++------------------ src/catchup.c | 30 +++++++++++++++--------------- src/data.c | 34 +++++++++++++++++----------------- src/dir.c | 16 ++++++++-------- src/merge.c | 25 ++++++++++++------------- src/restore.c | 29 ++++++++++++++--------------- src/util.c | 4 ++-- 8 files changed, 93 insertions(+), 96 deletions(-) diff --git a/src/archive.c b/src/archive.c index 0f32d9345..48114d955 100644 --- a/src/archive.c +++ b/src/archive.c @@ -3,7 +3,7 @@ * archive.c: - pg_probackup specific archive commands for archive backups. 
* * - * Portions Copyright (c) 2018-2021, Postgres Professional + * Portions Copyright (c) 2018-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -361,7 +361,7 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir, canonicalize_path(wal_file_ready); canonicalize_path(wal_file_done); /* It is ok to rename status file in archive_status directory */ - elog(VERBOSE, "Rename \"%s\" to \"%s\"", wal_file_ready, wal_file_done); + elog(LOG, "Rename \"%s\" to \"%s\"", wal_file_ready, wal_file_done); /* do not error out, if rename failed */ if (fio_rename(wal_file_ready, wal_file_done, FIO_DB_HOST) < 0) @@ -505,7 +505,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d } part_opened: - elog(VERBOSE, "Temp WAL file successfully created: \"%s\"", to_fullpath_part); + elog(LOG, "Temp WAL file successfully created: \"%s\"", to_fullpath_part); /* Check if possible to skip copying */ if (fileExists(to_fullpath, FIO_BACKUP_HOST)) { @@ -595,7 +595,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d to_fullpath_part, strerror(errno)); } - elog(VERBOSE, "Rename \"%s\" to \"%s\"", to_fullpath_part, to_fullpath); + elog(LOG, "Rename \"%s\" to \"%s\"", to_fullpath_part, to_fullpath); //copy_file_attributes(from_path, FIO_DB_HOST, to_path_temp, FIO_BACKUP_HOST, true); @@ -752,7 +752,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, } part_opened: - elog(VERBOSE, "Temp WAL file successfully created: \"%s\"", to_fullpath_gz_part); + elog(LOG, "Temp WAL file successfully created: \"%s\"", to_fullpath_gz_part); /* Check if possible to skip copying, */ if (fileExists(to_fullpath_gz, FIO_BACKUP_HOST)) @@ -844,7 +844,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, to_fullpath_gz_part, strerror(errno)); } - elog(VERBOSE, "Rename \"%s\" to \"%s\"", + elog(LOG, "Rename \"%s\" to \"%s\"", to_fullpath_gz_part, to_fullpath_gz); //copy_file_attributes(from_path, FIO_DB_HOST, to_path_temp, FIO_BACKUP_HOST, true); @@ -1155,7 +1155,7 @@ do_archive_get(InstanceState *instanceState, InstanceConfig *instance, const cha if (get_wal_file(wal_file_name, backup_wal_file_path, absolute_wal_file_path, false)) { fail_count = 0; - elog(INFO, "pg_probackup archive-get copied WAL file %s", wal_file_name); + elog(LOG, "pg_probackup archive-get copied WAL file %s", wal_file_name); n_fetched++; break; } @@ -1511,7 +1511,7 @@ get_wal_file_internal(const char *from_path, const char *to_path, FILE *out, char *buf = pgut_malloc(OUT_BUF_SIZE); /* 1MB buffer */ int exit_code = 0; - elog(VERBOSE, "Attempting to %s WAL file '%s'", + elog(LOG, "Attempting to %s WAL file '%s'", is_decompress ? 
"open compressed" : "open", from_path); /* open source file for read */ diff --git a/src/backup.c b/src/backup.c index c575865c4..f61266f5f 100644 --- a/src/backup.c +++ b/src/backup.c @@ -3,7 +3,7 @@ * backup.c: backup DB cluster, archived WAL * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -116,7 +116,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, char pretty_time[20]; char pretty_bytes[20]; - elog(LOG, "Database backup start"); + elog(INFO, "Database backup start"); if(current.external_dir_str) { external_dirs = make_external_directory_list(current.external_dir_str, @@ -336,11 +336,11 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, /* Extract information about files in backup_list parsing their names:*/ parse_filelist_filenames(backup_files_list, instance_config.pgdata); - elog(LOG, "Current Start LSN: %X/%X, TLI: %X", + elog(INFO, "Current Start LSN: %X/%X, TLI: %X", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn), current.tli); if (current.backup_mode != BACKUP_MODE_FULL) - elog(LOG, "Parent Start LSN: %X/%X, TLI: %X", + elog(INFO, "Parent Start LSN: %X/%X, TLI: %X", (uint32) (prev_backup->start_lsn >> 32), (uint32) (prev_backup->start_lsn), prev_backup->tli); @@ -412,7 +412,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, else join_path_components(dirpath, current.database_dir, file->rel_path); - elog(VERBOSE, "Create directory '%s'", dirpath); + elog(LOG, "Create directory '%s'", dirpath); fio_mkdir(dirpath, DIR_PERMISSION, FIO_BACKUP_HOST); } @@ -673,7 +673,7 @@ pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo) nodeInfo->checksum_version = current.checksum_version; if (current.checksum_version) - elog(LOG, "This PostgreSQL instance was initialized with data block checksums. " + elog(INFO, "This PostgreSQL instance was initialized with data block checksums. " "Data block corruption will be detected"); else elog(WARNING, "This PostgreSQL instance was initialized without data block checksums. " @@ -1513,7 +1513,7 @@ wait_wal_and_calculate_stop_lsn(const char *xlog_path, XLogRecPtr stop_lsn, pgBa stop_lsn_exists = true; } - elog(LOG, "stop_lsn: %X/%X", + elog(INFO, "stop_lsn: %X/%X", (uint32) (stop_lsn >> 32), (uint32) (stop_lsn)); /* @@ -1902,7 +1902,7 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb backup->recovery_xid = stop_backup_result.snapshot_xid; - elog(LOG, "Getting the Recovery Time from WAL"); + elog(INFO, "Getting the Recovery Time from WAL"); /* iterate over WAL from stop_backup lsn to start_backup lsn */ if (!read_recovery_info(xlog_path, backup->tli, @@ -1910,7 +1910,7 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb backup->start_lsn, backup->stop_lsn, &backup->recovery_time)) { - elog(LOG, "Failed to find Recovery Time in WAL, forced to trust current_timestamp"); + elog(INFO, "Failed to find Recovery Time in WAL, forced to trust current_timestamp"); backup->recovery_time = stop_backup_result.invocation_time; } @@ -1992,9 +1992,8 @@ backup_files(void *arg) if (interrupted || thread_interrupted) elog(ERROR, "interrupted during backup"); - if (progress) - elog(INFO, "Progress: (%d/%d). Process file \"%s\"", - i + 1, n_backup_files_list, file->rel_path); + elog(progress ? 
INFO : LOG, "Progress: (%d/%d). Process file \"%s\"", + i + 1, n_backup_files_list, file->rel_path); /* Handle zero sized files */ if (file->size == 0) @@ -2064,11 +2063,11 @@ backup_files(void *arg) if (file->write_size == BYTES_INVALID) { - elog(VERBOSE, "Skipping the unchanged file: \"%s\"", from_fullpath); + elog(LOG, "Skipping the unchanged file: \"%s\"", from_fullpath); continue; } - elog(VERBOSE, "File \"%s\". Copied "INT64_FORMAT " bytes", + elog(LOG, "File \"%s\". Copied "INT64_FORMAT " bytes", from_fullpath, file->write_size); } @@ -2186,26 +2185,26 @@ set_cfs_datafiles(parray *files, const char *root, char *relative, size_t i) elog(ERROR, "Out of memory"); len = strlen("/pg_compression"); cfs_tblspc_path[strlen(cfs_tblspc_path) - len] = 0; - elog(VERBOSE, "CFS DIRECTORY %s, pg_compression path: %s", cfs_tblspc_path, relative); + elog(LOG, "CFS DIRECTORY %s, pg_compression path: %s", cfs_tblspc_path, relative); for (p = (int) i; p >= 0; p--) { prev_file = (pgFile *) parray_get(files, (size_t) p); - elog(VERBOSE, "Checking file in cfs tablespace %s", prev_file->rel_path); + elog(LOG, "Checking file in cfs tablespace %s", prev_file->rel_path); if (strstr(prev_file->rel_path, cfs_tblspc_path) != NULL) { if (S_ISREG(prev_file->mode) && prev_file->is_datafile) { - elog(VERBOSE, "Setting 'is_cfs' on file %s, name %s", + elog(LOG, "Setting 'is_cfs' on file %s, name %s", prev_file->rel_path, prev_file->name); prev_file->is_cfs = true; } } else { - elog(VERBOSE, "Breaking on %s", prev_file->rel_path); + elog(LOG, "Breaking on %s", prev_file->rel_path); break; } } diff --git a/src/catchup.c b/src/catchup.c index 3c522afb7..79e3361a8 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -2,7 +2,7 @@ * * catchup.c: sync DB cluster * - * Copyright (c) 2022, Postgres Professional + * Copyright (c) 2021-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -203,7 +203,7 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, /* fill dest_redo.lsn and dest_redo.tli */ get_redo(dest_pgdata, FIO_LOCAL_HOST, &dest_redo); - elog(VERBOSE, "source.tli = %X, dest_redo.lsn = %X/%X, dest_redo.tli = %X", + elog(LOG, "source.tli = %X, dest_redo.lsn = %X/%X, dest_redo.tli = %X", current.tli, (uint32) (dest_redo.lsn >> 32), (uint32) dest_redo.lsn, dest_redo.tli); if (current.tli != 1) @@ -398,9 +398,8 @@ catchup_thread_runner(void *arg) if (interrupted || thread_interrupted) elog(ERROR, "Interrupted during catchup"); - if (progress) - elog(INFO, "Progress: (%d/%d). Process file \"%s\"", - i + 1, n_files, file->rel_path); + elog(progress ? INFO : LOG, "Progress: (%d/%d). Process file \"%s\"", + i + 1, n_files, file->rel_path); /* construct destination filepath */ Assert(file->external_dir_num == 0); @@ -447,12 +446,12 @@ catchup_thread_runner(void *arg) if (file->write_size == BYTES_INVALID) { - elog(VERBOSE, "Skipping the unchanged file: \"%s\", read %li bytes", from_fullpath, file->read_size); + elog(LOG, "Skipping the unchanged file: \"%s\", read %li bytes", from_fullpath, file->read_size); continue; } arguments->transfered_bytes += file->write_size; - elog(VERBOSE, "File \"%s\". Copied "INT64_FORMAT " bytes", + elog(LOG, "File \"%s\". 
Copied "INT64_FORMAT " bytes", from_fullpath, file->write_size); } @@ -607,7 +606,7 @@ filter_filelist(parray *filelist, const char *pgdata, && parray_bsearch(exclude_relative_paths_list, file->rel_path, pgPrefixCompareString)!= NULL) ) { - elog(LOG, "%s file \"%s\" excluded with --exclude-path option", logging_string, full_path); + elog(INFO, "%s file \"%s\" excluded with --exclude-path option", logging_string, full_path); file->excluded = true; } } @@ -650,7 +649,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (exclude_relative_paths_list != NULL) parray_qsort(exclude_relative_paths_list, pgCompareString); - elog(LOG, "Database catchup start"); + elog(INFO, "Database catchup start"); if (current.backup_mode != BACKUP_MODE_FULL) { @@ -697,7 +696,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* Call pg_start_backup function in PostgreSQL connect */ pg_start_backup(label, smooth_checkpoint, ¤t, &source_node_info, source_conn); - elog(LOG, "pg_start_backup START LSN %X/%X", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn)); + elog(INFO, "pg_start_backup START LSN %X/%X", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn)); } /* Sanity: source cluster must be "in future" relatively to dest cluster */ @@ -772,11 +771,11 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, elog(INFO, "Source PGDATA size: %s (excluded %s)", pretty_source_bytes, pretty_bytes); } - elog(LOG, "Start LSN (source): %X/%X, TLI: %X", + elog(INFO, "Start LSN (source): %X/%X, TLI: %X", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn), current.tli); if (current.backup_mode != BACKUP_MODE_FULL) - elog(LOG, "LSN in destination: %X/%X, TLI: %X", + elog(INFO, "LSN in destination: %X/%X, TLI: %X", (uint32) (dest_redo.lsn >> 32), (uint32) (dest_redo.lsn), dest_redo.tli); @@ -829,7 +828,8 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, char dirpath[MAXPGPATH]; join_path_components(dirpath, dest_pgdata, file->rel_path); - elog(VERBOSE, "Create directory '%s'", dirpath); + + elog(LOG, "Create directory '%s'", dirpath); if (!dry_run) fio_mkdir(dirpath, DIR_PERMISSION, FIO_LOCAL_HOST); } @@ -859,7 +859,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, join_path_components(to_path, dest_pgdata, file->rel_path); - elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"", + elog(INFO, "Create directory \"%s\" and symbolic link \"%s\"", linked_path, to_path); if (!dry_run) @@ -946,7 +946,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, { fio_delete(file->mode, fullpath, FIO_LOCAL_HOST); } - elog(VERBOSE, "Deleted file \"%s\"", fullpath); + elog(LOG, "Deleted file \"%s\"", fullpath); /* shrink dest pgdata list */ pgFileFree(file); diff --git a/src/data.c b/src/data.c index e5a551127..5c5fdf4f0 100644 --- a/src/data.c +++ b/src/data.c @@ -3,7 +3,7 @@ * data.c: utils to parse and backup data pages * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -696,7 +696,7 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa use_pagemap = true; if (use_pagemap) - elog(VERBOSE, "Using pagemap for file \"%s\"", file->rel_path); + 
elog(LOG, "Using pagemap for file \"%s\"", file->rel_path); /* Remote mode */ if (fio_is_remote(FIO_DB_HOST)) @@ -795,7 +795,7 @@ backup_non_data_file(pgFile *file, pgFile *prev_file, } /* - * If nonedata file exists in previous backup + * If non-data file exists in previous backup * and its mtime is less than parent backup start time ... */ if ((pg_strcasecmp(file->name, RELMAPPER_FILENAME) != 0) && (prev_file && file->exists_in_prev && @@ -1197,7 +1197,7 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers datapagemap_add(map, blknum); } - elog(VERBOSE, "Copied file \"%s\": %lu bytes", from_fullpath, write_len); + elog(LOG, "Copied file \"%s\": %lu bytes", from_fullpath, write_len); return write_len; } @@ -1220,7 +1220,7 @@ restore_non_data_file_internal(FILE *in, FILE *out, pgFile *file, /* check for interrupt */ if (interrupted || thread_interrupted) - elog(ERROR, "Interrupted during nonedata file restore"); + elog(ERROR, "Interrupted during non-data file restore"); read_len = fread(buf, 1, STDIO_BUFSIZE, in); @@ -1241,7 +1241,7 @@ restore_non_data_file_internal(FILE *in, FILE *out, pgFile *file, pg_free(buf); - elog(VERBOSE, "Copied file \"%s\": %lu bytes", from_fullpath, file->write_size); + elog(LOG, "Copied file \"%s\": %lu bytes", from_fullpath, file->write_size); } size_t @@ -1286,7 +1286,7 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, */ if (!tmp_file) { - elog(ERROR, "Failed to locate nonedata file \"%s\" in backup %s", + elog(ERROR, "Failed to locate non-data file \"%s\" in backup %s", dest_file->rel_path, base36enc(tmp_backup->start_time)); continue; } @@ -1311,14 +1311,14 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, /* sanity */ if (!tmp_backup) - elog(ERROR, "Failed to locate a backup containing full copy of nonedata file \"%s\"", + elog(ERROR, "Failed to locate a backup containing full copy of non-data file \"%s\"", to_fullpath); if (!tmp_file) - elog(ERROR, "Failed to locate a full copy of nonedata file \"%s\"", to_fullpath); + elog(ERROR, "Failed to locate a full copy of non-data file \"%s\"", to_fullpath); if (tmp_file->write_size <= 0) - elog(ERROR, "Full copy of nonedata file has invalid size: %li. " + elog(ERROR, "Full copy of non-data file has invalid size: %li. 
" "Metadata corruption in backup %s in file: \"%s\"", tmp_file->write_size, base36enc(tmp_backup->start_time), to_fullpath); @@ -1331,7 +1331,7 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, if (file_crc == tmp_file->crc) { - elog(VERBOSE, "Already existing nonedata file \"%s\" has the same checksum, skip restore", + elog(LOG, "Already existing non-data file \"%s\" has the same checksum, skip restore", to_fullpath); return 0; } @@ -1359,7 +1359,7 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, elog(ERROR, "Cannot open backup file \"%s\": %s", from_fullpath, strerror(errno)); - /* disable stdio buffering for nonedata files */ + /* disable stdio buffering for non-data files */ setvbuf(in, NULL, _IONBF, BUFSIZ); /* do actual work */ @@ -1683,7 +1683,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, int n_hdr = -1; off_t cur_pos_in = 0; - elog(VERBOSE, "Validate relation blocks for file \"%s\"", fullpath); + elog(LOG, "Validate relation blocks for file \"%s\"", fullpath); /* should not be possible */ Assert(!(backup_version >= 20400 && file->n_headers <= 0)); @@ -1742,7 +1742,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, elog(ERROR, "Cannot seek block %u of \"%s\": %s", blknum, fullpath, strerror(errno)); else - elog(INFO, "Seek to %u", headers[n_hdr].pos); + elog(VERBOSE, "Seek to %u", headers[n_hdr].pos); cur_pos_in = headers[n_hdr].pos; } @@ -1766,7 +1766,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, /* backward compatibility kludge TODO: remove in 3.0 */ if (compressed_size == PageIsTruncated) { - elog(INFO, "Block %u of \"%s\" is truncated", + elog(VERBOSE, "Block %u of \"%s\" is truncated", blknum, fullpath); continue; } @@ -1837,10 +1837,10 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, switch (rc) { case PAGE_IS_NOT_FOUND: - elog(LOG, "File \"%s\", block %u, page is NULL", file->rel_path, blknum); + elog(VERBOSE, "File \"%s\", block %u, page is NULL", file->rel_path, blknum); break; case PAGE_IS_ZEROED: - elog(LOG, "File: %s blknum %u, empty zeroed page", file->rel_path, blknum); + elog(VERBOSE, "File: %s blknum %u, empty zeroed page", file->rel_path, blknum); break; case PAGE_HEADER_IS_INVALID: elog(WARNING, "Page header is looking insane: %s, block %i", file->rel_path, blknum); diff --git a/src/dir.c b/src/dir.c index 0b2d18778..561586f87 100644 --- a/src/dir.c +++ b/src/dir.c @@ -3,7 +3,7 @@ * dir.c: directory operation utility. 
* * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -640,7 +640,7 @@ dir_check_file(pgFile *file, bool backup_logs) pgdata_exclude_files_non_exclusive[i]) == 0) { /* Skip */ - elog(VERBOSE, "Excluding file: %s", file->name); + elog(LOG, "Excluding file: %s", file->name); return CHECK_FALSE; } } @@ -649,7 +649,7 @@ dir_check_file(pgFile *file, bool backup_logs) if (strcmp(file->rel_path, pgdata_exclude_files[i]) == 0) { /* Skip */ - elog(VERBOSE, "Excluding file: %s", file->name); + elog(LOG, "Excluding file: %s", file->name); return CHECK_FALSE; } } @@ -669,7 +669,7 @@ dir_check_file(pgFile *file, bool backup_logs) /* exclude by dirname */ if (strcmp(file->name, pgdata_exclude_dir[i]) == 0) { - elog(VERBOSE, "Excluding directory content: %s", file->rel_path); + elog(LOG, "Excluding directory content: %s", file->rel_path); return CHECK_EXCLUDE_FALSE; } } @@ -679,7 +679,7 @@ dir_check_file(pgFile *file, bool backup_logs) if (strcmp(file->rel_path, PG_LOG_DIR) == 0) { /* Skip */ - elog(VERBOSE, "Excluding directory content: %s", file->rel_path); + elog(LOG, "Excluding directory content: %s", file->rel_path); return CHECK_EXCLUDE_FALSE; } } @@ -1166,7 +1166,7 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba join_path_components(to_path, data_dir, dir->rel_path); - elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"", + elog(LOG, "Create directory \"%s\" and symbolic link \"%s\"", linked_path, to_path); /* create tablespace directory */ @@ -1183,7 +1183,7 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba } /* This is not symlink, create directory */ - elog(VERBOSE, "Create directory \"%s\"", dir->rel_path); + elog(LOG, "Create directory \"%s\"", dir->rel_path); join_path_components(to_path, data_dir, dir->rel_path); @@ -1934,7 +1934,7 @@ cleanup_tablespace(const char *path) join_path_components(fullpath, path, file->rel_path); fio_delete(file->mode, fullpath, FIO_DB_HOST); - elog(VERBOSE, "Deleted file \"%s\"", fullpath); + elog(LOG, "Deleted file \"%s\"", fullpath); } parray_walk(files, pgFileFree); diff --git a/src/merge.c b/src/merge.c index 1ce92bb42..1ce49f9a2 100644 --- a/src/merge.c +++ b/src/merge.c @@ -2,7 +2,7 @@ * * merge.c: merge FULL and incremental backups * - * Copyright (c) 2018-2019, Postgres Professional + * Copyright (c) 2018-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -171,7 +171,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool elog(ERROR, "Merge target is full backup and has multiple direct children, " "you must specify child backup id you want to merge with"); - elog(LOG, "Looking for closest incremental backup to merge with"); + elog(INFO, "Looking for closest incremental backup to merge with"); /* Look for closest child backup */ for (i = 0; i < parray_num(backups); i++) @@ -810,7 +810,7 @@ merge_chain(InstanceState *instanceState, join_path_components(full_file_path, full_database_dir, full_file->rel_path); pgFileDelete(full_file->mode, full_file_path); - elog(VERBOSE, "Deleted \"%s\"", full_file_path); + elog(LOG, "Deleted \"%s\"", full_file_path); } } @@ -956,9 +956,8 @@ merge_files(void *arg) if (S_ISDIR(dest_file->mode)) goto done; - if (progress) - 
elog(INFO, "Progress: (%d/%lu). Merging file \"%s\"", - i + 1, n_files, dest_file->rel_path); + elog(progress ? INFO : LOG, "Progress: (%d/%lu). Merging file \"%s\"", + i + 1, n_files, dest_file->rel_path); if (dest_file->is_datafile && !dest_file->is_cfs) tmp_file->segno = dest_file->segno; @@ -1063,7 +1062,7 @@ merge_files(void *arg) { BackupPageHeader2 *headers = NULL; - elog(VERBOSE, "The file didn`t changed since FULL backup, skip merge: \"%s\"", + elog(LOG, "The file didn`t changed since FULL backup, skip merge: \"%s\"", file->rel_path); tmp_file->crc = file->crc; @@ -1144,7 +1143,7 @@ remove_dir_with_files(const char *path) join_path_components(full_path, path, file->rel_path); pgFileDelete(file->mode, full_path); - elog(VERBOSE, "Deleted \"%s\"", full_path); + elog(LOG, "Deleted \"%s\"", full_path); } /* cleanup */ @@ -1193,7 +1192,7 @@ reorder_external_dirs(pgBackup *to_backup, parray *to_external, char new_path[MAXPGPATH]; makeExternalDirPathByNum(old_path, externaldir_template, i + 1); makeExternalDirPathByNum(new_path, externaldir_template, from_num); - elog(VERBOSE, "Rename %s to %s", old_path, new_path); + elog(LOG, "Rename %s to %s", old_path, new_path); if (rename (old_path, new_path) == -1) elog(ERROR, "Could not rename directory \"%s\" to \"%s\": %s", old_path, new_path, strerror(errno)); @@ -1346,7 +1345,7 @@ merge_non_data_file(parray *parent_chain, pgBackup *full_backup, */ if (!from_file) { - elog(ERROR, "Failed to locate nonedata file \"%s\" in backup %s", + elog(ERROR, "Failed to locate non-data file \"%s\" in backup %s", dest_file->rel_path, base36enc(from_backup->start_time)); continue; } @@ -1357,11 +1356,11 @@ merge_non_data_file(parray *parent_chain, pgBackup *full_backup, /* sanity */ if (!from_backup) - elog(ERROR, "Failed to found a backup containing full copy of nonedata file \"%s\"", + elog(ERROR, "Failed to found a backup containing full copy of non-data file \"%s\"", dest_file->rel_path); if (!from_file) - elog(ERROR, "Failed to locate a full copy of nonedata file \"%s\"", dest_file->rel_path); + elog(ERROR, "Failed to locate a full copy of non-data file \"%s\"", dest_file->rel_path); /* set path to source file */ if (from_file->external_dir_num) @@ -1450,4 +1449,4 @@ is_forward_compatible(parray *parent_chain) } return true; -} \ No newline at end of file +} diff --git a/src/restore.c b/src/restore.c index fbf0c0398..c877290b1 100644 --- a/src/restore.c +++ b/src/restore.c @@ -3,7 +3,7 @@ * restore.c: restore DB cluster and archived WAL. 
* * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -843,7 +843,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, external_path = parray_get(external_dirs, file->external_dir_num - 1); join_path_components(dirpath, external_path, file->rel_path); - elog(VERBOSE, "Create external directory \"%s\"", dirpath); + elog(LOG, "Create external directory \"%s\"", dirpath); fio_mkdir(dirpath, file->mode, FIO_DB_HOST); } } @@ -923,7 +923,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, join_path_components(fullpath, pgdata_path, file->rel_path); fio_delete(file->mode, fullpath, FIO_DB_HOST); - elog(VERBOSE, "Deleted file \"%s\"", fullpath); + elog(LOG, "Deleted file \"%s\"", fullpath); /* shrink pgdata list */ pgFileFree(file); @@ -1131,9 +1131,8 @@ restore_files(void *arg) if (interrupted || thread_interrupted) elog(ERROR, "Interrupted during restore"); - if (progress) - elog(INFO, "Progress: (%d/%lu). Restore file \"%s\"", - i + 1, n_files, dest_file->rel_path); + elog(progress ? INFO : LOG, "Progress: (%d/%lu). Restore file \"%s\"", + i + 1, n_files, dest_file->rel_path); /* Only files from pgdata can be skipped by partial restore */ if (arguments->dbOid_exclude_list && dest_file->external_dir_num == 0) @@ -1149,7 +1148,7 @@ restore_files(void *arg) create_empty_file(FIO_BACKUP_HOST, arguments->to_root, FIO_DB_HOST, dest_file); - elog(VERBOSE, "Skip file due to partial restore: \"%s\"", + elog(LOG, "Skip file due to partial restore: \"%s\"", dest_file->rel_path); continue; } @@ -1159,7 +1158,7 @@ restore_files(void *arg) if ((dest_file->external_dir_num == 0) && strcmp(PG_TABLESPACE_MAP_FILE, dest_file->rel_path) == 0) { - elog(VERBOSE, "Skip tablespace_map"); + elog(LOG, "Skip tablespace_map"); continue; } @@ -1167,7 +1166,7 @@ restore_files(void *arg) if ((dest_file->external_dir_num == 0) && strcmp(DATABASE_MAP, dest_file->rel_path) == 0) { - elog(VERBOSE, "Skip database_map"); + elog(LOG, "Skip database_map"); continue; } @@ -1239,9 +1238,9 @@ restore_files(void *arg) strerror(errno)); if (!dest_file->is_datafile || dest_file->is_cfs) - elog(VERBOSE, "Restoring nonedata file: \"%s\"", to_fullpath); + elog(LOG, "Restoring non-data file: \"%s\"", to_fullpath); else - elog(VERBOSE, "Restoring data file: \"%s\"", to_fullpath); + elog(LOG, "Restoring data file: \"%s\"", to_fullpath); // If destination file is 0 sized, then just close it and go for the next if (dest_file->write_size == 0) @@ -1261,10 +1260,10 @@ restore_files(void *arg) } else { - /* disable stdio buffering for local destination nonedata file */ + /* disable stdio buffering for local destination non-data file */ if (!fio_is_remote_file(out)) setvbuf(out, NULL, _IONBF, BUFSIZ); - /* Destination file is nonedata file */ + /* Destination file is non-data file */ arguments->restored_bytes += restore_non_data_file(arguments->parent_chain, arguments->dest_backup, dest_file, out, to_fullpath, already_exists); @@ -1773,7 +1772,7 @@ read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict } if (fd && (ferror(fd))) - elog(ERROR, "Failed to read from file: \"%s\"", path); + elog(ERROR, "Failed to read from file: \"%s\"", path); if (fd) fclose(fd); @@ -2188,7 +2187,7 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, * data files 
content, because based on pg_control information we will * choose a backup suitable for lsn based incremental restore. */ - elog(INFO, "Trying to read pg_control file in destination directory"); + elog(LOG, "Trying to read pg_control file in destination directory"); system_id_pgdata = get_system_identifier(pgdata, FIO_DB_HOST, false); diff --git a/src/util.c b/src/util.c index fb33fd046..4d6c50a07 100644 --- a/src/util.c +++ b/src/util.c @@ -3,7 +3,7 @@ * util.c: log messages to log file or stderr, and misc code. * * Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2021, Postgres Professional * *------------------------------------------------------------------------- */ @@ -589,7 +589,7 @@ datapagemap_print_debug(datapagemap_t *map) iter = datapagemap_iterate(map); while (datapagemap_next(iter, &blocknum)) - elog(INFO, " block %u", blocknum); + elog(VERBOSE, " block %u", blocknum); pg_free(iter); } From c0c07ac2ff3c9a749c4ad5ec57a23a3d6e800cca Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Wed, 15 Jun 2022 22:55:59 +0300 Subject: [PATCH 294/525] [PBCKP-216] Setting C locale globally, env locale is only set while doing while printing big tables ...in order to impose dot-based floating point representation on logging and JSON-representation tags: pg_probackup --- src/pg_probackup.c | 6 +++++ src/pg_probackup.h | 2 ++ src/show.c | 53 +++++++++++++++++++++++++++++++++++++-- src/utils/configuration.c | 3 +++ 4 files changed, 62 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index b9b3af0b9..3ffc3bb9e 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -311,6 +311,10 @@ main(int argc, char *argv[]) set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_probackup")); PROGRAM_FULL_PATH = palloc0(MAXPGPATH); + // Setting C locale for numeric values in order to impose dot-based floating-point representation + memorize_environment_locale(); + setlocale(LC_NUMERIC, "C"); + /* Get current time */ current_time = time(NULL); @@ -1024,6 +1028,8 @@ main(int argc, char *argv[]) break; } + free_environment_locale(); + return 0; } diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 2c4c61036..77893eadc 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -905,6 +905,8 @@ extern InstanceConfig *readInstanceConfigFile(InstanceState *instanceState); /* in show.c */ extern int do_show(CatalogState *catalogState, InstanceState *instanceState, time_t requested_backup_id, bool show_archive); +extern void memorize_environment_locale(void); +extern void free_environment_locale(void); /* in delete.c */ extern void do_delete(InstanceState *instanceState, time_t backup_id); diff --git a/src/show.c b/src/show.c index 22c40cf43..db8a9e225 100644 --- a/src/show.c +++ b/src/show.c @@ -3,7 +3,7 @@ * show.c: show backup information. 
* * Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -12,6 +12,7 @@ #include #include +#include #include #include "utils/json.h" @@ -71,6 +72,43 @@ static PQExpBufferData show_buf; static bool first_instance = true; static int32 json_level = 0; +static const char* lc_env_locale; +typedef enum { + LOCALE_C, // Used for formatting output to unify the dot-based floating point representation + LOCALE_ENV // Default environment locale +} output_numeric_locale; + +#ifdef HAVE_USELOCALE +static locale_t env_locale, c_locale; +#endif +void memorize_environment_locale() { + lc_env_locale = (const char *)getenv("LC_NUMERIC"); + lc_env_locale = lc_env_locale != NULL ? lc_env_locale : "C"; +#ifdef HAVE_USELOCALE + env_locale = newlocale(LC_NUMERIC_MASK, lc_env_locale, (locale_t)0); + c_locale = newlocale(LC_NUMERIC_MASK, "C", (locale_t)0); +#else +#ifdef HAVE__CONFIGTHREADLOCALE + _configthreadlocale(_ENABLE_PER_THREAD_LOCALE); +#endif +#endif +} + +void free_environment_locale() { +#ifdef HAVE_USELOCALE + freelocale(env_locale); + freelocale(c_locale); +#endif +} + +static void set_output_numeric_locale(output_numeric_locale loc) { +#ifdef HAVE_USELOCALE + uselocale(loc == LOCALE_C ? c_locale : env_locale); +#else + setlocale(LC_NUMERIC, loc == LOCALE_C ? "C" : lc_env_locale); +#endif +} + /* * Entry point of pg_probackup SHOW subcommand. */ @@ -513,6 +551,9 @@ show_instance_plain(const char *instance_name, parray *backup_list, bool show_na ShowBackendRow *rows; TimeLineID parent_tli = 0; + // Since we've been printing a table, set LC_NUMERIC to its default environment value + set_output_numeric_locale(LOCALE_ENV); + for (i = 0; i < SHOW_FIELDS_COUNT; i++) widths[i] = strlen(names[i]); @@ -726,6 +767,8 @@ show_instance_plain(const char *instance_name, parray *backup_list, bool show_na } pfree(rows); + // Restore the C locale + set_output_numeric_locale(LOCALE_C); } /* @@ -806,6 +849,9 @@ show_archive_plain(const char *instance_name, uint32 xlog_seg_size, uint32 widths_sum = 0; ShowArchiveRow *rows; + // Since we've been printing a table, set LC_NUMERIC to its default environment value + set_output_numeric_locale(LOCALE_ENV); + for (i = 0; i < SHOW_ARCHIVE_FIELDS_COUNT; i++) widths[i] = strlen(names[i]); @@ -973,6 +1019,8 @@ show_archive_plain(const char *instance_name, uint32 xlog_seg_size, } pfree(rows); + // Restore the C locale + set_output_numeric_locale(LOCALE_C); //TODO: free timelines } @@ -1045,8 +1093,9 @@ show_archive_json(const char *instance_name, uint32 xlog_seg_size, appendPQExpBuffer(buf, "%lu", tlinfo->size); json_add_key(buf, "zratio", json_level); + if (tlinfo->size != 0) - zratio = ((float)xlog_seg_size*tlinfo->n_xlog_files) / tlinfo->size; + zratio = ((float) xlog_seg_size * tlinfo->n_xlog_files) / tlinfo->size; appendPQExpBuffer(buf, "%.2f", zratio); if (tlinfo->closest_backup != NULL) diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 04bfbbe3b..7ab242aa3 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -18,6 +18,9 @@ #include "getopt_long.h" +#ifndef WIN32 +#include +#endif #include #define MAXPG_LSNCOMPONENT 8 From f88410e5d7d359a2355265a3fc35e67271877e17 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Thu, 4 Aug 2022 16:15:40 +0300 Subject: [PATCH 295/525] [PBCKP-220] hotfix for PtrackTest --- 
tests/ptrack.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/ptrack.py b/tests/ptrack.py index 9741c9561..d46ece119 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -24,6 +24,8 @@ def test_drop_rel_during_backup_ptrack(self): """ drop relation during ptrack backup """ + self._check_gdb_flag_or_skip_test() + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( base_dir=os.path.join(module_name, self.fname, 'node'), @@ -994,6 +996,8 @@ def test_ptrack_get_block(self): make node, make full and ptrack stream backups, restore them and check data correctness """ + self._check_gdb_flag_or_skip_test() + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( base_dir=os.path.join(module_name, self.fname, 'node'), From 1e6cf10c87ae6c59d9383b246f2b9f47a78771ab Mon Sep 17 00:00:00 2001 From: Vyacheslav Makarov Date: Thu, 4 Aug 2022 01:27:26 +0300 Subject: [PATCH 296/525] [PBCKP-169]: adding json format for logs. Added 2 flags. --log-format-console=plain|json --log-format-file=plain|json Option 'log-format-console' set only from terminal. --- src/configure.c | 124 ++++++++++----- src/help.c | 49 +++++- src/pg_probackup.c | 11 ++ src/utils/json.c | 18 +++ src/utils/json.h | 1 + src/utils/logger.c | 240 +++++++++++++++++++++++++----- src/utils/logger.h | 10 ++ tests/expected/option_help.out | 3 + tests/expected/option_help_ru.out | 3 + 9 files changed, 386 insertions(+), 73 deletions(-) diff --git a/src/configure.c b/src/configure.c index 9ffe2d7a7..6e8700de1 100644 --- a/src/configure.c +++ b/src/configure.c @@ -17,10 +17,14 @@ static void assign_log_level_console(ConfigOption *opt, const char *arg); static void assign_log_level_file(ConfigOption *opt, const char *arg); +static void assign_log_format_console(ConfigOption *opt, const char *arg); +static void assign_log_format_file(ConfigOption *opt, const char *arg); static void assign_compress_alg(ConfigOption *opt, const char *arg); static char *get_log_level_console(ConfigOption *opt); static char *get_log_level_file(ConfigOption *opt); +static char *get_log_format_console(ConfigOption *opt); +static char *get_log_format_file(ConfigOption *opt); static char *get_compress_alg(ConfigOption *opt); static void show_configure_start(void); @@ -154,90 +158,100 @@ ConfigOption instance_options[] = OPTION_LOG_GROUP, 0, get_log_level_file }, { - 's', 214, "log-filename", + 'f', 214, "log-format-console", + assign_log_format_console, SOURCE_CMD_STRICT, 0, + OPTION_LOG_GROUP, 0, get_log_format_console + }, + { + 'f', 215, "log-format-file", + assign_log_format_file, SOURCE_CMD, 0, + OPTION_LOG_GROUP, 0, get_log_format_file + }, + { + 's', 216, "log-filename", &instance_config.logger.log_filename, SOURCE_CMD, 0, OPTION_LOG_GROUP, 0, option_get_value }, { - 's', 215, "error-log-filename", + 's', 217, "error-log-filename", &instance_config.logger.error_log_filename, SOURCE_CMD, 0, OPTION_LOG_GROUP, 0, option_get_value }, { - 's', 216, "log-directory", + 's', 218, "log-directory", &instance_config.logger.log_directory, SOURCE_CMD, 0, OPTION_LOG_GROUP, 0, option_get_value }, { - 'U', 217, "log-rotation-size", + 'U', 219, "log-rotation-size", &instance_config.logger.log_rotation_size, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, OPTION_UNIT_KB, option_get_value }, { - 'U', 218, "log-rotation-age", + 'U', 220, "log-rotation-age", &instance_config.logger.log_rotation_age, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, OPTION_UNIT_MS, 
option_get_value }, /* Retention options */ { - 'u', 219, "retention-redundancy", + 'u', 221, "retention-redundancy", &instance_config.retention_redundancy, SOURCE_CMD, 0, OPTION_RETENTION_GROUP, 0, option_get_value }, { - 'u', 220, "retention-window", + 'u', 222, "retention-window", &instance_config.retention_window, SOURCE_CMD, 0, OPTION_RETENTION_GROUP, 0, option_get_value }, { - 'u', 221, "wal-depth", + 'u', 223, "wal-depth", &instance_config.wal_depth, SOURCE_CMD, 0, OPTION_RETENTION_GROUP, 0, option_get_value }, /* Compression options */ { - 'f', 222, "compress-algorithm", + 'f', 224, "compress-algorithm", assign_compress_alg, SOURCE_CMD, 0, OPTION_COMPRESS_GROUP, 0, get_compress_alg }, { - 'u', 223, "compress-level", + 'u', 225, "compress-level", &instance_config.compress_level, SOURCE_CMD, 0, OPTION_COMPRESS_GROUP, 0, option_get_value }, /* Remote backup options */ { - 's', 224, "remote-proto", + 's', 226, "remote-proto", &instance_config.remote.proto, SOURCE_CMD, 0, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 225, "remote-host", + 's', 227, "remote-host", &instance_config.remote.host, SOURCE_CMD, 0, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 226, "remote-port", + 's', 228, "remote-port", &instance_config.remote.port, SOURCE_CMD, 0, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 227, "remote-path", + 's', 229, "remote-path", &instance_config.remote.path, SOURCE_CMD, 0, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 228, "remote-user", + 's', 230, "remote-user", &instance_config.remote.user, SOURCE_CMD, 0, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 229, "ssh-options", + 's', 231, "ssh-options", &instance_config.remote.ssh_options, SOURCE_CMD, 0, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 230, "ssh-config", + 's', 232, "ssh-config", &instance_config.remote.ssh_config, SOURCE_CMD, 0, OPTION_REMOTE_GROUP, 0, option_get_value }, @@ -388,6 +402,8 @@ readInstanceConfigFile(InstanceState *instanceState) InstanceConfig *instance = pgut_new(InstanceConfig); char *log_level_console = NULL; char *log_level_file = NULL; + char *log_format_console = NULL; + char *log_format_file = NULL; char *compress_alg = NULL; int parsed_options; @@ -509,90 +525,100 @@ readInstanceConfigFile(InstanceState *instanceState) OPTION_LOG_GROUP, 0, option_get_value }, { - 's', 214, "log-filename", + 's', 214, "log-format-console", + &log_format_console, SOURCE_CMD_STRICT, 0, + OPTION_LOG_GROUP, 0, option_get_value + }, + { + 's', 215, "log-format-file", + &log_format_file, SOURCE_CMD, 0, + OPTION_LOG_GROUP, 0, option_get_value + }, + { + 's', 216, "log-filename", &instance->logger.log_filename, SOURCE_CMD, 0, OPTION_LOG_GROUP, 0, option_get_value }, { - 's', 215, "error-log-filename", + 's', 217, "error-log-filename", &instance->logger.error_log_filename, SOURCE_CMD, 0, OPTION_LOG_GROUP, 0, option_get_value }, { - 's', 216, "log-directory", + 's', 218, "log-directory", &instance->logger.log_directory, SOURCE_CMD, 0, OPTION_LOG_GROUP, 0, option_get_value }, { - 'U', 217, "log-rotation-size", + 'U', 219, "log-rotation-size", &instance->logger.log_rotation_size, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, OPTION_UNIT_KB, option_get_value }, { - 'U', 218, "log-rotation-age", + 'U', 220, "log-rotation-age", &instance->logger.log_rotation_age, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, OPTION_UNIT_MS, option_get_value }, /* Retention options */ { - 'u', 219, "retention-redundancy", + 'u', 221, "retention-redundancy", &instance->retention_redundancy, SOURCE_CMD, 0, 
OPTION_RETENTION_GROUP, 0, option_get_value }, { - 'u', 220, "retention-window", + 'u', 222, "retention-window", &instance->retention_window, SOURCE_CMD, 0, OPTION_RETENTION_GROUP, 0, option_get_value }, { - 'u', 221, "wal-depth", + 'u', 223, "wal-depth", &instance->wal_depth, SOURCE_CMD, 0, OPTION_RETENTION_GROUP, 0, option_get_value }, /* Compression options */ { - 's', 222, "compress-algorithm", + 's', 224, "compress-algorithm", &compress_alg, SOURCE_CMD, 0, OPTION_LOG_GROUP, 0, option_get_value }, { - 'u', 223, "compress-level", + 'u', 225, "compress-level", &instance->compress_level, SOURCE_CMD, 0, OPTION_COMPRESS_GROUP, 0, option_get_value }, /* Remote backup options */ { - 's', 224, "remote-proto", + 's', 226, "remote-proto", &instance->remote.proto, SOURCE_CMD, 0, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 225, "remote-host", + 's', 227, "remote-host", &instance->remote.host, SOURCE_CMD, 0, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 226, "remote-port", + 's', 228, "remote-port", &instance->remote.port, SOURCE_CMD, 0, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 227, "remote-path", + 's', 229, "remote-path", &instance->remote.path, SOURCE_CMD, 0, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 228, "remote-user", + 's', 230, "remote-user", &instance->remote.user, SOURCE_CMD, 0, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 229, "ssh-options", + 's', 231, "ssh-options", &instance->remote.ssh_options, SOURCE_CMD, 0, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 230, "ssh-config", + 's', 232, "ssh-config", &instance->remote.ssh_config, SOURCE_CMD, 0, OPTION_REMOTE_GROUP, 0, option_get_value }, @@ -625,6 +651,12 @@ readInstanceConfigFile(InstanceState *instanceState) if (log_level_file) instance->logger.log_level_file = parse_log_level(log_level_file); + if (log_format_console) + instance->logger.log_format_console = parse_log_format(log_format_console); + + if (log_format_file) + instance->logger.log_format_file = parse_log_format(log_format_file); + if (compress_alg) instance->compress_alg = parse_compress_alg(compress_alg); @@ -649,6 +681,18 @@ assign_log_level_file(ConfigOption *opt, const char *arg) instance_config.logger.log_level_file = parse_log_level(arg); } +static void +assign_log_format_console(ConfigOption *opt, const char *arg) +{ + instance_config.logger.log_format_console = parse_log_format(arg); +} + +static void +assign_log_format_file(ConfigOption *opt, const char *arg) +{ + instance_config.logger.log_format_file = parse_log_format(arg); +} + static void assign_compress_alg(ConfigOption *opt, const char *arg) { @@ -667,6 +711,18 @@ get_log_level_file(ConfigOption *opt) return pstrdup(deparse_log_level(instance_config.logger.log_level_file)); } +static char * +get_log_format_console(ConfigOption *opt) +{ + return pstrdup(deparse_log_format(instance_config.logger.log_format_console)); +} + +static char * +get_log_format_file(ConfigOption *opt) +{ + return pstrdup(deparse_log_format(instance_config.logger.log_format_file)); +} + static char * get_compress_alg(ConfigOption *opt) { diff --git a/src/help.c b/src/help.c index 85894759e..116a0711c 100644 --- a/src/help.c +++ b/src/help.c @@ -94,6 +94,7 @@ help_pg_probackup(void) printf(_(" [--external-dirs=external-directories-paths]\n")); printf(_(" [--log-level-console=log-level-console]\n")); printf(_(" [--log-level-file=log-level-file]\n")); + printf(_(" [--log-format-file=log-format-file]\n")); printf(_(" [--log-filename=log-filename]\n")); printf(_(" 
[--error-log-filename=error-log-filename]\n")); printf(_(" [--log-directory=log-directory]\n")); @@ -131,6 +132,8 @@ help_pg_probackup(void) printf(_(" [--no-sync]\n")); printf(_(" [--log-level-console=log-level-console]\n")); printf(_(" [--log-level-file=log-level-file]\n")); + printf(_(" [--log-format-console=log-format-console]\n")); + printf(_(" [--log-format-file=log-format-file]\n")); printf(_(" [--log-filename=log-filename]\n")); printf(_(" [--error-log-filename=error-log-filename]\n")); printf(_(" [--log-directory=log-directory]\n")); @@ -306,6 +309,8 @@ help_backup(void) printf(_(" [--no-sync]\n")); printf(_(" [--log-level-console=log-level-console]\n")); printf(_(" [--log-level-file=log-level-file]\n")); + printf(_(" [--log-format-console=log-format-console]\n")); + printf(_(" [--log-format-file=log-format-file]\n")); printf(_(" [--log-filename=log-filename]\n")); printf(_(" [--error-log-filename=error-log-filename]\n")); printf(_(" [--log-directory=log-directory]\n")); @@ -353,6 +358,12 @@ help_backup(void) printf(_(" --log-level-file=log-level-file\n")); printf(_(" level for file logging (default: off)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-console=log-format-console\n")); + printf(_(" defines the format of the console log (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); printf(_(" --log-filename=log-filename\n")); printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); @@ -476,7 +487,7 @@ help_restore(void) printf(_(" -X, --waldir=WALDIR location for the write-ahead log directory\n")); - + printf(_("\n Incremental restore options:\n")); printf(_(" -I, --incremental-mode=none|checksum|lsn\n")); @@ -519,6 +530,12 @@ help_restore(void) printf(_(" --log-level-file=log-level-file\n")); printf(_(" level for file logging (default: off)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-console=log-format-console\n")); + printf(_(" defines the format of the console log (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); printf(_(" --log-filename=log-filename\n")); printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); @@ -586,6 +603,12 @@ help_validate(void) printf(_(" --log-level-file=log-level-file\n")); printf(_(" level for file logging (default: off)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-console=log-format-console\n")); + printf(_(" defines the format of the console log (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); printf(_(" --log-filename=log-filename\n")); printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); printf(_(" 
support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); @@ -633,6 +656,12 @@ help_checkdb(void) printf(_(" --log-level-file=log-level-file\n")); printf(_(" level for file logging (default: off)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-console=log-format-console\n")); + printf(_(" defines the format of the console log (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); printf(_(" --log-filename=log-filename\n")); printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log\n")); @@ -713,6 +742,12 @@ help_delete(void) printf(_(" --log-level-file=log-level-file\n")); printf(_(" level for file logging (default: off)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-console=log-format-console\n")); + printf(_(" defines the format of the console log (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); printf(_(" --log-filename=log-filename\n")); printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); @@ -737,6 +772,8 @@ help_merge(void) printf(_(" [--no-validate] [--no-sync]\n")); printf(_(" [--log-level-console=log-level-console]\n")); printf(_(" [--log-level-file=log-level-file]\n")); + printf(_(" [--log-format-console=log-format-console]\n")); + printf(_(" [--log-format-file=log-format-file]\n")); printf(_(" [--log-filename=log-filename]\n")); printf(_(" [--error-log-filename=error-log-filename]\n")); printf(_(" [--log-directory=log-directory]\n")); @@ -759,6 +796,12 @@ help_merge(void) printf(_(" --log-level-file=log-level-file\n")); printf(_(" level for file logging (default: off)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-console=log-format-console\n")); + printf(_(" defines the format of the console log (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); printf(_(" --log-filename=log-filename\n")); printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); @@ -800,6 +843,7 @@ help_set_config(void) printf(_(" [--restore-command=cmdline]\n")); printf(_(" [--log-level-console=log-level-console]\n")); printf(_(" [--log-level-file=log-level-file]\n")); + printf(_(" [--log-format-file=log-format-file]\n")); printf(_(" [--log-filename=log-filename]\n")); printf(_(" [--error-log-filename=error-log-filename]\n")); printf(_(" [--log-directory=log-directory]\n")); @@ -831,6 +875,9 @@ help_set_config(void) printf(_(" --log-level-file=log-level-file\n")); printf(_(" level for file logging (default: off)\n")); printf(_(" available options: 'off', 'error', 'warning', 
'info', 'log', 'verbose'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); printf(_(" --log-filename=log-filename\n")); printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); diff --git a/src/pg_probackup.c b/src/pg_probackup.c index c5ff02d8a..a242d7f59 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -427,6 +427,17 @@ main(int argc, char *argv[]) /* Parse command line only arguments */ config_get_opt(argc, argv, cmd_options, instance_options); + if (backup_subcmd == SET_CONFIG_CMD) + { + for (int i = 0; i < argc; i++) + { + if (strncmp("--log-format-console", argv[i], strlen("--log-format-console")) == 0) + { + elog(ERROR, "Option 'log-format-console' set only from terminal\n"); + } + } + } + pgut_init(); if (no_color) diff --git a/src/utils/json.c b/src/utils/json.c index 9f13a958f..2c8e0fe9b 100644 --- a/src/utils/json.c +++ b/src/utils/json.c @@ -144,3 +144,21 @@ json_add_escaped(PQExpBuffer buf, const char *str) } appendPQExpBufferChar(buf, '"'); } + +void +json_add_min(PQExpBuffer buf, JsonToken type) +{ + switch (type) + { + case JT_BEGIN_OBJECT: + appendPQExpBufferChar(buf, '{'); + add_comma = false; + break; + case JT_END_OBJECT: + appendPQExpBufferStr(buf, "}\n"); + add_comma = true; + break; + default: + break; + } +} diff --git a/src/utils/json.h b/src/utils/json.h index cc9f1168d..f80832e69 100644 --- a/src/utils/json.h +++ b/src/utils/json.h @@ -25,6 +25,7 @@ typedef enum } JsonToken; extern void json_add(PQExpBuffer buf, JsonToken type, int32 *level); +extern void json_add_min(PQExpBuffer buf, JsonToken type); extern void json_add_key(PQExpBuffer buf, const char *name, int32 level); extern void json_add_value(PQExpBuffer buf, const char *name, const char *value, int32 level, bool escaped); diff --git a/src/utils/logger.c b/src/utils/logger.c index 70bd5dcc4..7ea41f74e 100644 --- a/src/utils/logger.c +++ b/src/utils/logger.c @@ -19,14 +19,19 @@ #include "utils/configuration.h" +#include "json.h" + /* Logger parameters */ LoggerConfig logger_config = { LOG_LEVEL_CONSOLE_DEFAULT, LOG_LEVEL_FILE_DEFAULT, LOG_FILENAME_DEFAULT, NULL, + NULL, LOG_ROTATION_SIZE_DEFAULT, - LOG_ROTATION_AGE_DEFAULT + LOG_ROTATION_AGE_DEFAULT, + LOG_FORMAT_CONSOLE_DEFAULT, + LOG_FORMAT_FILE_DEFAULT }; /* Implementation for logging.h */ @@ -227,6 +232,35 @@ write_elevel(FILE *stream, int elevel) } } +static void +write_elevel_for_json(PQExpBuffer buf, int elevel) +{ + switch (elevel) + { + case VERBOSE: + appendPQExpBufferStr(buf, "\"VERBOSE\""); + break; + case LOG: + appendPQExpBufferStr(buf, "\"LOG\""); + break; + case INFO: + appendPQExpBufferStr(buf, "\"INFO\""); + break; + case NOTICE: + appendPQExpBufferStr(buf, "\"NOTICE\""); + break; + case WARNING: + appendPQExpBufferStr(buf, "\"WARNING\""); + break; + case ERROR: + appendPQExpBufferStr(buf, "\"ERROR\""); + break; + default: + elog_stderr(ERROR, "invalid logging level: %d", elevel); + break; + } +} + /* * Exit with code if it is an error. 
* Check for in_cleanup flag to avoid deadlock in case of ERROR in cleanup @@ -276,6 +310,12 @@ elog_internal(int elevel, bool file_only, const char *message) time_t log_time = (time_t) time(NULL); char strfbuf[128]; char str_pid[128]; + char str_pid_json[128]; + char str_thread_json[64]; + PQExpBufferData show_buf; + PQExpBuffer buf_json = &show_buf; + int8 format_console, + format_file; write_to_file = elevel >= logger_config.log_level_file && logger_config.log_directory @@ -283,6 +323,8 @@ elog_internal(int elevel, bool file_only, const char *message) write_to_error_log = elevel >= ERROR && logger_config.error_log_filename && logger_config.log_directory && logger_config.log_directory[0] != '\0'; write_to_stderr = elevel >= logger_config.log_level_console && !file_only; + format_console = logger_config.log_format_console; + format_file = logger_config.log_format_file; if (remote_agent) { @@ -292,10 +334,27 @@ elog_internal(int elevel, bool file_only, const char *message) pthread_lock(&log_file_mutex); loggin_in_progress = true; - if (write_to_file || write_to_error_log || is_archive_cmd) + if (write_to_file || write_to_error_log || is_archive_cmd || + format_console == JSON) strftime(strfbuf, sizeof(strfbuf), "%Y-%m-%d %H:%M:%S %Z", localtime(&log_time)); + if (format_file == JSON || format_console == JSON) + { + snprintf(str_pid_json, sizeof(str_pid_json), "%d", my_pid); + snprintf(str_thread_json, sizeof(str_thread_json), "[%d-1]", my_thread_num); + + initPQExpBuffer(&show_buf); + json_add_min(buf_json, JT_BEGIN_OBJECT); + json_add_value(buf_json, "ts", strfbuf, 0, true); + json_add_value(buf_json, "pid", str_pid_json, 0, true); + json_add_key(buf_json, "level", 0); + write_elevel_for_json(buf_json, elevel); + json_add_value(buf_json, "msg", message, 0, true); + json_add_value(buf_json, "my_thread_num", str_thread_json, 0, true); + json_add_min(buf_json, JT_END_OBJECT); + } + snprintf(str_pid, sizeof(str_pid), "[%d]:", my_pid); /* @@ -307,12 +366,18 @@ elog_internal(int elevel, bool file_only, const char *message) { if (log_file == NULL) open_logfile(&log_file, logger_config.log_filename ? 
logger_config.log_filename : LOG_FILENAME_DEFAULT); + if (format_file == JSON) + { + fputs(buf_json->data, log_file); + } + else + { + fprintf(log_file, "%s ", strfbuf); + fprintf(log_file, "%s ", str_pid); + write_elevel(log_file, elevel); - fprintf(log_file, "%s ", strfbuf); - fprintf(log_file, "%s ", str_pid); - write_elevel(log_file, elevel); - - fprintf(log_file, "%s\n", message); + fprintf(log_file, "%s\n", message); + } fflush(log_file); } @@ -326,11 +391,18 @@ elog_internal(int elevel, bool file_only, const char *message) if (error_log_file == NULL) open_logfile(&error_log_file, logger_config.error_log_filename); - fprintf(error_log_file, "%s ", strfbuf); - fprintf(error_log_file, "%s ", str_pid); - write_elevel(error_log_file, elevel); + if (format_file == JSON) + { + fputs(buf_json->data, error_log_file); + } + else + { + fprintf(error_log_file, "%s ", strfbuf); + fprintf(error_log_file, "%s ", str_pid); + write_elevel(error_log_file, elevel); - fprintf(error_log_file, "%s\n", message); + fprintf(error_log_file, "%s\n", message); + } fflush(error_log_file); } @@ -340,35 +412,47 @@ elog_internal(int elevel, bool file_only, const char *message) */ if (write_to_stderr) { - if (is_archive_cmd) + if (format_console == JSON) { - char str_thread[64]; - /* [Issue #213] fix pgbadger parsing */ - snprintf(str_thread, sizeof(str_thread), "[%d-1]:", my_thread_num); - - fprintf(stderr, "%s ", strfbuf); - fprintf(stderr, "%s ", str_pid); - fprintf(stderr, "%s ", str_thread); + fprintf(stderr, "%s", buf_json->data); } - else if (show_color) + else { - /* color WARNING and ERROR messages */ - if (elevel == WARNING) - fprintf(stderr, "%s", TC_YELLOW_BOLD); - else if (elevel == ERROR) - fprintf(stderr, "%s", TC_RED_BOLD); - } + if (is_archive_cmd) + { + char str_thread[64]; + /* [Issue #213] fix pgbadger parsing */ + snprintf(str_thread, sizeof(str_thread), "[%d-1]:", my_thread_num); - write_elevel(stderr, elevel); + fprintf(stderr, "%s ", strfbuf); + fprintf(stderr, "%s ", str_pid); + fprintf(stderr, "%s ", str_thread); + } + else if (show_color) + { + /* color WARNING and ERROR messages */ + if (elevel == WARNING) + fprintf(stderr, "%s", TC_YELLOW_BOLD); + else if (elevel == ERROR) + fprintf(stderr, "%s", TC_RED_BOLD); + } + + write_elevel(stderr, elevel); + + /* main payload */ + fprintf(stderr, "%s", message); - /* main payload */ - fprintf(stderr, "%s", message); + /* reset color to default */ + if (show_color && (elevel == WARNING || elevel == ERROR)) + fprintf(stderr, "%s", TC_RESET); - /* reset color to default */ - if (show_color && (elevel == WARNING || elevel == ERROR)) - fprintf(stderr, "%s", TC_RESET); + fprintf(stderr, "\n"); + } - fprintf(stderr, "\n"); + if (format_file == JSON || format_console == JSON) + { + termPQExpBuffer(buf_json); + } fflush(stderr); } @@ -386,7 +470,15 @@ elog_internal(int elevel, bool file_only, const char *message) static void elog_stderr(int elevel, const char *fmt, ...) { - va_list args; + va_list args; + PQExpBufferData show_buf; + PQExpBuffer buf_json = &show_buf; + time_t log_time = (time_t) time(NULL); + char strfbuf[128]; + char str_pid[128]; + char str_thread_json[64]; + char *message; + int8 format_console; /* * Do not log message if severity level is less than log_level. @@ -397,11 +489,37 @@ elog_stderr(int elevel, const char *fmt, ...) 
va_start(args, fmt); - write_elevel(stderr, elevel); - vfprintf(stderr, fmt, args); - fputc('\n', stderr); - fflush(stderr); + format_console = logger_config.log_format_console; + if (format_console == JSON) + { + strftime(strfbuf, sizeof(strfbuf), "%Y-%m-%d %H:%M:%S %Z", + localtime(&log_time)); + snprintf(str_pid, sizeof(str_pid), "%d", my_pid); + snprintf(str_thread_json, sizeof(str_thread_json), "[%d-1]", my_thread_num); + + initPQExpBuffer(&show_buf); + json_add_min(buf_json, JT_BEGIN_OBJECT); + json_add_value(buf_json, "ts", strfbuf, 0, true); + json_add_value(buf_json, "pid", str_pid, 0, true); + json_add_key(buf_json, "level", 0); + write_elevel_for_json(buf_json, elevel); + message = get_log_message(fmt, args); + json_add_value(buf_json, "msg", message, 0, true); + json_add_value(buf_json, "my_thread_num", str_thread_json, 0, true); + json_add_min(buf_json, JT_END_OBJECT); + fputs(buf_json->data, stderr); + pfree(message); + termPQExpBuffer(buf_json); + } + else + { + write_elevel(stderr, elevel); + vfprintf(stderr, fmt, args); + fputc('\n', stderr); + } + + fflush(stderr); va_end(args); exit_if_necessary(elevel); @@ -570,6 +688,36 @@ parse_log_level(const char *level) return 0; } +int +parse_log_format(const char *format) +{ + const char *v = format; + size_t len; + + if (v == NULL) + { + elog(ERROR, "log-format got invalid value"); + return 0; + } + + /* Skip all spaces detected */ + while (isspace((unsigned char)*v)) + v++; + len = strlen(v); + + if (len == 0) + elog(ERROR, "log-format is empty"); + + if (pg_strncasecmp("plain", v, len) == 0) + return PLAIN; + else if (pg_strncasecmp("json", v, len) == 0) + return JSON; + + /* Log format is invalid */ + elog(ERROR, "invalid log-format \"%s\"", format); + return 0; +} + /* * Converts integer representation of log level to string. */ @@ -599,6 +747,22 @@ deparse_log_level(int level) return NULL; } +const char * +deparse_log_format(int format) +{ + switch (format) + { + case PLAIN: + return "PLAIN"; + case JSON: + return "JSON"; + default: + elog(ERROR, "invalid log-format %d", format); + } + + return NULL; +} + /* * Construct logfile name using timestamp information. * diff --git a/src/utils/logger.h b/src/utils/logger.h index 6a7407e41..adc5061e0 100644 --- a/src/utils/logger.h +++ b/src/utils/logger.h @@ -21,6 +21,9 @@ #define ERROR 1 #define LOG_OFF 10 +#define PLAIN 0 +#define JSON 1 + typedef struct LoggerConfig { int log_level_console; @@ -32,6 +35,8 @@ typedef struct LoggerConfig uint64 log_rotation_size; /* Maximum lifetime of an individual log file in minutes */ uint64 log_rotation_age; + int8 log_format_console; + int8 log_format_file; } LoggerConfig; /* Logger parameters */ @@ -43,6 +48,9 @@ extern LoggerConfig logger_config; #define LOG_LEVEL_CONSOLE_DEFAULT INFO #define LOG_LEVEL_FILE_DEFAULT LOG_OFF +#define LOG_FORMAT_CONSOLE_DEFAULT PLAIN +#define LOG_FORMAT_FILE_DEFAULT PLAIN + #define LOG_FILENAME_DEFAULT "pg_probackup.log" #define LOG_DIRECTORY_DEFAULT "log" @@ -56,4 +64,6 @@ extern void init_console(void); extern int parse_log_level(const char *level); extern const char *deparse_log_level(int level); +extern int parse_log_format(const char *format); +extern const char *deparse_log_format(int format); #endif /* LOGGER_H */ diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index 659164250..5948d0503 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -12,6 +12,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. 
[--external-dirs=external-directories-paths] [--log-level-console=log-level-console] [--log-level-file=log-level-file] + [--log-format-file=log-format-file] [--log-filename=log-filename] [--error-log-filename=error-log-filename] [--log-directory=log-directory] @@ -49,6 +50,8 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--no-sync] [--log-level-console=log-level-console] [--log-level-file=log-level-file] + [--log-format-console=log-format-console] + [--log-format-file=log-format-file] [--log-filename=log-filename] [--error-log-filename=error-log-filename] [--log-directory=log-directory] diff --git a/tests/expected/option_help_ru.out b/tests/expected/option_help_ru.out index 2e90eb297..358c49428 100644 --- a/tests/expected/option_help_ru.out +++ b/tests/expected/option_help_ru.out @@ -12,6 +12,7 @@ pg_probackup - утилита для управления резервным к [--external-dirs=external-directories-paths] [--log-level-console=log-level-console] [--log-level-file=log-level-file] + [--log-format-file=log-format-file] [--log-filename=log-filename] [--error-log-filename=error-log-filename] [--log-directory=log-directory] @@ -49,6 +50,8 @@ pg_probackup - утилита для управления резервным к [--no-sync] [--log-level-console=log-level-console] [--log-level-file=log-level-file] + [--log-format-console=log-format-console] + [--log-format-file=log-format-file] [--log-filename=log-filename] [--error-log-filename=error-log-filename] [--log-directory=log-directory] From fffebdc0b38e70a0a0b20c1a0fa1e4b8ce71e820 Mon Sep 17 00:00:00 2001 From: Elena Indrupskaya Date: Wed, 17 Aug 2022 16:05:44 +0300 Subject: [PATCH 297/525] [DOC] [PGPRO-6635] Update pg_probackup documentation for version 2.5.7 [skip travis] --- doc/pgprobackup.xml | 68 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index cb615fb17..fc2a341e8 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -3993,6 +3993,7 @@ pg_probackup restore -B backup_dir --instance cmdline] [--primary-conninfo=primary_conninfo] [-S | --primary-slot-name=slot_name] +[-X wal_dir | --waldir=wal_dir] [recovery_target_options] [logging_options] [remote_options] [partial_restore_options] [remote_wal_archive_options] @@ -4160,6 +4161,17 @@ pg_probackup restore -B backup_dir --instance + + + + + + + Specifies the directory where WAL should be stored. + + + + @@ -5185,6 +5197,60 @@ pg_probackup catchup -b catchup_mode + + + + Defines the format of the console log. Only set from the command line. Note that you cannot + specify this option in the pg_probackup.conf configuration file through + the command and that the + command also treats this option specified in the configuration file as an error. + Possible values are: + + + + + plain — sets the plain-text format of the console log. + + + + + json — sets the JSON format of the console log. + + + + + + Default: plain + + + + + + + + + Defines the format of log files used. Possible values are: + + + + + plain — sets the plain-text format of log files. + + + + + json — sets the JSON format of log files. + + + + + + Default: plain + + + + + @@ -6056,6 +6122,8 @@ archive-timeout = 5min # Logging parameters log-level-console = INFO log-level-file = OFF +log-format-console = PLAIN +log-format-file = PLAIN log-filename = pg_probackup.log log-rotation-size = 0 log-rotation-age = 0 From 03ad0d092aa7d5b751de2d9edd15ea7cb8c8417f Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Fri, 19 Aug 2022 11:44:58 +0300 Subject: [PATCH 298/525] Version 2.5.7 --- src/pg_probackup.h | 2 +- tests/expected/option_version.out | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index e583d7745..8aad0a7cc 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -338,7 +338,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.6" +#define PROGRAM_VERSION "2.5.7" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20501 diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index 96f0f3446..af186a98c 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.5.6 +pg_probackup 2.5.7 From a58c1831d6aef3514428b8981f6b478df18aca01 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Fri, 19 Aug 2022 15:50:17 +0300 Subject: [PATCH 299/525] [PGPRO-6635] Compatibility fix for older compilers --- src/pg_probackup.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 618705945..5867bd490 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -433,7 +433,8 @@ main(int argc, char *argv[]) if (backup_subcmd == SET_CONFIG_CMD) { - for (int i = 0; i < argc; i++) + int i; + for (i = 0; i < argc; i++) { if (strncmp("--log-format-console", argv[i], strlen("--log-format-console")) == 0) { From e918bae0dc7c5b4f8b5bb6748a47485c526e803f Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Fri, 26 Aug 2022 20:00:58 +0300 Subject: [PATCH 300/525] Version 2.5.8 --- src/pg_probackup.h | 2 +- tests/expected/option_version.out | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 8aad0a7cc..802cbb5c0 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -338,7 +338,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.7" +#define PROGRAM_VERSION "2.5.8" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20501 diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index af186a98c..4de288907 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.5.7 +pg_probackup 2.5.8 From 3d7b9f0e807741a546f3d95eb48106e120a89bf7 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Sun, 28 Aug 2022 00:39:33 +0300 Subject: [PATCH 301/525] =?UTF-8?q?[PBCKP-258]=20fix=20multiple=20permissi?= =?UTF-8?q?on=20tests=20(revert=20a3ac7d5e7a8d6ebeafd=E2=80=A6=20(#527)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .travis.yml | 2 ++ tests/backup.py | 58 +++++++++++++++--------------------------------- tests/checkdb.py | 31 +++++++++++++------------- tests/restore.py | 14 +++++------- 4 files changed, 41 insertions(+), 64 deletions(-) diff --git a/.travis.yml b/.travis.yml index 26b2bc4e2..a7dae2ed1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -37,6 +37,7 @@ env: # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=archive # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=checkdb # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=compression # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=delta # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=locking @@ -52,6 +53,7 @@ env: jobs: allow_failures: - if: env(PG_BRANCH) = master + - if: env(PG_BRANCH) = REL_15_STABLE - if: env(PG_BRANCH) = REL9_5_STABLE # - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) diff --git a/tests/backup.py b/tests/backup.py index 7d02f5b39..c5235120e 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -1889,8 +1889,7 @@ def test_backup_with_least_privileges_role(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") # PG 9.6 elif self.get_version(node) > 90600 and self.get_version(node) < 100000: node.safe_psql( @@ -1928,9 +1927,7 @@ def test_backup_with_least_privileges_role(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" - ) + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") # >= 10 else: node.safe_psql( @@ -1967,9 +1964,7 @@ def test_backup_with_least_privileges_role(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" - ) + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") if self.ptrack: node.safe_psql( @@ -1982,10 +1977,10 @@ def test_backup_with_least_privileges_role(self): "GRANT EXECUTE ON FUNCTION ptrack.ptrack_init_lsn() TO backup;") if ProbackupTest.enterprise: - 
node.safe_psql( "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") # FULL backup self.backup_node( @@ -2245,7 +2240,6 @@ def test_backup_with_less_privileges_role(self): if self.get_version(node) < 90600: node.safe_psql( 'backupdb', - "BEGIN; " "CREATE ROLE backup WITH LOGIN; " "GRANT USAGE ON SCHEMA pg_catalog TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " @@ -2256,14 +2250,11 @@ def test_backup_with_less_privileges_role(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") # PG 9.6 elif self.get_version(node) > 90600 and self.get_version(node) < 100000: node.safe_psql( 'backupdb', - "BEGIN; " "CREATE ROLE backup WITH LOGIN; " "GRANT USAGE ON SCHEMA pg_catalog TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " @@ -2275,14 +2266,11 @@ def test_backup_with_less_privileges_role(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") # >= 10 else: node.safe_psql( 'backupdb', - "BEGIN; " "CREATE ROLE backup WITH LOGIN; " "GRANT USAGE ON SCHEMA pg_catalog TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " @@ -2294,9 +2282,7 @@ def test_backup_with_less_privileges_role(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") # enable STREAM backup node.safe_psql( @@ -3067,9 +3053,7 @@ def test_missing_replication_permission(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" - ) + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") # >= 10 else: node.safe_psql( @@ -3091,15 +3075,14 @@ def test_missing_replication_permission(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" - ) + "GRANT 
EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") if ProbackupTest.enterprise: node.safe_psql( "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") - + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") + sleep(2) replica.promote() @@ -3177,8 +3160,7 @@ def test_missing_replication_permission_1(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") # PG 9.6 elif self.get_version(node) > 90600 and self.get_version(node) < 100000: node.safe_psql( @@ -3201,9 +3183,7 @@ def test_missing_replication_permission_1(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" - ) + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") # >= 10 else: node.safe_psql( @@ -3225,15 +3205,13 @@ def test_missing_replication_permission_1(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" - - ) + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") if ProbackupTest.enterprise: node.safe_psql( "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") replica.promote() diff --git a/tests/checkdb.py b/tests/checkdb.py index 2df946cf6..bcda0fb23 100644 --- a/tests/checkdb.py +++ b/tests/checkdb.py @@ -666,8 +666,8 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' # amcheck-next function - ) + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;') # amcheck-next function + # PG 9.6 elif self.get_version(node) > 90600 and self.get_version(node) < 100000: node.safe_psql( @@ -696,9 +696,8 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' # 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; ' - ) + 'GRANT EXECUTE ON FUNCTION 
bt_index_check(regclass, bool) TO backup;') + # PG 10 elif self.get_version(node) > 100000 and self.get_version(node) < 110000: node.safe_psql( @@ -726,10 +725,8 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup;' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; ' - ) + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup;') + if ProbackupTest.enterprise: # amcheck-1.1 node.safe_psql( @@ -768,9 +765,8 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; ' - ) + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;') + # checkunique parameter if ProbackupTest.enterprise: if (self.get_version(node) >= 111300 and self.get_version(node) < 120000 @@ -807,15 +803,20 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anycompatiblearray, anycompatible) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; ' - ) + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;') + # checkunique parameter if ProbackupTest.enterprise: node.safe_psql( "backupdb", "GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool, bool) TO backup") + if ProbackupTest.enterprise: + node.safe_psql( + 'backupdb', + 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;') + # checkdb try: self.checkdb_node( diff --git a/tests/restore.py b/tests/restore.py index 37f133573..b619078d5 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -3230,8 +3230,7 @@ def test_missing_database_map(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") # PG 9.6 elif self.get_version(node) > 90600 and self.get_version(node) < 100000: node.safe_psql( @@ -3269,9 +3268,7 @@ def test_missing_database_map(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" - ) + "GRANT EXECUTE ON FUNCTION 
pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") # >= 10 else: node.safe_psql( @@ -3307,9 +3304,7 @@ def test_missing_database_map(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" - ) + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") if self.ptrack: # TODO why backup works without these grants ? @@ -3326,7 +3321,8 @@ def test_missing_database_map(self): node.safe_psql( "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") # FULL backup without database_map backup_id = self.backup_node( From 7d3e7f864c0e373cc766df62ecef420dfef87bad Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Mon, 29 Aug 2022 05:04:05 +0300 Subject: [PATCH 302/525] [PBCKP-129] fix tests.replica.ReplicaTest.test_replica_stop_lsn_null_offset_next_record --- tests/replica.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/replica.py b/tests/replica.py index acf655aac..24dbaa39e 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -806,7 +806,7 @@ def test_replica_stop_lsn_null_offset_next_record(self): log_content) self.assertIn( - 'LOG: stop_lsn: 0/4000000', + 'INFO: stop_lsn: 0/4000000', log_content) self.assertTrue(self.show_pb(backup_dir, 'replica')[0]['status'] == 'DONE') From 95471acc75434834881a260f6e5299c1822bfcad Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Tue, 30 Aug 2022 12:42:47 +0300 Subject: [PATCH 303/525] [PBCKP-257] fix time_consuming.TimeConsumingTests.test_pbckp150 (#525) --- tests/time_consuming.py | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/tests/time_consuming.py b/tests/time_consuming.py index 396ab716e..c778b9bc3 100644 --- a/tests/time_consuming.py +++ b/tests/time_consuming.py @@ -15,22 +15,28 @@ def test_pbckp150(self): run pgbench, vacuum VERBOSE FULL and ptrack backups in parallel """ # init node + if self.pg_config_version < self.version_to_num('11.0'): + return unittest.skip('You need PostgreSQL >= 11 for this test') + if not self.ptrack: + return unittest.skip('Skipped because ptrack support is disabled') + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums']) - node.append_conf('postgresql.conf', - """ - max_connections = 100 - wal_keep_size = 16000 - ptrack.map_size = 1 - shared_preload_libraries='ptrack' - log_statement = 'none' - fsync = off - log_checkpoints = on - autovacuum = off - """) + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums'], + pg_options={ + 'max_connections': 100, + 'log_statement': 'none', + 'log_checkpoints': 'on', + 'autovacuum': 'off', + 'ptrack.map_size': 1}) + + if node.major_version >= 13: + self.set_auto_conf(node, {'wal_keep_size': '16000MB'}) + else: + self.set_auto_conf(node, {'wal_keep_segments': '1000'}) # init probackup and add an instance backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') From 1bb07627295ba44ed059d1d700db01d3a9799915 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Thu, 1 Sep 2022 14:38:17 +0300 Subject: [PATCH 304/525] =?UTF-8?q?[PBCKP-259]=20fix=20for=20'ERROR:=20Can?= =?UTF-8?q?not=20create=20directory=20for=20older=20backup'=E2=80=A6=20(#5?= =?UTF-8?q?26)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [PBCKP-259] fix for 'ERROR: Cannot create directory for older backup', rewrite --start_time implementation * rewritten 5f2283c8deac88ea49ea6223a3aa72e2cf462eb5 * fixes for several tests * disabled tests.merge.MergeTest.test_merge_backup_from_future and tests.restore.RestoreTest.test_restore_backup_from_future as incorrect for now Co-authored-by: d.lepikhova --- src/backup.c | 60 +++++- src/catalog.c | 98 ++++----- src/pg_probackup.c | 8 +- src/pg_probackup.h | 7 +- tests/backup.py | 364 ++++++++------------------------ tests/helpers/ptrack_helpers.py | 25 ++- tests/merge.py | 6 +- tests/restore.py | 6 +- 8 files changed, 217 insertions(+), 357 deletions(-) diff --git a/src/backup.c b/src/backup.c index 9a451e72a..03ff7b72b 100644 --- a/src/backup.c +++ b/src/backup.c @@ -692,6 +692,8 @@ pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo) /* * Entry point of pg_probackup BACKUP subcommand. 
+ * + * if start_time == INVALID_BACKUP_ID then we can generate backup_id */ int do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, @@ -699,8 +701,13 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, { PGconn *backup_conn = NULL; PGNodeInfo nodeInfo; + time_t latest_backup_id = INVALID_BACKUP_ID; char pretty_bytes[20]; + if (!instance_config.pgdata) + elog(ERROR, "required parameter not specified: PGDATA " + "(-D, --pgdata)"); + /* Initialize PGInfonode */ pgNodeInit(&nodeInfo); @@ -709,12 +716,55 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, (pg_strcasecmp(instance_config.external_dir_str, "none") != 0)) current.external_dir_str = instance_config.external_dir_str; - /* Create backup directory and BACKUP_CONTROL_FILE */ - pgBackupCreateDir(&current, instanceState, start_time); + /* Find latest backup_id */ + { + parray *backup_list = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID); - if (!instance_config.pgdata) - elog(ERROR, "required parameter not specified: PGDATA " - "(-D, --pgdata)"); + if (parray_num(backup_list) > 0) + latest_backup_id = ((pgBackup *)parray_get(backup_list, 0))->backup_id; + + parray_walk(backup_list, pgBackupFree); + parray_free(backup_list); + } + + /* Try to pick backup_id and create backup directory with BACKUP_CONTROL_FILE */ + if (start_time != INVALID_BACKUP_ID) + { + /* If the user already chose a backup_id for us, then try to use it. */ + if (start_time <= latest_backup_id) + /* don't care about freeing base36enc_dup memory, we exit anyway */ + elog(ERROR, "Can't assign backup_id from requested start_time (%s), " + "this time must be later that backup %s", + base36enc_dup(start_time), base36enc_dup(latest_backup_id)); + + current.backup_id = start_time; + pgBackupInitDir(&current, instanceState->instance_backup_subdir_path); + } + else + { + /* We can generate our own unique backup_id + * Sometimes (when we try to back up twice in one second) + * backup_id will be duplicated -> try more times. + */ + int attempts = 10; + + if (time(NULL) < latest_backup_id) + elog(ERROR, "Can't assign backup_id, there is already a backup in future (%s)", + base36enc(latest_backup_id)); + + do + { + current.backup_id = time(NULL); + pgBackupInitDir(&current, instanceState->instance_backup_subdir_path); + if (current.backup_id == INVALID_BACKUP_ID) + sleep(1); + } + while (current.backup_id == INVALID_BACKUP_ID && attempts-- > 0); + } + + /* If creation of backup dir was unsuccessful, there will be WARNINGS in logs already */ + if (current.backup_id == INVALID_BACKUP_ID) + elog(ERROR, "Can't create backup directory"); /* Update backup status and other metainfo. 
*/ current.status = BACKUP_STATUS_RUNNING; diff --git a/src/catalog.c b/src/catalog.c index c118e954a..47513096c 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -23,7 +23,7 @@ static pgBackup* get_closest_backup(timelineInfo *tlinfo); static pgBackup* get_oldest_backup(timelineInfo *tlinfo); static const char *backupModes[] = {"", "PAGE", "PTRACK", "DELTA", "FULL"}; static pgBackup *readBackupControlFile(const char *path); -static void create_backup_dir(pgBackup *backup, const char *backup_instance_path); +static int create_backup_dir(pgBackup *backup, const char *backup_instance_path); static bool backup_lock_exit_hook_registered = false; static parray *locks = NULL; @@ -969,6 +969,7 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id } else if (strcmp(base36enc(backup->start_time), data_ent->d_name) != 0) { + /* TODO there is no such guarantees */ elog(WARNING, "backup ID in control file \"%s\" doesn't match name of the backup folder \"%s\"", base36enc(backup->start_time), backup_conf_path); } @@ -1411,22 +1412,34 @@ get_multi_timeline_parent(parray *backup_list, parray *tli_list, return NULL; } -/* Create backup directory in $BACKUP_PATH - * Note, that backup_id attribute is updated, - * so it is possible to get diffrent values in +/* + * Create backup directory in $BACKUP_PATH + * (with proposed backup->backup_id) + * and initialize this directory. + * If creation of directory fails, then + * backup_id will be cleared (set to INVALID_BACKUP_ID). + * It is possible to get diffrent values in * pgBackup.start_time and pgBackup.backup_id. * It may be ok or maybe not, so it's up to the caller * to fix it or let it be. */ void -pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_time) +pgBackupInitDir(pgBackup *backup, const char *backup_instance_path) { - int i; - parray *subdirs = parray_new(); - parray * backups; - pgBackup *target_backup; + int i; + char temp[MAXPGPATH]; + parray *subdirs; + /* Try to create backup directory at first */ + if (create_backup_dir(backup, backup_instance_path) != 0) + { + /* Clear backup_id as indication of error */ + backup->backup_id = INVALID_BACKUP_ID; + return; + } + + subdirs = parray_new(); parray_append(subdirs, pg_strdup(DATABASE_DIR)); /* Add external dirs containers */ @@ -1438,7 +1451,6 @@ pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_t false); for (i = 0; i < parray_num(external_list); i++) { - char temp[MAXPGPATH]; /* Numeration of externaldirs starts with 1 */ makeExternalDirPathByNum(temp, EXTERNAL_DIR, i+1); parray_append(subdirs, pg_strdup(temp)); @@ -1446,30 +1458,6 @@ pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_t free_dir_list(external_list); } - /* Get list of all backups*/ - backups = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID); - if (parray_num(backups) > 0) - { - target_backup = (pgBackup *) parray_get(backups, 0); - if (start_time > target_backup->backup_id) - { - backup->backup_id = start_time; - create_backup_dir(backup, instanceState->instance_backup_subdir_path); - } - else - { - elog(ERROR, "Cannot create directory for older backup"); - } - } - else - { - backup->backup_id = start_time; - create_backup_dir(backup, instanceState->instance_backup_subdir_path); - } - - if (backup->backup_id == 0) - elog(ERROR, "Cannot create backup directory: %s", strerror(errno)); - backup->database_dir = pgut_malloc(MAXPGPATH); join_path_components(backup->database_dir, backup->root_dir, DATABASE_DIR); @@ 
-1479,10 +1467,8 @@ pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_t /* create directories for actual backup files */ for (i = 0; i < parray_num(subdirs); i++) { - char path[MAXPGPATH]; - - join_path_components(path, backup->root_dir, parray_get(subdirs, i)); - fio_mkdir(path, DIR_PERMISSION, FIO_BACKUP_HOST); + join_path_components(temp, backup->root_dir, parray_get(subdirs, i)); + fio_mkdir(temp, DIR_PERMISSION, FIO_BACKUP_HOST); } free_dir_list(subdirs); @@ -1491,34 +1477,26 @@ pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_t /* * Create root directory for backup, * update pgBackup.root_dir if directory creation was a success + * Return values (same as dir_create_dir()): + * 0 - ok + * -1 - error (warning message already emitted) */ -void +int create_backup_dir(pgBackup *backup, const char *backup_instance_path) { - int attempts = 10; + int rc; + char path[MAXPGPATH]; - while (attempts--) - { - int rc; - char path[MAXPGPATH]; - - join_path_components(path, backup_instance_path, base36enc(backup->backup_id)); + join_path_components(path, backup_instance_path, base36enc(backup->backup_id)); - /* TODO: add wrapper for remote mode */ - rc = dir_create_dir(path, DIR_PERMISSION, true); - - if (rc == 0) - { - backup->root_dir = pgut_strdup(path); - return; - } - else - { - elog(WARNING, "Cannot create directory \"%s\": %s", path, strerror(errno)); - sleep(1); - } - } + /* TODO: add wrapper for remote mode */ + rc = dir_create_dir(path, DIR_PERMISSION, true); + if (rc == 0) + backup->root_dir = pgut_strdup(path); + else + elog(WARNING, "Cannot create directory \"%s\": %s", path, strerror(errno)); + return rc; } /* diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 5867bd490..1f6b6313e 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -78,7 +78,7 @@ pid_t my_pid = 0; __thread int my_thread_num = 1; bool progress = false; bool no_sync = false; -time_t start_time = 0; +time_t start_time = INVALID_BACKUP_ID; #if PG_VERSION_NUM >= 100000 char *replication_slot = NULL; bool temp_slot = false; @@ -202,7 +202,6 @@ static ConfigOption cmd_options[] = { 's', 'i', "backup-id", &backup_id_string, SOURCE_CMD_STRICT }, { 'b', 133, "no-sync", &no_sync, SOURCE_CMD_STRICT }, { 'b', 134, "no-color", &no_color, SOURCE_CMD_STRICT }, - { 'U', 241, "start-time", &start_time, SOURCE_CMD_STRICT }, /* backup options */ { 'b', 180, "backup-pg-log", &backup_logs, SOURCE_CMD_STRICT }, { 'f', 'b', "backup-mode", opt_backup_mode, SOURCE_CMD_STRICT }, @@ -217,6 +216,7 @@ static ConfigOption cmd_options[] = { 'b', 184, "merge-expired", &merge_expired, SOURCE_CMD_STRICT }, { 'b', 185, "dry-run", &dry_run, SOURCE_CMD_STRICT }, { 's', 238, "note", &backup_note, SOURCE_CMD_STRICT }, + { 'U', 241, "start-time", &start_time, SOURCE_CMD_STRICT }, /* catchup options */ { 's', 239, "source-pgdata", &catchup_source_pgdata, SOURCE_CMD_STRICT }, { 's', 240, "destination-pgdata", &catchup_destination_pgdata, SOURCE_CMD_STRICT }, @@ -975,9 +975,7 @@ main(int argc, char *argv[]) case BACKUP_CMD: { current.stream = stream_wal; - if (start_time == 0) - start_time = current_time; - else + if (start_time != INVALID_BACKUP_ID) elog(WARNING, "Please do not use the --start-time option to start backup. " "This is a service option required to work with other extensions. 
" "We do not guarantee future support for this flag."); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 802cbb5c0..1885a191e 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -450,7 +450,10 @@ struct pgBackup { BackupMode backup_mode; /* Mode - one of BACKUP_MODE_xxx above*/ time_t backup_id; /* Identifier of the backup. - * Currently it's the same as start_time */ + * By default it's the same as start_time + * but can be increased if same backup_id + * already exists. It can be also set by + * start_time parameter */ BackupStatus status; /* Status - one of BACKUP_STATUS_xxx above*/ TimeLineID tli; /* timeline of start and stop backup lsns */ XLogRecPtr start_lsn; /* backup's starting transaction log location */ @@ -985,7 +988,7 @@ extern void write_backup_filelist(pgBackup *backup, parray *files, const char *root, parray *external_list, bool sync); -extern void pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_time); +extern void pgBackupInitDir(pgBackup *backup, const char *backup_instance_path); extern void pgNodeInit(PGNodeInfo *node); extern void pgBackupInit(pgBackup *backup); extern void pgBackupFree(void *backup); diff --git a/tests/backup.py b/tests/backup.py index c5235120e..0cba8fe79 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -1,7 +1,7 @@ import unittest import os from time import sleep, time -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import base36enc, ProbackupTest, ProbackupException import shutil from distutils.dir_util import copy_tree from testgres import ProcessType, QueryException @@ -313,7 +313,7 @@ def test_backup_detect_corruption(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if self.ptrack and node.major_version > 11: + if self.ptrack: node.safe_psql( "postgres", "create extension ptrack") @@ -459,7 +459,7 @@ def test_backup_detect_invalid_block_header(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if self.ptrack and node.major_version > 11: + if self.ptrack: node.safe_psql( "postgres", "create extension ptrack") @@ -600,7 +600,7 @@ def test_backup_detect_missing_permissions(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if self.ptrack and node.major_version > 11: + if self.ptrack: node.safe_psql( "postgres", "create extension ptrack") @@ -3402,10 +3402,11 @@ def test_pg_stop_backup_missing_permissions(self): # @unittest.skip("skip") def test_start_time(self): - + """Test, that option --start-time allows to set backup_id and restore""" fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -3418,138 +3419,81 @@ def test_start_time(self): # FULL backup startTime = int(time()) self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=['--stream', '--start-time', str(startTime)]) - - # DELTA backup - startTime = int(time()) - self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=['--stream', '--start-time', str(startTime)]) - - # PAGE backup - startTime = int(time()) - self.backup_node( - backup_dir, 'node', node, backup_type="page", - options=['--stream', '--start-time', str(startTime)]) - - if self.ptrack and node.major_version > 11: - node.safe_psql( - "postgres", - "create extension ptrack") - - # PTRACK backup - startTime = int(time()) - self.backup_node( - backup_dir, 'node', node, 
backup_type="ptrack", - options=['--stream', '--start-time', str(startTime)]) - - # Clean after yourself - self.del_test_dir(module_name, fname) - - # @unittest.skip("skip") - def test_start_time_incorrect_time(self): - - fname = self.id().split('.')[3] - node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() + backup_dir, 'node', node, backup_type='full', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore FULL backup by backup_id calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, module_name, fname, 'node_restored_full'), + backup_id=base36enc(startTime)) - startTime = int(time()) - #backup with correct start time - self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--start-time', str(startTime)]) - #backups with incorrect start time + #FULL backup with incorrect start time try: + startTime = str(int(time()-100000)) self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=['--stream', '--start-time', str(startTime-10000)]) + backup_dir, 'node', node, backup_type='full', + options=['--stream', '--start-time={0}'.format(startTime)]) # we should die here because exception is what we expect to happen self.assertEqual( 1, 0, - "Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( + 'Expecting Error because start time for new backup must be newer ' + '\n Output: {0} \n CMD: {1}'.format( repr(self.output), self.cmd)) except ProbackupException as e: self.assertRegex( e.message, - "ERROR: Cannot create directory for older backup", + r"ERROR: Can't assign backup_id from requested start_time \(\w*\), this time must be later that backup \w*\n", "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - try: - self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=['--stream', '--start-time', str(startTime-10000)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - "ERROR: Cannot create directory for older backup", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + # DELTA backup + startTime = int(time()) + self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore DELTA backup by backup_id calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, module_name, fname, 'node_restored_delta'), + backup_id=base36enc(startTime)) - try: - self.backup_node( - backup_dir, 'node', node, backup_type="page", - options=['--stream', '--start-time', str(startTime-10000)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, 
- "ERROR: Cannot create directory for older backup", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + # PAGE backup + startTime = int(time()) + self.backup_node( + backup_dir, 'node', node, backup_type='page', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore PAGE backup by backup_id calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, module_name, fname, 'node_restored_page'), + backup_id=base36enc(startTime)) - if self.ptrack and node.major_version > 11: + # PTRACK backup + if self.ptrack: node.safe_psql( - "postgres", - "create extension ptrack") + 'postgres', + 'create extension ptrack') - try: - self.backup_node( - backup_dir, 'node', node, backup_type="page", - options=['--stream', '--start-time', str(startTime-10000)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - "ERROR: Cannot create directory for older backup", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + startTime = int(time()) + self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore PTRACK backup by backup_id calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, module_name, fname, 'node_restored_ptrack'), + backup_id=base36enc(startTime)) # Clean after yourself self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_start_time_few_nodes(self): - + """Test, that we can synchronize backup_id's for different DBs""" fname = self.id().split('.')[3] node1 = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node1'), + set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -3561,6 +3505,7 @@ def test_start_time_few_nodes(self): node2 = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node2'), + set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -3571,200 +3516,61 @@ def test_start_time_few_nodes(self): node2.slow_start() # FULL backup - startTime = int(time()) + startTime = str(int(time())) self.backup_node( - backup_dir1, 'node1', node1, backup_type="full", - options=['--stream', '--start-time', str(startTime)]) + backup_dir1, 'node1', node1, backup_type='full', + options=['--stream', '--start-time={0}'.format(startTime)]) self.backup_node( - backup_dir2, 'node2', node2, backup_type="full", - options=['--stream', '--start-time', str(startTime)]) - + backup_dir2, 'node2', node2, backup_type='full', + options=['--stream', '--start-time={0}'.format(startTime)]) show_backup1 = self.show_pb(backup_dir1, 'node1')[0] show_backup2 = self.show_pb(backup_dir2, 'node2')[0] self.assertEqual(show_backup1['id'], show_backup2['id']) # DELTA backup - startTime = int(time()) + startTime = str(int(time())) self.backup_node( - backup_dir1, 'node1', node1, backup_type="delta", - options=['--stream', '--start-time', str(startTime)]) + backup_dir1, 'node1', node1, backup_type='delta', + options=['--stream', '--start-time={0}'.format(startTime)]) self.backup_node( - backup_dir2, 'node2', node2, backup_type="delta", - options=['--stream', '--start-time', str(startTime)]) + 
backup_dir2, 'node2', node2, backup_type='delta', + options=['--stream', '--start-time={0}'.format(startTime)]) show_backup1 = self.show_pb(backup_dir1, 'node1')[1] show_backup2 = self.show_pb(backup_dir2, 'node2')[1] self.assertEqual(show_backup1['id'], show_backup2['id']) # PAGE backup - startTime = int(time()) + startTime = str(int(time())) self.backup_node( - backup_dir1, 'node1', node1, backup_type="page", - options=['--stream', '--start-time', str(startTime)]) + backup_dir1, 'node1', node1, backup_type='page', + options=['--stream', '--start-time={0}'.format(startTime)]) self.backup_node( - backup_dir2, 'node2', node2, backup_type="page", - options=['--stream', '--start-time', str(startTime)]) + backup_dir2, 'node2', node2, backup_type='page', + options=['--stream', '--start-time={0}'.format(startTime)]) show_backup1 = self.show_pb(backup_dir1, 'node1')[2] show_backup2 = self.show_pb(backup_dir2, 'node2')[2] self.assertEqual(show_backup1['id'], show_backup2['id']) # PTRACK backup - startTime = int(time()) - if self.ptrack and node1.major_version > 11: + if self.ptrack: node1.safe_psql( - "postgres", - "create extension ptrack") - self.backup_node( - backup_dir1, 'node1', node1, backup_type="ptrack", - options=['--stream', '--start-time', str(startTime)]) - - if self.ptrack and node2.major_version > 11: + 'postgres', + 'create extension ptrack') node2.safe_psql( - "postgres", - "create extension ptrack") - self.backup_node( - backup_dir2, 'node2', node2, backup_type="ptrack", - options=['--stream', '--start-time', str(startTime)]) - show_backup1 = self.show_pb(backup_dir1, 'node1')[3] - show_backup2 = self.show_pb(backup_dir2, 'node2')[3] - self.assertEqual(show_backup1['id'], show_backup2['id']) - - # Clean after yourself - self.del_test_dir(module_name, fname) - - # @unittest.skip("skip") - def test_start_time_few_nodes_incorrect_time(self): - - fname = self.id().split('.')[3] - node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - backup_dir1 = os.path.join(self.tmp_path, module_name, fname, 'backup1') - self.init_pb(backup_dir1) - self.add_instance(backup_dir1, 'node1', node1) - self.set_archiving(backup_dir1, 'node1', node1) - node1.slow_start() - - node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2'), - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - backup_dir2 = os.path.join(self.tmp_path, module_name, fname, 'backup2') - self.init_pb(backup_dir2) - self.add_instance(backup_dir2, 'node2', node2) - self.set_archiving(backup_dir2, 'node2', node2) - node2.slow_start() - - # FULL backup - startTime = int(time()) - self.backup_node( - backup_dir1, 'node1', node1, backup_type="full", - options=['--stream', '--start-time', str(startTime)]) - self.backup_node( - backup_dir2, 'node2', node2, backup_type="full", - options=['--stream', '--start-time', str(startTime-10000)]) - - show_backup1 = self.show_pb(backup_dir1, 'node1')[0] - show_backup2 = self.show_pb(backup_dir2, 'node2')[0] - self.assertGreater(show_backup1['id'], show_backup2['id']) - - # DELTA backup - startTime = int(time()) - self.backup_node( - backup_dir1, 'node1', node1, backup_type="delta", - options=['--stream', '--start-time', str(startTime)]) - # make backup with start time definitelly earlier, than existing - try: - self.backup_node( - backup_dir2, 'node2', node2, backup_type="delta", - options=['--stream', '--start-time', str(10000)]) - self.assertEqual( - 1, 0, - 
"Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - "ERROR: Cannot create directory for older backup", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - show_backup1 = self.show_pb(backup_dir1, 'node1')[1] - show_backup2 = self.show_pb(backup_dir2, 'node2')[0] - self.assertGreater(show_backup1['id'], show_backup2['id']) + 'postgres', + 'create extension ptrack') - # PAGE backup - startTime = int(time()) - self.backup_node( - backup_dir1, 'node1', node1, backup_type="page", - options=['--stream', '--start-time', str(startTime)]) - # make backup with start time definitelly earlier, than existing - try: + startTime = str(int(time())) self.backup_node( - backup_dir2, 'node2', node2, backup_type="page", - options=['--stream', '--start-time', str(10000)]) - self.assertEqual( - 1, 0, - "Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - "ERROR: Cannot create directory for older backup", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - show_backup1 = self.show_pb(backup_dir1, 'node1')[2] - show_backup2 = self.show_pb(backup_dir2, 'node2')[0] - self.assertGreater(show_backup1['id'], show_backup2['id']) - - # PTRACK backup - startTime = int(time()) - if self.ptrack and node1.major_version > 11: - node1.safe_psql( - "postgres", - "create extension ptrack") + backup_dir1, 'node1', node1, backup_type='ptrack', + options=['--stream', '--start-time={0}'.format(startTime)]) self.backup_node( - backup_dir1, 'node1', node1, backup_type="ptrack", - options=['--stream', '--start-time', str(startTime)]) - - if self.ptrack and node2.major_version > 11: - node2.safe_psql( - "postgres", - "create extension ptrack") - # make backup with start time definitelly earlier, than existing - try: - self.backup_node( - backup_dir2, 'node2', node2, backup_type="ptrack", - options=['--stream', '--start-time', str(10000)]) - self.assertEqual( - 1, 0, - "Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - "ERROR: Cannot create directory for older backup", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - # FULL backup - startTime = int(time()) - self.backup_node( - backup_dir1, 'node1', node1, backup_type="full", - options=['--stream', '--start-time', str(startTime)]) - self.backup_node( - backup_dir2, 'node2', node2, backup_type="full", - options=['--stream', '--start-time', str(startTime)]) - - show_backup1 = self.show_pb(backup_dir1, 'node1')[4] - show_backup2 = self.show_pb(backup_dir2, 'node2')[1] - self.assertEqual(show_backup1['id'], show_backup2['id']) + backup_dir2, 'node2', node2, backup_type='ptrack', + options=['--stream', '--start-time={0}'.format(startTime)]) + show_backup1 = self.show_pb(backup_dir1, 'node1')[3] + show_backup2 = self.show_pb(backup_dir2, 'node2')[3] + self.assertEqual(show_backup1['id'], show_backup2['id']) # Clean after yourself self.del_test_dir(module_name, fname) + diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 418ef4e17..59eb12aec 100644 --- a/tests/helpers/ptrack_helpers.py +++ 
b/tests/helpers/ptrack_helpers.py @@ -110,6 +110,26 @@ def is_nls_enabled(): return b'enable-nls' in p.communicate()[0] +def base36enc(number): + """Converts an integer to a base36 string.""" + alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' + base36 = '' + sign = '' + + if number < 0: + sign = '-' + number = -number + + if 0 <= number < len(alphabet): + return sign + alphabet[number] + + while number != 0: + number, i = divmod(number, len(alphabet)) + base36 = alphabet[i] + base36 + + return sign + base36 + + class ProbackupException(Exception): def __init__(self, message, cmd): self.message = message @@ -947,7 +967,7 @@ def backup_node( backup_type='full', datname=False, options=[], asynchronous=False, gdb=False, old_binary=False, return_id=True, no_remote=False, - env=None, startTime=None + env=None ): if not node and not data_dir: print('You must provide ether node or data_dir for backup') @@ -980,9 +1000,6 @@ def backup_node( if not old_binary: cmd_list += ['--no-sync'] - if startTime: - cmd_list += ['--start-time', startTime] - return self.run_pb(cmd_list + options, asynchronous, gdb, old_binary, return_id, env=env) def checkdb_node( diff --git a/tests/merge.py b/tests/merge.py index 5f092543c..4c374bdfb 100644 --- a/tests/merge.py +++ b/tests/merge.py @@ -1965,7 +1965,11 @@ def test_failed_merge_after_delete_3(self): self.del_test_dir(module_name, fname) - # @unittest.skip("skip") + # Skipped, because backups from the future are invalid. + # This cause a "ERROR: Can't assign backup_id, there is already a backup in future" + # now (PBCKP-259). We can conduct such a test again when we + # untie 'backup_id' from 'start_time' + @unittest.skip("skip") def test_merge_backup_from_future(self): """ take FULL backup, table PAGE backup from future, diff --git a/tests/restore.py b/tests/restore.py index b619078d5..ae1c7cbe0 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -1853,7 +1853,11 @@ def test_restore_chain_with_corrupted_backup(self): # Clean after yourself self.del_test_dir(module_name, fname) - # @unittest.skip("skip") + # Skipped, because backups from the future are invalid. + # This cause a "ERROR: Can't assign backup_id, there is already a backup in future" + # now (PBCKP-259). We can conduct such a test again when we + # untie 'backup_id' from 'start_time' + @unittest.skip("skip") def test_restore_backup_from_future(self): """more complex test_restore_chain()""" fname = self.id().split('.')[3] From 24a1036037d7c7e05e4224935128309f2c9532c7 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Mon, 5 Sep 2022 02:57:40 +0300 Subject: [PATCH 305/525] [PBCKP-258] fix tests.ptrack.PtrackTest.test_ptrack_unprivileged --- tests/ptrack.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/tests/ptrack.py b/tests/ptrack.py index d46ece119..783d3b3e7 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -545,8 +545,7 @@ def test_ptrack_unprivileged(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") # PG 9.6 elif self.get_version(node) > 90600 and self.get_version(node) < 100000: node.safe_psql( @@ -583,9 +582,7 @@ def test_ptrack_unprivileged(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; ' - ) + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") # >= 10 else: node.safe_psql( @@ -620,9 +617,7 @@ def test_ptrack_unprivileged(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; ' - ) + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") node.safe_psql( "backupdb", @@ -641,7 +636,8 @@ def test_ptrack_unprivileged(self): if ProbackupTest.enterprise: node.safe_psql( "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " + 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;') self.backup_node( backup_dir, 'node', node, From b8c2076437514d31153488d774fcf2aa2ef8b6ab Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Mon, 5 Sep 2022 03:04:18 +0300 Subject: [PATCH 306/525] [PBCKP-261] fix configure flags for tests.pgpro2068.BugTest.test_minrecpoint_on_replica --- travis/run_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 37614f970..1823b05de 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -47,7 +47,7 @@ cd postgres # Go to postgres dir if [ "$PG_PROBACKUP_PTRACK" = "ON" ]; then git apply -3 ../ptrack/patches/${PTRACK_PATCH_PG_BRANCH}-ptrack-core.diff fi -CFLAGS="-O0" ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests --enable-nls +CFLAGS="-O0" ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests --enable-nls --with-python make -s -j$(nproc) install #make -s -j$(nproc) -C 'src/common' install #make -s -j$(nproc) -C 'src/port' install From a4a2abde5295ae359413dc3353b8e25e743006f9 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Mon, 5 Sep 2022 17:12:58 +0300 Subject: [PATCH 307/525] [PBCKP-178] fix rare 'buffer error' in tests.validate.ValidateTest.test_validate_corrupt_page_header_map --- tests/validate.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/validate.py b/tests/validate.py index 22a03c3be..966ad81a8 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -4017,9 +4017,9 @@ def test_validate_corrupt_page_header_map(self): "Output: {0} \n CMD: {1}".format( self.output, self.cmd)) except ProbackupException as e: - self.assertTrue( - 'WARNING: An error occured during metadata decompression' in e.message and - 'data error' in e.message, + self.assertRegex( + e.message, + r'WARNING: An error occured during metadata decompression for file "[\w/]+": (data|buffer) error', '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) From 25fc034509d22af508559b38fbab847ccf8db577 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Mon, 5 Sep 2022 19:38:14 +0300 Subject: [PATCH 308/525] [PBCKP-236] stable test failure, dirty version --- tests/compatibility.py | 72 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/tests/compatibility.py b/tests/compatibility.py index e274c22be..262b940ef 100644 --- a/tests/compatibility.py +++ b/tests/compatibility.py @@ -10,6 +10,78 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase): + def setUp(self): + self.fname = self.id().split('.')[3] + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_catchup_with_different_remote_major_pg(self): + "Decription in jira issue PBCKP-236" #TODO REVIEW XXX explain the test + self.verbose = True + self.remote = True + pg_config = os.environ['PG_CONFIG'] + pg_path_ee_9_6 = '/home/avaness/postgres/postgres.build.9.6/bin/' + pg_config_ee_9_6 = pg_path_ee_9_6 + 'pg_config' + probackup_path_ee_9_6 = pg_path_ee_9_6 + 'pg_probackup' + pg_path_ee_11 = '/home/avaness/postgres/postgres.build.11/bin/' + pg_config_ee_11 = pg_path_ee_11 + 'pg_config' + probackup_path_ee_11 = pg_path_ee_11 + 'pg_probackup' + + os.environ['PG_CONFIG'] = pg_config_ee_11 + self.probackup_path = probackup_path_ee_11 + # os.environ['PG_CONFIG'] = pg_config_ee_9_6 + # self.probackup_path = probackup_path_ee_9_6 + + # backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + src_pg = self.make_simple_node( + base_dir=os.path.join(module_name, self.fname, 'src'), + set_replication=True, + # initdb_params=['--data-checksums'] + ) + src_pg.slow_start() + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question AS SELECT 42 AS answer") + + # do full catchup + os.environ['PG_CONFIG'] = pg_config_ee_11 + self.probackup_path = probackup_path_ee_11 + + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + # dst_pg = self.make_simple_node( + # base_dir=os.path.join(module_name, self.fname, 'dst'), + # set_replication=True, + # # initdb_params=['--data-checksums'] + # ) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options=['-d', 'postgres', '-p', str(src_pg.port), '--stream']#, '--remote-path=' + pg_path_ee_9_6] + ) + + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + dst_pg.stop() + + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question2 AS SELECT 42 AS answer") + + # do delta catchup + #TODO REVIEW XXX try to apply only one catchup (FULL) for test failure + 
self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options=['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--remote-path=' + pg_path_ee_9_6] + ) + + # Clean after yourself + self.del_test_dir(module_name, self.fname) + # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_page(self): From 9e9509d8aab21565d95b44daeafcca6b7516597c Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Thu, 8 Sep 2022 09:15:24 +0300 Subject: [PATCH 309/525] [PBCKP-263] fix for tests.archive.ArchiveTest.test_archive_get_batching_sanity (#532) --- src/archive.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/archive.c b/src/archive.c index 48114d955..1a19c3d84 100644 --- a/src/archive.c +++ b/src/archive.c @@ -1263,6 +1263,7 @@ uint32 run_wal_prefetch(const char *prefetch_dir, const char *archive_dir, arg->thread_num = i+1; arg->files = batch_files; + arg->n_fetched = 0; } /* Run threads */ From d4d78e18f34ee6d7bf04253a5da597165c8134b6 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Fri, 9 Sep 2022 22:09:41 +0300 Subject: [PATCH 310/525] [PBCKP-277] stabilize catchup.CatchupTest.test_config_exclusion --- .travis.yml | 24 +++++++++++++++--------- tests/catchup.py | 4 ++++ 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/.travis.yml b/.travis.yml index a7dae2ed1..52d6dba17 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,17 +26,23 @@ notifications: # Default MODE is basic, i.e. all tests with PG_PROBACKUP_TEST_BASIC=ON env: - - PG_VERSION=15 PG_BRANCH=master PTRACK_PATCH_PG_BRANCH=master - - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE - - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_BRANCH=REL_12_STABLE - - PG_VERSION=11 PG_BRANCH=REL_11_STABLE PTRACK_PATCH_PG_BRANCH=REL_11_STABLE - - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE +# - PG_VERSION=15 PG_BRANCH=master PTRACK_PATCH_PG_BRANCH=master +# - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE +# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_BRANCH=REL_12_STABLE +# - PG_VERSION=11 PG_BRANCH=REL_11_STABLE PTRACK_PATCH_PG_BRANCH=REL_11_STABLE +# - PG_VERSION=10 PG_BRANCH=REL_10_STABLE +# - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE +# - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=archive # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup + - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup.CatchupTest.test_config_exclusion + - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup.CatchupTest.test_config_exclusion + - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup.CatchupTest.test_config_exclusion + - PG_VERSION=11 PG_BRANCH=REL_11_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup.CatchupTest.test_config_exclusion + - PG_VERSION=10 PG_BRANCH=REL_10_STABLE MODE=catchup.CatchupTest.test_config_exclusion + - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE 
MODE=catchup.CatchupTest.test_config_exclusion + - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE MODE=catchup.CatchupTest.test_config_exclusion # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=checkdb # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=compression # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=delta diff --git a/tests/catchup.py b/tests/catchup.py index a83755c54..7ecd84697 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -1362,6 +1362,7 @@ def test_config_exclusion(self): dst_options = {} dst_options['port'] = str(dst_pg.port) self.set_auto_conf(dst_pg, dst_options) + dst_pg._assign_master(src_pg) dst_pg.slow_start(replica = True) dst_pg.stop() @@ -1390,6 +1391,7 @@ def test_config_exclusion(self): # check: run verification query src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_pg.catchup() # wait for replication dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') @@ -1419,6 +1421,7 @@ def test_config_exclusion(self): # check: run verification query src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(2*42)") src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_pg.catchup() # wait for replication dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') @@ -1447,6 +1450,7 @@ def test_config_exclusion(self): # check: run verification query src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(3*42)") src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_pg.catchup() # wait for replication dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') From 42241bd3ba8f2ee2d8d473637a3a3f143ba8eb3e Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Fri, 9 Sep 2022 22:21:28 +0300 Subject: [PATCH 311/525] [PBCKP-277] fix .travis.yml typo --- .travis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 52d6dba17..7cf50d0ad 100644 --- a/.travis.yml +++ b/.travis.yml @@ -36,10 +36,10 @@ env: # - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=archive # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup - - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup.CatchupTest.test_config_exclusion + - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE MODE=catchup.CatchupTest.test_config_exclusion - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup.CatchupTest.test_config_exclusion - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup.CatchupTest.test_config_exclusion - - PG_VERSION=11 PG_BRANCH=REL_11_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup.CatchupTest.test_config_exclusion + - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_BRANCH=REL_12_STABLE MODE=catchup.CatchupTest.test_config_exclusion + - PG_VERSION=11 PG_BRANCH=REL_11_STABLE PTRACK_PATCH_PG_BRANCH=REL_11_STABLE MODE=catchup.CatchupTest.test_config_exclusion - PG_VERSION=10 PG_BRANCH=REL_10_STABLE MODE=catchup.CatchupTest.test_config_exclusion - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE MODE=catchup.CatchupTest.test_config_exclusion - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE MODE=catchup.CatchupTest.test_config_exclusion From f78c63c8f56b8a7064f8799460156001dd1c6a76 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Sun, 11 Sep 2022 04:14:18 +0300 Subject: [PATCH 312/525] [PBCKP-236] first-stage compatibility protocol impl with stubs --- src/pg_probackup.h | 8 ++++++-- src/utils/file.c | 20 +++++++++++++++++--- src/utils/file.h | 2 +- src/utils/remote.c | 34 +++++++++++++++++++++++++++++----- 4 files changed, 53 insertions(+), 11 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 1885a191e..e68afc571 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -341,8 +341,8 @@ typedef enum ShowFormat #define PROGRAM_VERSION "2.5.8" /* update when remote agent API or behaviour changes */ -#define AGENT_PROTOCOL_VERSION 20501 -#define AGENT_PROTOCOL_VERSION_STR "2.5.1" +#define AGENT_PROTOCOL_VERSION 20509 +#define AGENT_PROTOCOL_VERSION_STR "2.5.9" /* update only when changing storage format */ #define STORAGE_FORMAT_VERSION "2.4.4" @@ -881,6 +881,10 @@ extern bool tliIsPartOfHistory(const parray *timelines, TimeLineID tli); extern DestDirIncrCompatibility check_incremental_compatibility(const char *pgdata, uint64 system_identifier, IncrRestoreMode incremental_mode); +/* in remote.c */ +extern void check_remote_agent_compatibility(int agent_version, char *compatibility_str); +extern size_t prepare_remote_agent_compatibility_str(char* compatibility_buf, size_t buf_size); + /* in merge.c */ extern void do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool no_sync); extern void merge_backups(pgBackup *backup, pgBackup *next_backup); diff --git a/src/utils/file.c b/src/utils/file.c index 7103c8f1d..e3d1e5801 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -268,9 +268,10 @@ fio_write_all(int fd, void const* buf, size_t size) return offs; } +//TODO REVIEW XXX move to remote.c???? 
/* Get version of remote agent */ -int -fio_get_agent_version(void) +void +fio_get_agent_version(int* protocol, char* payload_buf, size_t payload_buf_size) { fio_header hdr; hdr.cop = FIO_AGENT_VERSION; @@ -278,8 +279,13 @@ fio_get_agent_version(void) IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + if (hdr.size > payload_buf_size) + { + elog(ERROR, "Bad protocol, insufficient payload_buf_size=%u", payload_buf_size); + } - return hdr.arg; + *protocol = hdr.arg; + IO_CHECK(fio_read_all(fio_stdin, payload_buf, hdr.size), hdr.size); } /* Open input stream. Remote file is fetched to the in-memory buffer and then accessed through Linux fmemopen */ @@ -3210,6 +3216,7 @@ fio_delete_impl(mode_t mode, char *buf) } /* Execute commands at remote host */ +//TODO REVIEW XXX move to remote.c? void fio_communicate(int in, int out) { @@ -3316,6 +3323,13 @@ fio_communicate(int in, int out) case FIO_AGENT_VERSION: hdr.arg = AGENT_PROTOCOL_VERSION; IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + //TODO REVIEW XXX is it allowed by ANSI C to declare new scope inside??? + { + size_t payload_size = prepare_remote_agent_compatibility_str(buf, buf_size); + IO_CHECK(fio_write_all(out, buf, payload_size), payload_size); + //TODO REVIEW XXX make INFO to LOG or VERBOSE + elog(INFO, "TODO REVIEW XXX sent agent compatibility\n %s", buf); + } break; case FIO_STAT: /* Get information about file with specified path */ hdr.size = sizeof(st); diff --git a/src/utils/file.h b/src/utils/file.h index a554b4ab0..92c5f2eaa 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -91,7 +91,7 @@ extern fio_location MyLocation; extern void fio_redirect(int in, int out, int err); extern void fio_communicate(int in, int out); -extern int fio_get_agent_version(void); +extern void fio_get_agent_version(int* protocol, char* payload_buf, size_t payload_buf_size); extern FILE* fio_fopen(char const* name, char const* mode, fio_location location); extern size_t fio_fwrite(FILE* f, void const* buf, size_t size); extern ssize_t fio_fwrite_async_compressed(FILE* f, void const* buf, size_t size, int compress_alg); diff --git a/src/utils/remote.c b/src/utils/remote.c index 046ebd818..c7a1f9330 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -117,6 +117,9 @@ bool launch_agent(void) int infd[2]; int errfd[2]; int agent_version; + //TODO REVIEW XXX review buf_size + size_t payload_buf_size = 1024 * 8; + char payload_buf[payload_buf_size]; ssh_argc = 0; #ifdef WIN32 @@ -238,10 +241,31 @@ bool launch_agent(void) fio_redirect(infd[0], outfd[1], errfd[0]); /* write to stdout */ } - /* Make sure that remote agent has the same version - * TODO: we must also check PG version and fork edition + /* Make sure that remote agent has the same version, fork and other features to be binary compatible */ - agent_version = fio_get_agent_version(); + fio_get_agent_version(&agent_version, payload_buf, payload_buf_size); + check_remote_agent_compatibility(0, payload_buf); + + return true; +} + +//TODO REVIEW XXX review macro +#define STR(macro) #macro +size_t prepare_remote_agent_compatibility_str(char* compatibility_buf, size_t buf_size) +{ + size_t payload_size = snprintf(compatibility_buf, buf_size, + "%s\n%s\n%s\n%s\n", + STR(PG_MAJORVERSION), PG_MAJORVERSION, + STR(PGPRO_EDN), PGPRO_EDN); + if (payload_size >= buf_size) + { + elog(ERROR, "TODO REVIEW XXX too bad message buffer exhaust"); + } + return payload_size + 1; +} + +void 
check_remote_agent_compatibility(int agent_version, char *compatibility_str) +{ if (agent_version != AGENT_PROTOCOL_VERSION) { char agent_version_str[1024]; @@ -254,6 +278,6 @@ bool launch_agent(void) "consider to upgrade pg_probackup binary", agent_version_str, AGENT_PROTOCOL_VERSION_STR); } - - return true; + assert(false); + elog(ERROR, " check_remote_agent_compatibility() not implemented"); } From 1dfa5b99c2c20a1a97d01b7f141d450a938a9a4f Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Sun, 11 Sep 2022 04:37:46 +0300 Subject: [PATCH 313/525] [PBCKP-236] draft, first-stage compatibility protocol impl with stubs --- src/utils/file.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/utils/file.c b/src/utils/file.c index e3d1e5801..6c7bdbbff 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3323,6 +3323,7 @@ fio_communicate(int in, int out) case FIO_AGENT_VERSION: hdr.arg = AGENT_PROTOCOL_VERSION; IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + assert(false); //TODO REVIEW XXX is it allowed by ANSI C to declare new scope inside??? { size_t payload_size = prepare_remote_agent_compatibility_str(buf, buf_size); From c3d3c026c2f8446b93e5e73150a3c55a153f4317 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Sun, 11 Sep 2022 04:44:18 +0300 Subject: [PATCH 314/525] [PBCKP-236] draft, first-stage compatibility protocol impl with stubs --- src/utils/remote.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/utils/remote.c b/src/utils/remote.c index c7a1f9330..6a6c3c12b 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -255,8 +255,8 @@ size_t prepare_remote_agent_compatibility_str(char* compatibility_buf, size_t bu { size_t payload_size = snprintf(compatibility_buf, buf_size, "%s\n%s\n%s\n%s\n", - STR(PG_MAJORVERSION), PG_MAJORVERSION, - STR(PGPRO_EDN), PGPRO_EDN); + STR(PG_MAJORVERSION), PG_MAJORVERSION); +// STR(PGPRO_EDN), PGPRO_EDN); if (payload_size >= buf_size) { elog(ERROR, "TODO REVIEW XXX too bad message buffer exhaust"); From 46b7079edd63b43c41c104830d7feef9d4536476 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Sun, 11 Sep 2022 04:46:15 +0300 Subject: [PATCH 315/525] [PBCKP-236] draft, first-stage compatibility protocol impl with stubs --- src/utils/remote.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/utils/remote.c b/src/utils/remote.c index 6a6c3c12b..e4963b62a 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -254,7 +254,8 @@ bool launch_agent(void) size_t prepare_remote_agent_compatibility_str(char* compatibility_buf, size_t buf_size) { size_t payload_size = snprintf(compatibility_buf, buf_size, - "%s\n%s\n%s\n%s\n", +// "%s\n%s\n%s\n%s\n", + "%s\n%s\n", STR(PG_MAJORVERSION), PG_MAJORVERSION); // STR(PGPRO_EDN), PGPRO_EDN); if (payload_size >= buf_size) From f5fde7ef8e1ee479932df78086f7bed817c53902 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Mon, 12 Sep 2022 02:46:29 +0300 Subject: [PATCH 316/525] [PBCKP-236] draft, first-stage compatibility protocol impl with stubs --- src/utils/file.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 6c7bdbbff..65d0699c7 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -281,7 +281,7 @@ fio_get_agent_version(int* protocol, char* payload_buf, size_t payload_buf_size) IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); if (hdr.size > payload_buf_size) { - elog(ERROR, "Bad protocol, insufficient payload_buf_size=%u", payload_buf_size); + elog(ERROR, "Bad protocol, 
insufficient payload_buf_size=%zu", payload_buf_size); } *protocol = hdr.arg; @@ -3323,7 +3323,6 @@ fio_communicate(int in, int out) case FIO_AGENT_VERSION: hdr.arg = AGENT_PROTOCOL_VERSION; IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - assert(false); //TODO REVIEW XXX is it allowed by ANSI C to declare new scope inside??? { size_t payload_size = prepare_remote_agent_compatibility_str(buf, buf_size); @@ -3331,6 +3330,7 @@ fio_communicate(int in, int out) //TODO REVIEW XXX make INFO to LOG or VERBOSE elog(INFO, "TODO REVIEW XXX sent agent compatibility\n %s", buf); } + assert(false); break; case FIO_STAT: /* Get information about file with specified path */ hdr.size = sizeof(st); From 6d3ad888cd00dbbdac734c316970ecec6f0e123c Mon Sep 17 00:00:00 2001 From: Sofia Kopikova Date: Tue, 21 Jun 2022 12:54:30 +0300 Subject: [PATCH 317/525] [PBCKP-125] changes in function call CreateWalDirectoryMethod for 15 version Tags: pg_probackup --- src/stream.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/stream.c b/src/stream.c index 1ee8dee37..3b947af01 100644 --- a/src/stream.c +++ b/src/stream.c @@ -274,7 +274,13 @@ StreamLog(void *arg) ctl.synchronous = false; ctl.mark_done = false; -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 150000 + ctl.walmethod = CreateWalDirectoryMethod( + stream_arg->basedir, + COMPRESSION_NONE, + 0, + false); +#elif PG_VERSION_NUM >= 100000 ctl.walmethod = CreateWalDirectoryMethod( stream_arg->basedir, // (instance_config.compress_alg == NONE_COMPRESS) ? 0 : instance_config.compress_level, From 53abc0b6e735944354b2c40483b9ac82bcc4c499 Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Fri, 1 Jul 2022 15:46:53 +0300 Subject: [PATCH 318/525] [PGPRO-6938] pg_probackup has been ported to version 15 Has been tested on 15beta2 and 16 tags: pg_probackup --- src/backup.c | 34 +++--- src/parsexlog.c | 14 ++- src/stream.c | 2 +- src/utils/configuration.c | 1 + tests/archive.py | 13 ++- tests/auth_test.py | 105 +++++++++++++----- tests/backup.py | 186 +++++++++++++++++++++++++++----- tests/false_positive.py | 3 + tests/helpers/ptrack_helpers.py | 20 +++- tests/ptrack.py | 46 +++++++- tests/replica.py | 1 + tests/restore.py | 47 +++++++- tests/retention.py | 2 + 13 files changed, 379 insertions(+), 95 deletions(-) diff --git a/src/backup.c b/src/backup.c index 03ff7b72b..0fa8ee9fd 100644 --- a/src/backup.c +++ b/src/backup.c @@ -1056,20 +1056,14 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup, uint32 lsn_lo; params[0] = label; - elog(INFO, "wait for pg_start_backup()"); + elog(INFO, "wait for pg_backup_start()"); /* 2nd argument is 'fast'*/ params[1] = smooth ? "false" : "true"; - if (!exclusive_backup) - res = pgut_execute(conn, - "SELECT pg_catalog.pg_start_backup($1, $2, false)", - 2, - params); - else - res = pgut_execute(conn, - "SELECT pg_catalog.pg_start_backup($1, $2)", - 2, - params); + res = pgut_execute(conn, + "SELECT pg_catalog.pg_backup_start($1, $2)", + 2, + params); /* * Set flag that pg_start_backup() was called. 
If an error will happen it @@ -1618,7 +1612,7 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica "SELECT" " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," " current_timestamp(0)::timestamptz," - " pg_catalog.pg_stop_backup() as lsn", + " pg_catalog.pg_backup_stop() as lsn", stop_backup_on_master_query[] = "SELECT" " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," @@ -1626,7 +1620,7 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " lsn," " labelfile," " spcmapfile" - " FROM pg_catalog.pg_stop_backup(false, false)", + " FROM pg_catalog.pg_backup_stop(false)", stop_backup_on_master_before10_query[] = "SELECT" " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," @@ -1634,7 +1628,7 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " lsn," " labelfile," " spcmapfile" - " FROM pg_catalog.pg_stop_backup(false)", + " FROM pg_catalog.pg_backup_stop()", /* * In case of backup from replica >= 9.6 we do not trust minRecPoint * and stop_backup LSN, so we use latest replayed LSN as STOP LSN. @@ -1646,7 +1640,7 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " pg_catalog.pg_last_wal_replay_lsn()," " labelfile," " spcmapfile" - " FROM pg_catalog.pg_stop_backup(false, false)", + " FROM pg_catalog.pg_backup_stop(false)", stop_backup_on_replica_before10_query[] = "SELECT" " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," @@ -1654,7 +1648,7 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " pg_catalog.pg_last_xlog_replay_location()," " labelfile," " spcmapfile" - " FROM pg_catalog.pg_stop_backup(false)"; + " FROM pg_catalog.pg_backup_stop()"; const char * const stop_backup_query = is_exclusive ? @@ -1682,7 +1676,7 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica */ sent = pgut_send(conn, stop_backup_query, 0, NULL, WARNING); if (!sent) - elog(ERROR, "Failed to send pg_stop_backup query"); + elog(ERROR, "Failed to send pg_backup_stop query"); /* After we have sent pg_stop_backup, we don't need this callback anymore */ pgut_atexit_pop(backup_stopbackup_callback, &stop_callback_params); @@ -1728,7 +1722,7 @@ pg_stop_backup_consume(PGconn *conn, int server_version, if (interrupted) { pgut_cancel(conn); - elog(ERROR, "interrupted during waiting for pg_stop_backup"); + elog(ERROR, "interrupted during waiting for pg_backup_stop"); } if (pg_stop_backup_timeout == 1) @@ -1741,7 +1735,7 @@ pg_stop_backup_consume(PGconn *conn, int server_version, if (pg_stop_backup_timeout > timeout) { pgut_cancel(conn); - elog(ERROR, "pg_stop_backup doesn't answer in %d seconds, cancel it", timeout); + elog(ERROR, "pg_backup_stop doesn't answer in %d seconds, cancel it", timeout); } } else @@ -1753,7 +1747,7 @@ pg_stop_backup_consume(PGconn *conn, int server_version, /* Check successfull execution of pg_stop_backup() */ if (!query_result) - elog(ERROR, "pg_stop_backup() failed"); + elog(ERROR, "pg_backup_stop() failed"); else { switch (PQresultStatus(query_result)) diff --git a/src/parsexlog.c b/src/parsexlog.c index 7f1ca9c75..5cf760312 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -29,7 +29,10 @@ * RmgrNames is an array of resource manager names, to make error messages * a bit nicer. 
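 * (PostgreSQL 15 adds a "decode" callback to the resource manager definition,
 * which is why the 15+ branch of PG_RMGR below takes an extra argument.)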
*/ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 150000 +#define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup,mask,decode) \ + name, +#elif PG_VERSION_NUM >= 100000 #define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup,mask) \ name, #else @@ -1769,7 +1772,8 @@ extractPageInfo(XLogReaderState *record, XLogReaderData *reader_data, /* Is this a special record type that I recognize? */ - if (rmid == RM_DBASE_ID && rminfo == XLOG_DBASE_CREATE) + if (rmid == RM_DBASE_ID + && (rminfo == XLOG_DBASE_CREATE_WAL_LOG || rminfo == XLOG_DBASE_CREATE_FILE_COPY)) { /* * New databases can be safely ignored. They would be completely @@ -1823,13 +1827,13 @@ extractPageInfo(XLogReaderState *record, XLogReaderData *reader_data, RmgrNames[rmid], info); } - for (block_id = 0; block_id <= record->max_block_id; block_id++) + for (block_id = 0; block_id <= record->record->max_block_id; block_id++) { RelFileNode rnode; ForkNumber forknum; BlockNumber blkno; - if (!XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blkno)) + if (!XLogRecGetBlockTagExtended(record, block_id, &rnode, &forknum, &blkno, NULL)) continue; /* We only care about the main fork; others are copied as is */ @@ -1946,4 +1950,4 @@ static XLogReaderState* WalReaderAllocate(uint32 wal_seg_size, XLogReaderData *r #else return XLogReaderAllocate(&SimpleXLogPageRead, reader_data); #endif -} \ No newline at end of file +} diff --git a/src/stream.c b/src/stream.c index 3b947af01..7735f35fa 100644 --- a/src/stream.c +++ b/src/stream.c @@ -277,7 +277,7 @@ StreamLog(void *arg) #if PG_VERSION_NUM >= 150000 ctl.walmethod = CreateWalDirectoryMethod( stream_arg->basedir, - COMPRESSION_NONE, + PG_COMPRESSION_NONE, 0, false); #elif PG_VERSION_NUM >= 100000 diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 7ab242aa3..98c3b2994 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -22,6 +22,7 @@ #include #endif #include +#include #define MAXPG_LSNCOMPONENT 8 diff --git a/tests/archive.py b/tests/archive.py index 52fb225e8..81d013f6b 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -250,6 +250,7 @@ def test_pgpro434_3(self): "--log-level-file=LOG"], gdb=True) + # Attention! this breakpoint has been set on internal probackup function, not on a postgres core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() @@ -314,6 +315,7 @@ def test_pgpro434_4(self): "--log-level-file=info"], gdb=True) + # Attention! 
this breakpoint has been set on internal probackup function, not on a postgres core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() @@ -341,9 +343,14 @@ def test_pgpro434_4(self): with open(log_file, 'r') as f: log_content = f.read() - self.assertIn( - "ERROR: pg_stop_backup doesn't answer in 60 seconds, cancel it", - log_content) + if self.get_version(node) < 150000: + self.assertIn( + "ERROR: pg_stop_backup doesn't answer in 60 seconds, cancel it", + log_content) + else: + self.assertIn( + "ERROR: pg_backup_stop doesn't answer in 60 seconds, cancel it", + log_content) log_file = os.path.join(node.logs_dir, 'postgresql.log') with open(log_file, 'r') as f: diff --git a/tests/auth_test.py b/tests/auth_test.py index 78af21be9..39786d7a9 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -51,16 +51,29 @@ def test_backup_via_unprivileged_user(self): 1, 0, "Expecting Error due to missing grant on EXECUTE.") except ProbackupException as e: - self.assertIn( - "ERROR: query failed: ERROR: permission denied " - "for function pg_start_backup", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + if self.get_version(node) < 150000: + self.assertIn( + "ERROR: query failed: ERROR: permission denied " + "for function pg_start_backup", e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + else: + self.assertIn( + "ERROR: query failed: ERROR: permission denied " + "for function pg_backup_start", e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) - node.safe_psql( - "postgres", - "GRANT EXECUTE ON FUNCTION" - " pg_start_backup(text, boolean, boolean) TO backup;") + if self.get_version(node) < 150000: + node.safe_psql( + "postgres", + "GRANT EXECUTE ON FUNCTION" + " pg_start_backup(text, boolean, boolean) TO backup;") + else: + node.safe_psql( + "postgres", + "GRANT EXECUTE ON FUNCTION" + " pg_backup_start(text, boolean) TO backup;") if self.get_version(node) < 100000: node.safe_psql( @@ -97,17 +110,24 @@ def test_backup_via_unprivileged_user(self): 1, 0, "Expecting Error due to missing grant on EXECUTE.") except ProbackupException as e: - self.assertIn( - "ERROR: query failed: ERROR: permission denied " - "for function pg_stop_backup", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + if self.get_version(node) < 150000: + self.assertIn( + "ERROR: query failed: ERROR: permission denied " + "for function pg_stop_backup", e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + else: + self.assertIn( + "ERROR: query failed: ERROR: permission denied " + "for function pg_backup_stop", e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) if self.get_version(node) < self.version_to_num('10.0'): node.safe_psql( "postgres", "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup") - else: + elif self.get_vestion(node) < self.version_to_num('15.0'): node.safe_psql( "postgres", "GRANT EXECUTE ON FUNCTION " @@ -116,6 +136,16 @@ def test_backup_via_unprivileged_user(self): node.safe_psql( "postgres", "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup") + else: + node.safe_psql( + "postgres", + "GRANT EXECUTE ON FUNCTION " + "pg_backup_stop(boolean) TO backup") + # Do this for ptrack backups + node.safe_psql( + "postgres", + "GRANT EXECUTE ON FUNCTION pg_backup_stop() TO backup") + self.backup_node( backup_dir, 'node', 
node, options=['-U', 'backup']) @@ -177,20 +207,37 @@ def setUpClass(cls): except StartNodeException: raise unittest.skip("Node hasn't started") - cls.node.safe_psql( - "postgres", - "CREATE ROLE backup WITH LOGIN PASSWORD 'password'; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;") + if cls.pb.get_version(cls.node) < 150000: + cls.node.safe_psql( + "postgres", + "CREATE ROLE backup WITH LOGIN PASSWORD 'password'; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_switch_xlog() TO backup; " + "GRANT EXECUTE ON FUNCTION txid_current() TO backup; " + "GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;") + else: + cls.node.safe_psql( + "postgres", + "CREATE ROLE backup WITH LOGIN PASSWORD 'password'; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_backup_stop() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_switch_xlog() TO backup; " + "GRANT EXECUTE ON FUNCTION txid_current() TO backup; " + "GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;") + cls.pgpass_file = os.path.join(os.path.expanduser('~'), '.pgpass') @classmethod diff --git a/tests/backup.py b/tests/backup.py index 0cba8fe79..4f447c9bd 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -1927,9 +1927,10 @@ def test_backup_with_least_privileges_role(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # >= 10 - else: + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: node.safe_psql( 'backupdb', "REVOKE ALL ON DATABASE backupdb from PUBLIC; " @@ -1964,7 +1965,46 @@ def test_backup_with_least_privileges_role(self): "GRANT 
EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # >= 15 + else: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if self.ptrack: node.safe_psql( @@ -2266,9 +2306,11 @@ def test_backup_with_less_privileges_role(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # >= 10 - else: + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " + "COMMIT;" + ) + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: node.safe_psql( 'backupdb', "CREATE ROLE backup WITH LOGIN; " @@ -2282,7 +2324,28 @@ def test_backup_with_less_privileges_role(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " "GRANT EXECUTE ON FUNCTION 
pg_catalog.txid_current() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " + "COMMIT;" + ) + # >= 15 + else: + node.safe_psql( + 'backupdb', + "BEGIN; " + "CREATE ROLE backup WITH LOGIN; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " + "COMMIT;" + ) # enable STREAM backup node.safe_psql( @@ -3054,8 +3117,8 @@ def test_missing_replication_permission(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # >= 10 - else: + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: node.safe_psql( 'backupdb', "CREATE ROLE backup WITH LOGIN; " @@ -3075,7 +3138,31 @@ def test_missing_replication_permission(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # >= 15 + else: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if 
ProbackupTest.enterprise: node.safe_psql( @@ -3183,9 +3270,10 @@ def test_missing_replication_permission_1(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # >= 10 - else: + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: node.safe_psql( 'backupdb', "CREATE ROLE backup WITH LOGIN; " @@ -3205,7 +3293,31 @@ def test_missing_replication_permission_1(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # > 15 + else: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if ProbackupTest.enterprise: node.safe_psql( @@ -3331,7 +3443,7 @@ def test_backup_atexit(self): log_content) self.assertIn( - 'FROM pg_catalog.pg_stop_backup', + 'FROM pg_catalog.pg_backup_stop', log_content) self.assertIn( @@ -3369,10 +3481,15 @@ def test_pg_stop_backup_missing_permissions(self): node.safe_psql( 'postgres', 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) FROM backup') - else: + elif self.get_version(node) < 150000: node.safe_psql( 'postgres', 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) FROM backup') + else: + node.safe_psql( + 'postgres', + 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) FROM backup') + # Full backup in streaming mode try: @@ -3380,17 +3497,32 @@ def test_pg_stop_backup_missing_permissions(self): backup_dir, 'node', node, options=['--stream', '-U', 'backup']) # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of missing permissions on pg_stop_backup " - "\n Output: {0} \n CMD: {1}".format( - 
repr(self.output), self.cmd)) + if self.get_version(node) < 150000: + self.assertEqual( + 1, 0, + "Expecting Error because of missing permissions on pg_stop_backup " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + else: + self.assertEqual( + 1, 0, + "Expecting Error because of missing permissions on pg_backup_stop " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) except ProbackupException as e: - self.assertIn( - "ERROR: permission denied for function pg_stop_backup", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + if self.get_version(node) < 150000: + self.assertIn( + "ERROR: permission denied for function pg_stop_backup", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + else: + self.assertIn( + "ERROR: permission denied for function pg_backup_stop", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + self.assertIn( "query was: SELECT pg_catalog.txid_snapshot_xmax", e.message, diff --git a/tests/false_positive.py b/tests/false_positive.py index a101f8107..2ededdf12 100644 --- a/tests/false_positive.py +++ b/tests/false_positive.py @@ -198,6 +198,7 @@ def test_recovery_target_time_backup_victim(self): gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + # Attention! This breakpoint is set to a probackup internal fuction, not a postgres core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() gdb.remove_all_breakpoints() @@ -257,6 +258,7 @@ def test_recovery_target_lsn_backup_victim(self): backup_dir, 'node', node, options=['--log-level-console=LOG'], gdb=True) + # Attention! This breakpoint is set to a probackup internal fuction, not a postgres core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() gdb.remove_all_breakpoints() @@ -308,6 +310,7 @@ def test_streaming_timeout(self): backup_dir, 'node', node, gdb=True, options=['--stream', '--log-level-file=LOG']) + # Attention! 
This breakpoint is set to a probackup internal fuction, not a postgres core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 59eb12aec..d800f0d3e 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -476,8 +476,8 @@ def simple_bootstrap(self, node, role) -> None: 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) - # >= 10 - else: + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: node.safe_psql( 'postgres', 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' @@ -492,6 +492,22 @@ def simple_bootstrap(self, node, role) -> None: 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) + # >= 15 + else: + node.safe_psql( + 'postgres', + 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False): res = node.execute( diff --git a/tests/ptrack.py b/tests/ptrack.py index 783d3b3e7..a01405d6a 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -582,9 +582,10 @@ def test_ptrack_unprivileged(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # >= 10 - else: + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: node.safe_psql( 'backupdb', "REVOKE ALL ON DATABASE backupdb from PUBLIC; " @@ -617,7 +618,44 @@ def test_ptrack_unprivileged(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # >= 15 + else: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES 
IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) node.safe_psql( "backupdb", diff --git a/tests/replica.py b/tests/replica.py index 24dbaa39e..ea69e2d01 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -775,6 +775,7 @@ def test_replica_stop_lsn_null_offset_next_record(self): '--stream'], gdb=True) + # Attention! 
this breakpoint is set to a probackup internal function, not a postgres core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() gdb.remove_all_breakpoints() diff --git a/tests/restore.py b/tests/restore.py index ae1c7cbe0..49538bd1f 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -3272,9 +3272,10 @@ def test_missing_database_map(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # >= 10 - else: + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: node.safe_psql( 'backupdb', "REVOKE ALL ON DATABASE backupdb from PUBLIC; " @@ -3308,7 +3309,45 @@ def test_missing_database_map(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # >= 15 + else: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + 
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if self.ptrack: # TODO why backup works without these grants ? diff --git a/tests/retention.py b/tests/retention.py index b0399a239..122ab28ad 100644 --- a/tests/retention.py +++ b/tests/retention.py @@ -1519,6 +1519,7 @@ def test_window_error_backups_1(self): gdb = self.backup_node( backup_dir, 'node', node, backup_type='page', gdb=True) + # Attention! this breakpoint has been set on internal probackup function, not on a postgres core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() gdb.remove_all_breakpoints() @@ -1568,6 +1569,7 @@ def test_window_error_backups_2(self): gdb = self.backup_node( backup_dir, 'node', node, backup_type='page', gdb=True) + # Attention! this breakpoint has been set on internal probackup function, not on a postgres core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() gdb._execute('signal SIGKILL') From 0a1a075b12a466b17be6f1f9fcc8895e5a7e247f Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Tue, 23 Aug 2022 20:40:42 +0300 Subject: [PATCH 319/525] [PGPRO-6938] macro conditions fixed tags: pg_probackup --- .travis.yml | 1 + README.md | 76 ++++++++++++++++++++++----------------------- doc/pgprobackup.xml | 23 +++++++++++++- src/backup.c | 74 ++++++++++++++++++++++++++++++++++--------- src/parsexlog.c | 12 +++++++ src/pg_probackup.h | 6 ++++ 6 files changed, 139 insertions(+), 53 deletions(-) diff --git a/.travis.yml b/.travis.yml index a7dae2ed1..f113d05c4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -27,6 +27,7 @@ notifications: # Default MODE is basic, i.e. all tests with PG_PROBACKUP_TEST_BASIC=ON env: - PG_VERSION=15 PG_BRANCH=master PTRACK_PATCH_PG_BRANCH=master + - PG_VERSION=15 PG_BRANCH=REL_15_STABLE PTRACK_PATCH_PG_BRANCH=REL_15_STABLE - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_BRANCH=REL_12_STABLE diff --git a/README.md b/README.md index 5da8d199e..433978473 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ `pg_probackup` is a utility to manage backup and recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure. The utility is compatible with: -* PostgreSQL 9.6, 10, 11, 12, 13, 14; +* PostgreSQL 9.6, 10, 11, 12, 13, 14, 15; As compared to other backup solutions, `pg_probackup` offers the following benefits that can help you implement different backup strategies and deal with large amounts of data: * Incremental backup: page-level incremental backup allows you to save disk space, speed up backup and restore. With three different incremental modes, you can plan the backup strategy in accordance with your data flow. 
@@ -41,9 +41,9 @@ Regardless of the chosen backup type, all backups taken with `pg_probackup` supp ## ptrack support `PTRACK` backup support provided via following options: -* vanilla PostgreSQL 11, 12, 13, 14 with [ptrack extension](https://github.com/postgrespro/ptrack) -* Postgres Pro Standard 11, 12, 13 -* Postgres Pro Enterprise 11, 12, 13 +* vanilla PostgreSQL 11, 12, 13, 14, 15 with [ptrack extension](https://github.com/postgrespro/ptrack) +* Postgres Pro Standard 11, 12, 13, 14 +* Postgres Pro Enterprise 11, 12, 13, 14 ## Limitations @@ -74,62 +74,62 @@ Installers are available in release **assets**. [Latests](https://github.com/pos #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6}-dbg +sudo apt-get install pg-probackup-{15,14,13,12,11,10,9.6} +sudo apt-get install pg-probackup-{15,14,13,12,11,10,9.6}-dbg #DEB-SRC Packages sudo sh -c 'echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\ /etc/apt/sources.list.d/pg_probackup.list' && sudo apt-get update -sudo apt-get source pg-probackup-{14,13,12,11,10,9.6} +sudo apt-get source pg-probackup-{15,14,13,12,11,10,9.6} #DEB Astra Linix Orel sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6}{-dbg,} +sudo apt-get install pg-probackup-{15,14,13,12,11,10,9.6}{-dbg,} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{15,14,13,12,11,10,9.6} +yum install pg_probackup-{15,14,13,12,11,10,9.6}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-rhel.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{15,14,13,12,11,10,9.6} +yum install pg_probackup-{15,14,13,12,11,10,9.6}-debuginfo #RPM Oracle Linux Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-oraclelinux.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{15,14,13,12,11,10,9.6} +yum install pg_probackup-{15,14,13,12,11,10,9.6}-debuginfo #SRPM Centos|RHEL|OracleLinux Packages -yumdownloader --source pg_probackup-{14,13,12,11,10,9.6} +yumdownloader --source pg_probackup-{15,14,13,12,11,10,9.6} #RPM SUSE|SLES Packages zypper install --allow-unsigned-rpm -y https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-suse.noarch.rpm -zypper --gpg-auto-import-keys install 
-y pg_probackup-{14,13,12,11,10,9.6} -zypper install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +zypper --gpg-auto-import-keys install -y pg_probackup-{15,14,13,12,11,10,9.6} +zypper install pg_probackup-{15,14,13,12,11,10,9.6}-debuginfo #SRPM SUSE|SLES Packages -zypper si pg_probackup-{14,13,12,11,10,9.6} +zypper si pg_probackup-{15,14,13,12,11,10,9.6} #RPM ALT Linux 7 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p7 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{15,14,13,12,11,10,9.6} +sudo apt-get install pg_probackup-{15,14,13,12,11,10,9.6}-debuginfo #RPM ALT Linux 8 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p8 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{15,14,13,12,11,10,9.6} +sudo apt-get install pg_probackup-{15,14,13,12,11,10,9.6}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p9 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{15,14,13,12,11,10,9.6} +sudo apt-get install pg_probackup-{15,14,13,12,11,10,9.6}-debuginfo ``` #### pg_probackup for PostgresPro Standard and Enterprise @@ -137,8 +137,8 @@ sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup-forks.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10,9.6}-dbg +sudo apt-get install pg-probackup-{std,ent}-{14,13,12,11,10,9.6} +sudo apt-get install pg-probackup-{std,ent}-{14,13,12,11,10,9.6}-dbg #DEB Astra Linix Orel sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' @@ -148,35 +148,35 @@ sudo apt-get install pg-probackup-{std,ent}-{12,11,10,9.6}{-dbg,} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-centos.noarch.rpm -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} +yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-rhel.noarch.rpm -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} +yum install 
pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo #RPM Oracle Linux Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-oraclelinux.noarch.rpm -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} +yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo #RPM ALT Linux 7 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p7 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} +sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo #RPM ALT Linux 8 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p8 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} +sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p9 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' && sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} +sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo ``` Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.github.io/pg_probackup/#pbk-install-and-setup). 
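As a minimal, hedged sketch of that setup step (not part of this patch; the catalog path, data directory, instance name, and the versioned binary name `pg_probackup-15` are illustrative placeholders), a first backup with the packages above might look like:

```shell
# create the backup catalog
pg_probackup-15 init -B /var/lib/pg_probackup

# register the local cluster under an instance name
pg_probackup-15 add-instance -B /var/lib/pg_probackup -D /var/lib/postgresql/15/main --instance main

# take a FULL backup in STREAM mode using the restricted backup role
pg_probackup-15 backup -B /var/lib/pg_probackup --instance main -b FULL --stream -U backup -d backupdb
```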
diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index fc2a341e8..6babf00f7 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -653,7 +653,7 @@ GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO backup; COMMIT; - For PostgreSQL 10 or higher: + For PostgreSQL 10: BEGIN; @@ -672,6 +672,27 @@ GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO backup; COMMIT; + + + For PostgreSQL 15 or higher: + + +BEGIN; +CREATE ROLE backup WITH LOGIN; +GRANT USAGE ON SCHEMA pg_catalog TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO backup; +COMMIT; In the diff --git a/src/backup.c b/src/backup.c index 0fa8ee9fd..31289978d 100644 --- a/src/backup.c +++ b/src/backup.c @@ -1056,14 +1056,22 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup, uint32 lsn_lo; params[0] = label; +#if PG_VERSION_NUM >= 150000 elog(INFO, "wait for pg_backup_start()"); +#else + elog(INFO, "wait for pg_start_backup()"); +#endif /* 2nd argument is 'fast'*/ params[1] = smooth ? "false" : "true"; res = pgut_execute(conn, - "SELECT pg_catalog.pg_backup_start($1, $2)", - 2, - params); +#if PG_VERSION_NUM >= 150000 + "SELECT pg_catalog.pg_backup_start($1, $2)", +#else + "SELECT pg_catalog.pg_start_backup($1, $2, false)", +#endif + 2, + params); /* * Set flag that pg_start_backup() was called. 
If an error will happen it @@ -1612,7 +1620,7 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica "SELECT" " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," " current_timestamp(0)::timestamptz," - " pg_catalog.pg_backup_stop() as lsn", + " pg_catalog.pg_stop_backup() as lsn", stop_backup_on_master_query[] = "SELECT" " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," @@ -1620,7 +1628,7 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " lsn," " labelfile," " spcmapfile" - " FROM pg_catalog.pg_backup_stop(false)", + " FROM pg_catalog.pg_stop_backup(false, false)", stop_backup_on_master_before10_query[] = "SELECT" " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," @@ -1628,7 +1636,15 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " lsn," " labelfile," " spcmapfile" - " FROM pg_catalog.pg_backup_stop()", + " FROM pg_catalog.pg_stop_backup(false)", + stop_backup_on_master_after15_query[] = + "SELECT" + " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," + " current_timestamp(0)::timestamptz," + " lsn," + " labelfile," + " spcmapfile" + " FROM pg_catalog.pg_backup_stop(false)", /* * In case of backup from replica >= 9.6 we do not trust minRecPoint * and stop_backup LSN, so we use latest replayed LSN as STOP LSN. @@ -1640,7 +1656,7 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " pg_catalog.pg_last_wal_replay_lsn()," " labelfile," " spcmapfile" - " FROM pg_catalog.pg_backup_stop(false)", + " FROM pg_catalog.pg_stop_backup(false, false)", stop_backup_on_replica_before10_query[] = "SELECT" " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," @@ -1648,19 +1664,33 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " pg_catalog.pg_last_xlog_replay_location()," " labelfile," " spcmapfile" - " FROM pg_catalog.pg_backup_stop()"; + " FROM pg_catalog.pg_stop_backup(false)", + stop_backup_on_replica_after15_query[] = + "SELECT" + " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," + " current_timestamp(0)::timestamptz," + " pg_catalog.pg_last_wal_replay_lsn()," + " labelfile," + " spcmapfile" + " FROM pg_catalog.pg_backup_stop(false)"; const char * const stop_backup_query = is_exclusive ? stop_exlusive_backup_query : - server_version >= 100000 ? + server_version >= 150000 ? (is_started_on_replica ? - stop_backup_on_replica_query : - stop_backup_on_master_query + stop_backup_on_replica_after15_query : + stop_backup_on_master_after15_query ) : - (is_started_on_replica ? - stop_backup_on_replica_before10_query : - stop_backup_on_master_before10_query + (server_version >= 100000 ? + (is_started_on_replica ? + stop_backup_on_replica_query : + stop_backup_on_master_query + ) : + (is_started_on_replica ? 
+ stop_backup_on_replica_before10_query : + stop_backup_on_master_before10_query + ) ); bool sent = false; @@ -1676,7 +1706,11 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica */ sent = pgut_send(conn, stop_backup_query, 0, NULL, WARNING); if (!sent) +#if PG_VERSION_NUM >= 150000 elog(ERROR, "Failed to send pg_backup_stop query"); +#else + elog(ERROR, "Failed to send pg_stop_backup query"); +#endif /* After we have sent pg_stop_backup, we don't need this callback anymore */ pgut_atexit_pop(backup_stopbackup_callback, &stop_callback_params); @@ -1722,7 +1756,11 @@ pg_stop_backup_consume(PGconn *conn, int server_version, if (interrupted) { pgut_cancel(conn); +#if PG_VERSION_NUM >= 150000 elog(ERROR, "interrupted during waiting for pg_backup_stop"); +#else + elog(ERROR, "interrupted during waiting for pg_stop_backup"); +#endif } if (pg_stop_backup_timeout == 1) @@ -1735,7 +1773,11 @@ pg_stop_backup_consume(PGconn *conn, int server_version, if (pg_stop_backup_timeout > timeout) { pgut_cancel(conn); +#if PG_VERSION_NUM >= 150000 elog(ERROR, "pg_backup_stop doesn't answer in %d seconds, cancel it", timeout); +#else + elog(ERROR, "pg_stop_backup doesn't answer in %d seconds, cancel it", timeout); +#endif } } else @@ -1747,7 +1789,11 @@ pg_stop_backup_consume(PGconn *conn, int server_version, /* Check successfull execution of pg_stop_backup() */ if (!query_result) +#if PG_VERSION_NUM >= 150000 elog(ERROR, "pg_backup_stop() failed"); +#else + elog(ERROR, "pg_stop_backup() failed"); +#endif else { switch (PQresultStatus(query_result)) diff --git a/src/parsexlog.c b/src/parsexlog.c index 5cf760312..f12aae904 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -1773,7 +1773,11 @@ extractPageInfo(XLogReaderState *record, XLogReaderData *reader_data, /* Is this a special record type that I recognize? */ if (rmid == RM_DBASE_ID +#if PG_VERSION_NUM >= 150000 && (rminfo == XLOG_DBASE_CREATE_WAL_LOG || rminfo == XLOG_DBASE_CREATE_FILE_COPY)) +#else + && rminfo == XLOG_DBASE_CREATE) +#endif { /* * New databases can be safely ignored. 
They would be completely @@ -1827,13 +1831,21 @@ extractPageInfo(XLogReaderState *record, XLogReaderData *reader_data, RmgrNames[rmid], info); } +#if PG_VERSION_NUM >= 150000 for (block_id = 0; block_id <= record->record->max_block_id; block_id++) +#else + for (block_id = 0; block_id <= record->max_block_id; block_id++) +#endif { RelFileNode rnode; ForkNumber forknum; BlockNumber blkno; +#if PG_VERSION_NUM >= 150000 if (!XLogRecGetBlockTagExtended(record, block_id, &rnode, &forknum, &blkno, NULL)) +#else + if (!XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blkno)) +#endif continue; /* We only care about the main fork; others are copied as is */ diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 1885a191e..533b05d58 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -50,6 +50,12 @@ #include #endif +#if PG_VERSION_NUM >= 150000 +// _() is explicitly undefined in libpq-int.h +// https://github.com/postgres/postgres/commit/28ec316787674dd74d00b296724a009b6edc2fb0 +#define _(s) gettext(s) +#endif + /* Wrap the code that we're going to delete after refactoring in this define*/ #define REFACTORE_ME From 497751c0b63b8cfdf825beeec26f1d66a902be6e Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Thu, 15 Sep 2022 05:47:36 +0300 Subject: [PATCH 320/525] [PBCKP-236] draft, tests.CompatibilityTest.test_catchup_with_different_remote_major_pg fixes --- tests/compatibility.py | 50 +++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 28 deletions(-) diff --git a/tests/compatibility.py b/tests/compatibility.py index 262b940ef..0562b441c 100644 --- a/tests/compatibility.py +++ b/tests/compatibility.py @@ -14,29 +14,30 @@ def setUp(self): self.fname = self.id().split('.')[3] # @unittest.expectedFailure - # @unittest.skip("skip") + @unittest.skip("skip") def test_catchup_with_different_remote_major_pg(self): - "Decription in jira issue PBCKP-236" #TODO REVIEW XXX explain the test + """ + Decription in jira issue PBCKP-236 + This test requires builds both PGPROEE11 and PGPROEE9_6 + + prerequisites: + - git tag for PBCKP 2.5.1 + - master probackup build should be inside PGPROEE11 + - agent probackup build is inside PGPROEE9_6 + + calling probackup PGPROEE9_6 agent from PGPROEE11 probackup master for DELTA backup causes the PBCKP-236 problem + + please correct path for agent's pg_path_ee_9_6 = '/home/avaness/postgres/postgres.build.ee.9.6/bin/' + """ + self.verbose = True self.remote = True - pg_config = os.environ['PG_CONFIG'] - pg_path_ee_9_6 = '/home/avaness/postgres/postgres.build.9.6/bin/' - pg_config_ee_9_6 = pg_path_ee_9_6 + 'pg_config' - probackup_path_ee_9_6 = pg_path_ee_9_6 + 'pg_probackup' - pg_path_ee_11 = '/home/avaness/postgres/postgres.build.11/bin/' - pg_config_ee_11 = pg_path_ee_11 + 'pg_config' - probackup_path_ee_11 = pg_path_ee_11 + 'pg_probackup' - - os.environ['PG_CONFIG'] = pg_config_ee_11 - self.probackup_path = probackup_path_ee_11 - # os.environ['PG_CONFIG'] = pg_config_ee_9_6 - # self.probackup_path = probackup_path_ee_9_6 - - # backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + # please use your own local path + pg_path_ee_9_6 = '/home/avaness/postgres/postgres.build.ee.9.6/bin/' + src_pg = self.make_simple_node( base_dir=os.path.join(module_name, self.fname, 'src'), set_replication=True, - # initdb_params=['--data-checksums'] ) src_pg.slow_start() src_pg.safe_psql( @@ -44,20 +45,12 @@ def test_catchup_with_different_remote_major_pg(self): "CREATE TABLE ultimate_question AS SELECT 42 AS 
answer") # do full catchup - os.environ['PG_CONFIG'] = pg_config_ee_11 - self.probackup_path = probackup_path_ee_11 - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) - # dst_pg = self.make_simple_node( - # base_dir=os.path.join(module_name, self.fname, 'dst'), - # set_replication=True, - # # initdb_params=['--data-checksums'] - # ) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, - options=['-d', 'postgres', '-p', str(src_pg.port), '--stream']#, '--remote-path=' + pg_path_ee_9_6] + options=['-d', 'postgres', '-p', str(src_pg.port), '--stream'] ) dst_options = {} @@ -70,12 +63,13 @@ def test_catchup_with_different_remote_major_pg(self): "postgres", "CREATE TABLE ultimate_question2 AS SELECT 42 AS answer") - # do delta catchup - #TODO REVIEW XXX try to apply only one catchup (FULL) for test failure + # do delta catchup with remote pg_probackup agent with another postgres major version + # this DELTA backup should fail without PBCKP-236 patch. self.catchup_node( backup_mode = 'DELTA', source_pgdata = src_pg.data_dir, destination_node = dst_pg, + # here's substitution of --remoge-path pg_probackup agent compiled with another postgres version options=['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--remote-path=' + pg_path_ee_9_6] ) From 044c0376c849608cb79a0e1d53863a619c4799c5 Mon Sep 17 00:00:00 2001 From: Sofia Kopikova Date: Thu, 15 Sep 2022 15:45:47 +0300 Subject: [PATCH 321/525] [PBCKP-125] changes in function call CreateWalDirectoryMethod for 15 version --- src/stream.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/stream.c b/src/stream.c index 7735f35fa..f7bbeae5a 100644 --- a/src/stream.c +++ b/src/stream.c @@ -274,18 +274,20 @@ StreamLog(void *arg) ctl.synchronous = false; ctl.mark_done = false; +#if PG_VERSION_NUM >= 100000 #if PG_VERSION_NUM >= 150000 ctl.walmethod = CreateWalDirectoryMethod( stream_arg->basedir, PG_COMPRESSION_NONE, 0, false); -#elif PG_VERSION_NUM >= 100000 +#else /* PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 150000 */ ctl.walmethod = CreateWalDirectoryMethod( stream_arg->basedir, // (instance_config.compress_alg == NONE_COMPRESS) ? 
0 : instance_config.compress_level, 0, false); +#endif /* PG_VERSION_NUM >= 150000 */ ctl.replication_slot = replication_slot; ctl.stop_socket = PGINVALID_SOCKET; ctl.do_sync = false; /* We sync all files at the end of backup */ From eefd88768a8cb2b6350d69ca47cc28a5e8beb160 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Fri, 16 Sep 2022 01:06:52 +0300 Subject: [PATCH 322/525] [PBCKP-236] draft, solution without couple of unapplied shortenings --- src/pg_probackup.h | 5 ++- src/utils/file.c | 16 ++++--- src/utils/remote.c | 103 ++++++++++++++++++++++++++++++++++++++------- 3 files changed, 99 insertions(+), 25 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index e68afc571..13dfe1989 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -882,8 +882,9 @@ extern DestDirIncrCompatibility check_incremental_compatibility(const char *pgda IncrRestoreMode incremental_mode); /* in remote.c */ -extern void check_remote_agent_compatibility(int agent_version, char *compatibility_str); -extern size_t prepare_remote_agent_compatibility_str(char* compatibility_buf, size_t buf_size); +extern void check_remote_agent_compatibility(int agent_version, + char *compatibility_str, size_t compatibility_str_max_size); +extern size_t prepare_compatibility_str(char* compatibility_buf, size_t compatibility_buf_size); /* in merge.c */ extern void do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool no_sync); diff --git a/src/utils/file.c b/src/utils/file.c index 65d0699c7..b0dc39ae9 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -281,7 +281,8 @@ fio_get_agent_version(int* protocol, char* payload_buf, size_t payload_buf_size) IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); if (hdr.size > payload_buf_size) { - elog(ERROR, "Bad protocol, insufficient payload_buf_size=%zu", payload_buf_size); + //TODO REVIEW XXX %zu is C99 but not ANSI S standard, should we cast to unsigned long? + elog(ERROR, "Corrupted remote compatibility protocol: insufficient payload_buf_size=%zu", payload_buf_size); } *protocol = hdr.arg; @@ -3321,17 +3322,18 @@ fio_communicate(int in, int out) IO_CHECK(fio_write_all(out, buf, hdr.size), hdr.size); break; case FIO_AGENT_VERSION: - hdr.arg = AGENT_PROTOCOL_VERSION; - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - //TODO REVIEW XXX is it allowed by ANSI C to declare new scope inside??? { - size_t payload_size = prepare_remote_agent_compatibility_str(buf, buf_size); + size_t payload_size = prepare_compatibility_str(buf, buf_size); + + hdr.arg = AGENT_PROTOCOL_VERSION; + hdr.size = payload_size; + + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(out, buf, payload_size), payload_size); //TODO REVIEW XXX make INFO to LOG or VERBOSE elog(INFO, "TODO REVIEW XXX sent agent compatibility\n %s", buf); + break; } - assert(false); - break; case FIO_STAT: /* Get information about file with specified path */ hdr.size = sizeof(st); rc = hdr.arg ? 
stat(buf, &st) : lstat(buf, &st); diff --git a/src/utils/remote.c b/src/utils/remote.c index e4963b62a..af3e460c0 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -118,7 +118,7 @@ bool launch_agent(void) int errfd[2]; int agent_version; //TODO REVIEW XXX review buf_size - size_t payload_buf_size = 1024 * 8; + int payload_buf_size = 1024 * 8; char payload_buf[payload_buf_size]; ssh_argc = 0; @@ -244,29 +244,83 @@ bool launch_agent(void) /* Make sure that remote agent has the same version, fork and other features to be binary compatible */ fio_get_agent_version(&agent_version, payload_buf, payload_buf_size); - check_remote_agent_compatibility(0, payload_buf); + check_remote_agent_compatibility(agent_version, payload_buf, payload_buf_size); return true; } -//TODO REVIEW XXX review macro -#define STR(macro) #macro -size_t prepare_remote_agent_compatibility_str(char* compatibility_buf, size_t buf_size) +#define COMPATIBILITY_VAL(macro) #macro, macro +#define COMPATIBILITY_STR(macro) #macro +#define COMPATIBILITY_VAL_STR(macro) #macro, COMPATIBILITY_STR(macro) + +#define COMPATIBILITY_VAL_SEPARATOR "=" +#define COMPATIBILITY_LINE_SEPARATOR "\n" + +static char* compatibility_params[] = { + COMPATIBILITY_VAL(PG_MAJORVERSION), + //TODO remove? + //TODO doesn't work macro name check for ints!!!! + COMPATIBILITY_VAL_STR(SIZEOF_VOID_P), + //TODO REVIEW XXX can use edition.h/extract_pgpro_edition() +#ifdef PGPRO_EDN + //TODO add vanilla + //TODO make "1c" -> "vanilla" + COMPATIBILITY_VAL(PGPRO_EDN), +#endif +}; + +/* + * Compose compatibility string to be sent by pg_probackup agent + * through ssh and to be verified by pg_probackup peer. + * Compatibility string contains postgres essential vars as strings + * in format "var_name" + COMPATIBILITY_VAL_SEPARATOR + "var_value" + COMPATIBILITY_LINE_SEPARATOR + */ +size_t prepare_compatibility_str(char* compatibility_buf, size_t compatibility_buf_size) { - size_t payload_size = snprintf(compatibility_buf, buf_size, -// "%s\n%s\n%s\n%s\n", - "%s\n%s\n", - STR(PG_MAJORVERSION), PG_MAJORVERSION); -// STR(PGPRO_EDN), PGPRO_EDN); - if (payload_size >= buf_size) + char tmp_buf[1024]; + int size_inc = 0; + size_t result_size = 1; + size_t compatibility_params_array_size = sizeof compatibility_params / sizeof compatibility_params[0];; + + *compatibility_buf = '\0'; + Assert(compatibility_params_array_size % 2 == 0); + + //TODO !!!! 
+ for (int i = 0; i < compatibility_params_array_size; i+=2) { - elog(ERROR, "TODO REVIEW XXX too bad message buffer exhaust"); + size_inc = snprintf(compatibility_buf + size_inc, compatibility_buf_size, + "%s" COMPATIBILITY_VAL_SEPARATOR "%s" COMPATIBILITY_LINE_SEPARATOR, + compatibility_params[i], compatibility_params[i+1]); + +// size_inc = snprintf(tmp_buf, sizeof tmp_buf, +// "%s" COMPATIBILITY_VAL_SEPARATOR "%s" COMPATIBILITY_LINE_SEPARATOR, +// compatibility_params[i], compatibility_params[i+1]); + if (size_inc >= sizeof tmp_buf) + { + //TODO make Assert + elog(ERROR, "Compatibility params from agent doesn't fit to %zu chars, %s=%s", + sizeof tmp_buf - 1, compatibility_params[i], compatibility_params[i+1] ); + } + + result_size += size_inc; + if (result_size > compatibility_buf_size) + { + //TODO make Assert + elog(ERROR, "Can't fit compatibility string size %zu to buffer size %zu:\n%s\n%s", + result_size, compatibility_buf_size, compatibility_buf, tmp_buf); + } + strcat(compatibility_buf, tmp_buf); } - return payload_size + 1; + return result_size; } -void check_remote_agent_compatibility(int agent_version, char *compatibility_str) +/* + * Check incoming remote agent's compatibility params for equality to local ones. + */ +void check_remote_agent_compatibility(int agent_version, char *compatibility_str, size_t compatibility_str_max_size) { + elog(LOG, "Agent version=%d", agent_version); + if (agent_version != AGENT_PROTOCOL_VERSION) { char agent_version_str[1024]; @@ -279,6 +333,23 @@ void check_remote_agent_compatibility(int agent_version, char *compatibility_str "consider to upgrade pg_probackup binary", agent_version_str, AGENT_PROTOCOL_VERSION_STR); } - assert(false); - elog(ERROR, " check_remote_agent_compatibility() not implemented"); + + if (strnlen(compatibility_str, compatibility_str_max_size) == compatibility_str_max_size) + { + elog(ERROR, "Corrupted remote compatibility protocol: compatibility string has no terminating \\0"); + } + + elog(LOG, "Agent compatibility params: '%s'", compatibility_str); + + /* checking compatibility params */ + { + char *buf[compatibility_str_max_size]; + + prepare_compatibility_str(buf, sizeof buf); + if(!strcmp(compatibility_str, buf)) + { + elog(ERROR, "Incompatible agent params, expected %s", buf); + } + } + } From 0604cce21de69a3e3b0ea6a6634a6dac53ae39ea Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Fri, 16 Sep 2022 01:55:25 +0300 Subject: [PATCH 323/525] [PBCKP-236] draft, solution without macros cleanup --- src/utils/file.c | 4 +--- src/utils/remote.c | 48 +++++++++++++--------------------------------- 2 files changed, 14 insertions(+), 38 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index b0dc39ae9..fa0983947 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -281,7 +281,7 @@ fio_get_agent_version(int* protocol, char* payload_buf, size_t payload_buf_size) IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); if (hdr.size > payload_buf_size) { - //TODO REVIEW XXX %zu is C99 but not ANSI S standard, should we cast to unsigned long? + //TODO REVIEW XXX %zu is C99 but not ANSI S compatible, should we use ints, %zu is also applied at data.c:501 data.c:1638?? 
elog(ERROR, "Corrupted remote compatibility protocol: insufficient payload_buf_size=%zu", payload_buf_size); } @@ -3330,8 +3330,6 @@ fio_communicate(int in, int out) IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(out, buf, payload_size), payload_size); - //TODO REVIEW XXX make INFO to LOG or VERBOSE - elog(INFO, "TODO REVIEW XXX sent agent compatibility\n %s", buf); break; } case FIO_STAT: /* Get information about file with specified path */ diff --git a/src/utils/remote.c b/src/utils/remote.c index af3e460c0..2babe79ae 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -277,41 +277,20 @@ static char* compatibility_params[] = { */ size_t prepare_compatibility_str(char* compatibility_buf, size_t compatibility_buf_size) { - char tmp_buf[1024]; - int size_inc = 0; - size_t result_size = 1; + size_t result_size = 0; size_t compatibility_params_array_size = sizeof compatibility_params / sizeof compatibility_params[0];; *compatibility_buf = '\0'; Assert(compatibility_params_array_size % 2 == 0); - //TODO !!!! for (int i = 0; i < compatibility_params_array_size; i+=2) { - size_inc = snprintf(compatibility_buf + size_inc, compatibility_buf_size, - "%s" COMPATIBILITY_VAL_SEPARATOR "%s" COMPATIBILITY_LINE_SEPARATOR, - compatibility_params[i], compatibility_params[i+1]); - -// size_inc = snprintf(tmp_buf, sizeof tmp_buf, -// "%s" COMPATIBILITY_VAL_SEPARATOR "%s" COMPATIBILITY_LINE_SEPARATOR, -// compatibility_params[i], compatibility_params[i+1]); - if (size_inc >= sizeof tmp_buf) - { - //TODO make Assert - elog(ERROR, "Compatibility params from agent doesn't fit to %zu chars, %s=%s", - sizeof tmp_buf - 1, compatibility_params[i], compatibility_params[i+1] ); - } - - result_size += size_inc; - if (result_size > compatibility_buf_size) - { - //TODO make Assert - elog(ERROR, "Can't fit compatibility string size %zu to buffer size %zu:\n%s\n%s", - result_size, compatibility_buf_size, compatibility_buf, tmp_buf); - } - strcat(compatibility_buf, tmp_buf); + result_size += snprintf(compatibility_buf + result_size, compatibility_buf_size - result_size, + "%s" COMPATIBILITY_VAL_SEPARATOR "%s" COMPATIBILITY_LINE_SEPARATOR, + compatibility_params[i], compatibility_params[i+1]); + Assert(result_size < compatibility_buf_size); } - return result_size; + return result_size + 1; } /* @@ -319,7 +298,7 @@ size_t prepare_compatibility_str(char* compatibility_buf, size_t compatibility_b */ void check_remote_agent_compatibility(int agent_version, char *compatibility_str, size_t compatibility_str_max_size) { - elog(LOG, "Agent version=%d", agent_version); + elog(LOG, "Agent version=%d\n", agent_version); if (agent_version != AGENT_PROTOCOL_VERSION) { @@ -331,25 +310,24 @@ void check_remote_agent_compatibility(int agent_version, char *compatibility_str elog(ERROR, "Remote agent protocol version %s does not match local program protocol version %s, " "consider to upgrade pg_probackup binary", - agent_version_str, AGENT_PROTOCOL_VERSION_STR); + agent_version_str, AGENT_PROTOCOL_VERSION_STR); } + /* checking compatibility params */ if (strnlen(compatibility_str, compatibility_str_max_size) == compatibility_str_max_size) { elog(ERROR, "Corrupted remote compatibility protocol: compatibility string has no terminating \\0"); } - elog(LOG, "Agent compatibility params: '%s'", compatibility_str); + elog(LOG, "Agent compatibility params:\n%s", compatibility_str); - /* checking compatibility params */ { - char *buf[compatibility_str_max_size]; + char buf[compatibility_str_max_size]; 
prepare_compatibility_str(buf, sizeof buf); - if(!strcmp(compatibility_str, buf)) + if(strcmp(compatibility_str, buf)) { - elog(ERROR, "Incompatible agent params, expected %s", buf); + elog(ERROR, "Incompatible remote agent params, expected:\n%s", buf); } } - } From f61be78e783a5948750e5fc77f31eeab54a82ae2 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Fri, 16 Sep 2022 04:39:18 +0300 Subject: [PATCH 324/525] [PBCKP-236] working solution cleaned up --- src/utils/remote.c | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/src/utils/remote.c b/src/utils/remote.c index 2babe79ae..79456d9fb 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -249,26 +249,13 @@ bool launch_agent(void) return true; } -#define COMPATIBILITY_VAL(macro) #macro, macro -#define COMPATIBILITY_STR(macro) #macro -#define COMPATIBILITY_VAL_STR(macro) #macro, COMPATIBILITY_STR(macro) +#define COMPATIBILITY_VAL_STR(macro) #macro, macro +#define COMPATIBILITY_VAL_INT_HELPER(macro, helper_buf, buf_size) (snprintf(helper_buf, buf_size, "%d", macro), helper_buf) +#define COMPATIBILITY_VAL_INT(macro, helper_buf, buf_size) #macro, COMPATIBILITY_VAL_INT_HELPER(macro, helper_buf, buf_size) #define COMPATIBILITY_VAL_SEPARATOR "=" #define COMPATIBILITY_LINE_SEPARATOR "\n" -static char* compatibility_params[] = { - COMPATIBILITY_VAL(PG_MAJORVERSION), - //TODO remove? - //TODO doesn't work macro name check for ints!!!! - COMPATIBILITY_VAL_STR(SIZEOF_VOID_P), - //TODO REVIEW XXX can use edition.h/extract_pgpro_edition() -#ifdef PGPRO_EDN - //TODO add vanilla - //TODO make "1c" -> "vanilla" - COMPATIBILITY_VAL(PGPRO_EDN), -#endif -}; - /* * Compose compatibility string to be sent by pg_probackup agent * through ssh and to be verified by pg_probackup peer. @@ -277,6 +264,18 @@ static char* compatibility_params[] = { */ size_t prepare_compatibility_str(char* compatibility_buf, size_t compatibility_buf_size) { + char compatibility_val_int_macro_helper_buf[32]; + char* compatibility_params[] = { + COMPATIBILITY_VAL_STR(PG_MAJORVERSION), +#ifdef PGPRO_EDN + //TODO REVIEW can use edition.h/extract_pgpro_edition() + COMPATIBILITY_VAL_STR(PGPRO_EDN), +#endif + //TODO REVIEW remove? no difference between 32/64 in global/pg_control. + COMPATIBILITY_VAL_INT(SIZEOF_VOID_P, + compatibility_val_int_macro_helper_buf, sizeof compatibility_val_int_macro_helper_buf), + }; + size_t result_size = 0; size_t compatibility_params_array_size = sizeof compatibility_params / sizeof compatibility_params[0];; From b2091cd2c277d28ec8bc4f1c7980b1c1798076ea Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Fri, 16 Sep 2022 05:51:11 +0300 Subject: [PATCH 325/525] [PBCKP-236] [skip] removed unnecessary TODOs --- src/utils/file.c | 2 -- src/utils/remote.c | 16 ++++++++-------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index fa0983947..5a2aa61a8 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -268,7 +268,6 @@ fio_write_all(int fd, void const* buf, size_t size) return offs; } -//TODO REVIEW XXX move to remote.c???? /* Get version of remote agent */ void fio_get_agent_version(int* protocol, char* payload_buf, size_t payload_buf_size) @@ -3217,7 +3216,6 @@ fio_delete_impl(mode_t mode, char *buf) } /* Execute commands at remote host */ -//TODO REVIEW XXX move to remote.c? 
void fio_communicate(int in, int out) { diff --git a/src/utils/remote.c b/src/utils/remote.c index 79456d9fb..97c8f3d4a 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -117,9 +117,6 @@ bool launch_agent(void) int infd[2]; int errfd[2]; int agent_version; - //TODO REVIEW XXX review buf_size - int payload_buf_size = 1024 * 8; - char payload_buf[payload_buf_size]; ssh_argc = 0; #ifdef WIN32 @@ -241,10 +238,13 @@ bool launch_agent(void) fio_redirect(infd[0], outfd[1], errfd[0]); /* write to stdout */ } - /* Make sure that remote agent has the same version, fork and other features to be binary compatible - */ - fio_get_agent_version(&agent_version, payload_buf, payload_buf_size); - check_remote_agent_compatibility(agent_version, payload_buf, payload_buf_size); + + /* Make sure that remote agent has the same version, fork and other features to be binary compatible */ + { + char payload_buf[1024]; + fio_get_agent_version(&agent_version, payload_buf, sizeof payload_buf); + check_remote_agent_compatibility(agent_version, payload_buf, sizeof payload_buf); + } return true; } @@ -268,7 +268,7 @@ size_t prepare_compatibility_str(char* compatibility_buf, size_t compatibility_b char* compatibility_params[] = { COMPATIBILITY_VAL_STR(PG_MAJORVERSION), #ifdef PGPRO_EDN - //TODO REVIEW can use edition.h/extract_pgpro_edition() + //TODO REVIEW can use edition.h/extract_pgpro_edition() or similar COMPATIBILITY_VAL_STR(PGPRO_EDN), #endif //TODO REVIEW remove? no difference between 32/64 in global/pg_control. From 56598848c260c1cae5a6fb9d739f840c105668ed Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Fri, 16 Sep 2022 06:43:16 +0300 Subject: [PATCH 326/525] [PBCKP-236] ANSI C fix --- src/utils/remote.c | 2 +- tests/compatibility.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/utils/remote.c b/src/utils/remote.c index 97c8f3d4a..a5294c705 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -321,7 +321,7 @@ void check_remote_agent_compatibility(int agent_version, char *compatibility_str elog(LOG, "Agent compatibility params:\n%s", compatibility_str); { - char buf[compatibility_str_max_size]; + char buf[1024]; prepare_compatibility_str(buf, sizeof buf); if(strcmp(compatibility_str, buf)) diff --git a/tests/compatibility.py b/tests/compatibility.py index 0562b441c..b0a9b0e50 100644 --- a/tests/compatibility.py +++ b/tests/compatibility.py @@ -14,7 +14,7 @@ def setUp(self): self.fname = self.id().split('.')[3] # @unittest.expectedFailure - @unittest.skip("skip") + # @unittest.skip("skip") def test_catchup_with_different_remote_major_pg(self): """ Decription in jira issue PBCKP-236 From 35df5060d5e1a20bdfa45dbaa5aab429f0fd4aa6 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Mon, 26 Sep 2022 06:00:03 +0300 Subject: [PATCH 327/525] [PBCKP-236] 1c+certified editions check --- src/utils/file.c | 1 - src/utils/remote.c | 58 ++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 53 insertions(+), 6 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 5a2aa61a8..242810b5e 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -280,7 +280,6 @@ fio_get_agent_version(int* protocol, char* payload_buf, size_t payload_buf_size) IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); if (hdr.size > payload_buf_size) { - //TODO REVIEW XXX %zu is C99 but not ANSI S compatible, should we use ints, %zu is also applied at data.c:501 data.c:1638?? 
elog(ERROR, "Corrupted remote compatibility protocol: insufficient payload_buf_size=%zu", payload_buf_size); } diff --git a/src/utils/remote.c b/src/utils/remote.c index a5294c705..786b4bfb1 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -249,6 +249,57 @@ bool launch_agent(void) return true; } +/* PGPRO 10-13 check to be "(certified)", with exceptional case PGPRO_11 conforming to "(standard certified)" */ +static bool check_certified() +{ +#ifdef PGPRO_VERSION_STR + return strstr(PGPRO_VERSION_STR, "(certified)") || + strstr(PGPRO_VERSION_STR, ("(standard certified)")); +#endif + return false; +} + +//TODO REVIEW review coding standard https://jira.postgrespro.ru/browse/PBCKP-251 with @funny_falcon, newlines, braces etc +static char* extract_pg_edition_str() +{ + static char *vanilla = "vanilla"; + static char *std = "standard"; + static char *ent = "enterprise"; + static char *std_cert = "standard-certified"; + static char *ent_cert = "enterprise-certified"; + +#ifdef PGPRO_EDITION + if (strcasecmp(PGPRO_EDITION, "1C") == 0) + return vanilla; + + /* these "certified" checks are applicable to PGPRO from 9.6 up to 12 versions. + * 13+ certified versions are compatible to non-certified ones */ + if (PG_VERSION_NUM < 100000) + { + if (strcmp(PGPRO_EDITION, "standard-certified") == 0) + return std_cert; + else if (strcmp(PGPRO_EDITION, "enterprise-certified")) + return ent_cert; + else + Assert("Bad #define PGPRO_EDITION value" == 0); + } + + if (check_certified()) + { + if (strcmp(PGPRO_EDITION, "standard")) + return std_cert; + else if (strcmp(PGPRO_EDITION, "enterprise") == 0) + return ent_cert; + else + Assert("Bad #define PGPRO_EDITION value" == 0); + } + + return PGPRO_EDITION; +#else + return vanilla; +#endif +} + #define COMPATIBILITY_VAL_STR(macro) #macro, macro #define COMPATIBILITY_VAL_INT_HELPER(macro, helper_buf, buf_size) (snprintf(helper_buf, buf_size, "%d", macro), helper_buf) #define COMPATIBILITY_VAL_INT(macro, helper_buf, buf_size) #macro, COMPATIBILITY_VAL_INT_HELPER(macro, helper_buf, buf_size) @@ -267,11 +318,8 @@ size_t prepare_compatibility_str(char* compatibility_buf, size_t compatibility_b char compatibility_val_int_macro_helper_buf[32]; char* compatibility_params[] = { COMPATIBILITY_VAL_STR(PG_MAJORVERSION), -#ifdef PGPRO_EDN - //TODO REVIEW can use edition.h/extract_pgpro_edition() or similar - COMPATIBILITY_VAL_STR(PGPRO_EDN), -#endif - //TODO REVIEW remove? no difference between 32/64 in global/pg_control. + "edition", extract_pg_edition_str(), + /* 32/64 bits compatibility */ COMPATIBILITY_VAL_INT(SIZEOF_VOID_P, compatibility_val_int_macro_helper_buf, sizeof compatibility_val_int_macro_helper_buf), }; From 0dc826fc118ce5988b984b9ee7912a054920d941 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Mon, 26 Sep 2022 10:59:37 +0300 Subject: [PATCH 328/525] Fix packaging/Makefile.test typo [ci skip] --- packaging/Makefile.test | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/Makefile.test b/packaging/Makefile.test index f5e004f01..11c63619a 100644 --- a/packaging/Makefile.test +++ b/packaging/Makefile.test @@ -130,10 +130,10 @@ build/test_suse: build/test_suse_15.1 build/test_suse_15.2 @echo Suse: done build/test_suse_15.1: build/test_suse_15.1_9.6 build/test_suse_15.1_10 build/test_suse_15.1_11 build/test_suse_15.1_12 build/test_suse_15.1_13 - @echo Rhel 15.1: done + @echo Suse 15.1: done build/test_suse_15.2: build/test_suse_15.2_9.6 build/test_suse_15.2_10 build/test_suse_15.2_11 build/test_suse_15.2_12 build/test_suse_15.2_13 build/test_suse_15.2_14 - @echo Rhel 15.1: done + @echo Suse 15.2: done define test_suse docker rm -f $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION) >> /dev/null 2>&1 ; \ From 4d907c958d2cd74b6e85eaf426ed93b18e9f6c39 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Mon, 26 Sep 2022 11:01:21 +0300 Subject: [PATCH 329/525] Fix test packagin script for 9.6 [ci skip] --- packaging/test/scripts/rpm.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/packaging/test/scripts/rpm.sh b/packaging/test/scripts/rpm.sh index 3b6806993..87d430ef8 100755 --- a/packaging/test/scripts/rpm.sh +++ b/packaging/test/scripts/rpm.sh @@ -77,6 +77,12 @@ if [ ${DISTRIB} == 'centos' ] && [ ${DISTRIB_VERSION} == '8' ]; then dnf -qy module disable postgresql fi +# PGDG doesn't support install of PG-9.6 from repo package anymore +if [ ${PG_VERSION} == '9.6' ] && [ ${DISTRIB_VERSION} == '7' ]; then + # ugly hack: use repo settings from PG10 + sed -i 's/10/9.6/' /etc/yum.repos.d/pgdg-redhat-all.repo +fi + yum install -y postgresql${PG_TOG}-server.x86_64 export PGDATA=/var/lib/pgsql/${PG_VERSION}/data From b3351b50d664b9a639537d4aea694d971c9cd7d5 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Tue, 27 Sep 2022 06:00:57 +0300 Subject: [PATCH 330/525] [PBCKP-236] final update --- src/utils/remote.c | 32 ++++++++++++-------------------- tests/compatibility.py | 8 ++++---- 2 files changed, 16 insertions(+), 24 deletions(-) diff --git a/src/utils/remote.c b/src/utils/remote.c index 786b4bfb1..7d86be4c1 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -249,19 +249,18 @@ bool launch_agent(void) return true; } -/* PGPRO 10-13 check to be "(certified)", with exceptional case PGPRO_11 conforming to "(standard certified)" */ +#ifdef PGPRO_EDITION +/* PGPRO 10-13 checks to be "(certified)", with exceptional case PGPRO_11 conforming to "(standard certified)" */ static bool check_certified() { -#ifdef PGPRO_VERSION_STR return strstr(PGPRO_VERSION_STR, "(certified)") || strstr(PGPRO_VERSION_STR, ("(standard certified)")); -#endif - return false; } +#endif -//TODO REVIEW review coding standard https://jira.postgrespro.ru/browse/PBCKP-251 with @funny_falcon, newlines, braces etc static char* extract_pg_edition_str() { + static char *_1C = "1C"; static char *vanilla = "vanilla"; static char *std = "standard"; static char *ent = "enterprise"; @@ -269,26 +268,19 @@ static char* extract_pg_edition_str() static char *ent_cert = "enterprise-certified"; #ifdef PGPRO_EDITION - if (strcasecmp(PGPRO_EDITION, "1C") == 0) + if (strcmp(PGPRO_EDITION, _1C) == 0) return vanilla; - /* these "certified" checks are applicable to PGPRO from 9.6 up to 12 versions. 
- * 13+ certified versions are compatible to non-certified ones */ if (PG_VERSION_NUM < 100000) - { - if (strcmp(PGPRO_EDITION, "standard-certified") == 0) - return std_cert; - else if (strcmp(PGPRO_EDITION, "enterprise-certified")) - return ent_cert; - else - Assert("Bad #define PGPRO_EDITION value" == 0); - } + return PGPRO_EDITION; - if (check_certified()) + /* these "certified" checks are applicable to PGPRO from 10 up to 12 versions. + * 13+ certified versions are compatible to non-certified ones */ + if (PG_VERSION_NUM < 130000 && check_certified()) { - if (strcmp(PGPRO_EDITION, "standard")) + if (strcmp(PGPRO_EDITION, std) == 0) return std_cert; - else if (strcmp(PGPRO_EDITION, "enterprise") == 0) + else if (strcmp(PGPRO_EDITION, ent) == 0) return ent_cert; else Assert("Bad #define PGPRO_EDITION value" == 0); @@ -374,7 +366,7 @@ void check_remote_agent_compatibility(int agent_version, char *compatibility_str prepare_compatibility_str(buf, sizeof buf); if(strcmp(compatibility_str, buf)) { - elog(ERROR, "Incompatible remote agent params, expected:\n%s", buf); + elog(ERROR, "Incompatible remote agent params, expected:\n%s, actual:\n:%s ", buf, compatibility_str); } } } diff --git a/tests/compatibility.py b/tests/compatibility.py index b0a9b0e50..04af1478f 100644 --- a/tests/compatibility.py +++ b/tests/compatibility.py @@ -14,7 +14,7 @@ def setUp(self): self.fname = self.id().split('.')[3] # @unittest.expectedFailure - # @unittest.skip("skip") + @unittest.skip("skip") def test_catchup_with_different_remote_major_pg(self): """ Decription in jira issue PBCKP-236 @@ -27,13 +27,13 @@ def test_catchup_with_different_remote_major_pg(self): calling probackup PGPROEE9_6 agent from PGPROEE11 probackup master for DELTA backup causes the PBCKP-236 problem - please correct path for agent's pg_path_ee_9_6 = '/home/avaness/postgres/postgres.build.ee.9.6/bin/' + please correct path for agent's pg_path_remote_version = '/home/avaness/postgres/postgres.build.ee.9.6/bin/' """ self.verbose = True self.remote = True # please use your own local path - pg_path_ee_9_6 = '/home/avaness/postgres/postgres.build.ee.9.6/bin/' + pg_path_remote_version = '/home/avaness/postgres/postgres.build.clean/bin' src_pg = self.make_simple_node( base_dir=os.path.join(module_name, self.fname, 'src'), @@ -70,7 +70,7 @@ def test_catchup_with_different_remote_major_pg(self): source_pgdata = src_pg.data_dir, destination_node = dst_pg, # here's substitution of --remoge-path pg_probackup agent compiled with another postgres version - options=['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--remote-path=' + pg_path_ee_9_6] + options=['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--remote-path=' + pg_path_remote_version] ) # Clean after yourself From 6e671232b8791ccb8bf6090c9dcf38081fc9c2dd Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Wed, 28 Sep 2022 04:32:47 +0300 Subject: [PATCH 331/525] [PBCKP-236] final update after review --- src/utils/remote.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/src/utils/remote.c b/src/utils/remote.c index 7d86be4c1..0f254d147 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -292,12 +292,8 @@ static char* extract_pg_edition_str() #endif } -#define COMPATIBILITY_VAL_STR(macro) #macro, macro -#define COMPATIBILITY_VAL_INT_HELPER(macro, helper_buf, buf_size) (snprintf(helper_buf, buf_size, "%d", macro), helper_buf) -#define COMPATIBILITY_VAL_INT(macro, helper_buf, buf_size) #macro, COMPATIBILITY_VAL_INT_HELPER(macro, 
helper_buf, buf_size) - -#define COMPATIBILITY_VAL_SEPARATOR "=" -#define COMPATIBILITY_LINE_SEPARATOR "\n" +#define COMPATIBILITY_VAL_STR(macro) { #macro, macro, 0 } +#define COMPATIBILITY_VAL_INT(macro) { #macro, NULL, macro } /* * Compose compatibility string to be sent by pg_probackup agent @@ -307,13 +303,10 @@ static char* extract_pg_edition_str() */ size_t prepare_compatibility_str(char* compatibility_buf, size_t compatibility_buf_size) { - char compatibility_val_int_macro_helper_buf[32]; - char* compatibility_params[] = { + struct { const char* name; const char* strval; int intval; } compatibility_params[] = { COMPATIBILITY_VAL_STR(PG_MAJORVERSION), - "edition", extract_pg_edition_str(), - /* 32/64 bits compatibility */ - COMPATIBILITY_VAL_INT(SIZEOF_VOID_P, - compatibility_val_int_macro_helper_buf, sizeof compatibility_val_int_macro_helper_buf), + { "edition", extract_pg_edition_str(), 0 }, + COMPATIBILITY_VAL_INT(SIZEOF_VOID_P), }; size_t result_size = 0; @@ -324,9 +317,16 @@ size_t prepare_compatibility_str(char* compatibility_buf, size_t compatibility_b for (int i = 0; i < compatibility_params_array_size; i+=2) { - result_size += snprintf(compatibility_buf + result_size, compatibility_buf_size - result_size, - "%s" COMPATIBILITY_VAL_SEPARATOR "%s" COMPATIBILITY_LINE_SEPARATOR, - compatibility_params[i], compatibility_params[i+1]); + if (compatibility_params[i].strval != NULL) + result_size += snprintf(compatibility_buf + result_size, compatibility_buf_size - result_size, + "%s=%s/n", + compatibility_params[i].name, + compatibility_params[i].strval); + else + result_size += snprintf(compatibility_buf + result_size, compatibility_buf_size - result_size, + "%s=%d/n", + compatibility_params[i].name, + compatibility_params[i].intval); Assert(result_size < compatibility_buf_size); } return result_size + 1; @@ -349,7 +349,7 @@ void check_remote_agent_compatibility(int agent_version, char *compatibility_str elog(ERROR, "Remote agent protocol version %s does not match local program protocol version %s, " "consider to upgrade pg_probackup binary", - agent_version_str, AGENT_PROTOCOL_VERSION_STR); + agent_version_str, AGENT_PROTOCOL_VERSION_STR); } /* checking compatibility params */ From 1ce38ed70cccabcdea5dbda7f1030424ceeeef03 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Wed, 28 Sep 2022 04:52:11 +0300 Subject: [PATCH 332/525] [PBCKP-236] final update after review --- src/utils/remote.c | 7 ++----- tests/compatibility.py | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/src/utils/remote.c b/src/utils/remote.c index 0f254d147..f3608e566 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -310,12 +310,9 @@ size_t prepare_compatibility_str(char* compatibility_buf, size_t compatibility_b }; size_t result_size = 0; - size_t compatibility_params_array_size = sizeof compatibility_params / sizeof compatibility_params[0];; - *compatibility_buf = '\0'; - Assert(compatibility_params_array_size % 2 == 0); - for (int i = 0; i < compatibility_params_array_size; i+=2) + for (int i = 0; i < sizeof compatibility_params; i+=2) { if (compatibility_params[i].strval != NULL) result_size += snprintf(compatibility_buf + result_size, compatibility_buf_size - result_size, @@ -366,7 +363,7 @@ void check_remote_agent_compatibility(int agent_version, char *compatibility_str prepare_compatibility_str(buf, sizeof buf); if(strcmp(compatibility_str, buf)) { - elog(ERROR, "Incompatible remote agent params, expected:\n%s, actual:\n:%s ", buf, compatibility_str); + elog(ERROR, 
"Incompatible remote agent params, expected:\n%s, actual:\n:%s", buf, compatibility_str); } } } diff --git a/tests/compatibility.py b/tests/compatibility.py index 04af1478f..4e5e27f0e 100644 --- a/tests/compatibility.py +++ b/tests/compatibility.py @@ -14,7 +14,7 @@ def setUp(self): self.fname = self.id().split('.')[3] # @unittest.expectedFailure - @unittest.skip("skip") + # @unittest.skip("skip") def test_catchup_with_different_remote_major_pg(self): """ Decription in jira issue PBCKP-236 From c52659791b91012b13a2a8ebec1367dddc60187b Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Thu, 29 Sep 2022 03:01:57 +0300 Subject: [PATCH 333/525] [PBCKP-236] assert fix --- src/utils/remote.c | 17 +++++++++++++---- tests/compatibility.py | 2 +- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/src/utils/remote.c b/src/utils/remote.c index f3608e566..91468b54c 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -295,6 +295,9 @@ static char* extract_pg_edition_str() #define COMPATIBILITY_VAL_STR(macro) { #macro, macro, 0 } #define COMPATIBILITY_VAL_INT(macro) { #macro, NULL, macro } +#define COMPATIBILITY_VAL_SEPARATOR "=" +#define COMPATIBILITY_LINE_SEPARATOR "\n" + /* * Compose compatibility string to be sent by pg_probackup agent * through ssh and to be verified by pg_probackup peer. @@ -303,7 +306,13 @@ static char* extract_pg_edition_str() */ size_t prepare_compatibility_str(char* compatibility_buf, size_t compatibility_buf_size) { - struct { const char* name; const char* strval; int intval; } compatibility_params[] = { + typedef struct compatibility_param_tag { + const char* name; + const char* strval; + int intval; + } compatibility_param; + + compatibility_param compatibility_params[] = { COMPATIBILITY_VAL_STR(PG_MAJORVERSION), { "edition", extract_pg_edition_str(), 0 }, COMPATIBILITY_VAL_INT(SIZEOF_VOID_P), @@ -312,16 +321,16 @@ size_t prepare_compatibility_str(char* compatibility_buf, size_t compatibility_b size_t result_size = 0; *compatibility_buf = '\0'; - for (int i = 0; i < sizeof compatibility_params; i+=2) + for (int i = 0; i < (sizeof compatibility_params / sizeof(compatibility_param)); i++) { if (compatibility_params[i].strval != NULL) result_size += snprintf(compatibility_buf + result_size, compatibility_buf_size - result_size, - "%s=%s/n", + "%s" COMPATIBILITY_VAL_SEPARATOR "%s" COMPATIBILITY_LINE_SEPARATOR, compatibility_params[i].name, compatibility_params[i].strval); else result_size += snprintf(compatibility_buf + result_size, compatibility_buf_size - result_size, - "%s=%d/n", + "%s" COMPATIBILITY_VAL_SEPARATOR "%d" COMPATIBILITY_LINE_SEPARATOR, compatibility_params[i].name, compatibility_params[i].intval); Assert(result_size < compatibility_buf_size); diff --git a/tests/compatibility.py b/tests/compatibility.py index 4e5e27f0e..04af1478f 100644 --- a/tests/compatibility.py +++ b/tests/compatibility.py @@ -14,7 +14,7 @@ def setUp(self): self.fname = self.id().split('.')[3] # @unittest.expectedFailure - # @unittest.skip("skip") + @unittest.skip("skip") def test_catchup_with_different_remote_major_pg(self): """ Decription in jira issue PBCKP-236 From 03d55d079b836d285716a3df67a213fa1674a50a Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Thu, 29 Sep 2022 05:03:51 +0300 Subject: [PATCH 334/525] [PBCKP-236] fix excessive warnings for vanilla --- src/utils/remote.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/utils/remote.c b/src/utils/remote.c index 91468b54c..9feb44a9c 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c 
@@ -260,14 +260,14 @@ static bool check_certified() static char* extract_pg_edition_str() { - static char *_1C = "1C"; static char *vanilla = "vanilla"; +#ifdef PGPRO_EDITION + static char *_1C = "1C"; static char *std = "standard"; static char *ent = "enterprise"; static char *std_cert = "standard-certified"; static char *ent_cert = "enterprise-certified"; -#ifdef PGPRO_EDITION if (strcmp(PGPRO_EDITION, _1C) == 0) return vanilla; From 23d5ee4abfb57506fe3f1400b4e9c635a42e6bf6 Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Tue, 11 Oct 2022 12:20:41 +0300 Subject: [PATCH 335/525] [PBCKP-235] review fixes tags: pg_probackup --- .travis.yml | 1 - README.md | 32 ++++++++++++++++---------------- 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/.travis.yml b/.travis.yml index f113d05c4..8a67e77b3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -54,7 +54,6 @@ env: jobs: allow_failures: - if: env(PG_BRANCH) = master - - if: env(PG_BRANCH) = REL_15_STABLE - if: env(PG_BRANCH) = REL9_5_STABLE # - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) diff --git a/README.md b/README.md index 433978473..d1ccd9866 100644 --- a/README.md +++ b/README.md @@ -42,8 +42,8 @@ Regardless of the chosen backup type, all backups taken with `pg_probackup` supp `PTRACK` backup support provided via following options: * vanilla PostgreSQL 11, 12, 13, 14, 15 with [ptrack extension](https://github.com/postgrespro/ptrack) -* Postgres Pro Standard 11, 12, 13, 14 -* Postgres Pro Enterprise 11, 12, 13, 14 +* Postgres Pro Standard 11, 12, 13, 14, 15 +* Postgres Pro Enterprise 11, 12, 13, 14, 15 ## Limitations @@ -137,8 +137,8 @@ sudo apt-get install pg_probackup-{15,14,13,12,11,10,9.6}-debuginfo #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup-forks.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{std,ent}-{14,13,12,11,10,9.6} -sudo apt-get install pg-probackup-{std,ent}-{14,13,12,11,10,9.6}-dbg +sudo apt-get install pg-probackup-{std,ent}-{15,14,13,12,11,10,9.6} +sudo apt-get install pg-probackup-{std,ent}-{15,14,13,12,11,10,9.6}-dbg #DEB Astra Linix Orel sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' @@ -148,35 +148,35 @@ sudo apt-get install pg-probackup-{std,ent}-{12,11,10,9.6}{-dbg,} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-centos.noarch.rpm -yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6} +yum install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-rhel.noarch.rpm -yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6} +yum install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6}-debuginfo #RPM Oracle Linux Packages rpm -ivh 
https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-oraclelinux.noarch.rpm -yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6} +yum install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6}-debuginfo #RPM ALT Linux 7 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p7 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6} +sudo apt-get install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6}-debuginfo #RPM ALT Linux 8 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p8 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6} +sudo apt-get install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p9 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' && sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6} +sudo apt-get install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6}-debuginfo ``` Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.github.io/pg_probackup/#pbk-install-and-setup). 
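The [PBCKP-236] patches above all converge on one handshake: the local pg_probackup binary and the remote agent each build the same newline-separated list of NAME=value pairs (PG_MAJORVERSION, edition, SIZEOF_VOID_P) and the local side refuses to proceed if a plain strcmp of the two strings differs. The sketch below shows the shape of that check. It is an illustration only, not pg_probackup code: the two macros are stand-in values for what normally comes from the PostgreSQL build headers, and the "remote" string is built locally instead of being received over ssh.

    /*
     * Illustration only (not pg_probackup code): both peers build the same
     * newline-separated NAME=value string and the local side compares it
     * with what the agent sent back.
     */
    #include <stdio.h>
    #include <string.h>

    #define PG_MAJORVERSION "15"   /* assumed value, normally from pg_config.h */
    #define SIZEOF_VOID_P   8      /* assumed value, normally from the build   */

    static size_t
    build_compat_str(char *buf, size_t buf_size)
    {
        size_t len = 0;

        len += snprintf(buf + len, buf_size - len, "%s=%s\n",
                        "PG_MAJORVERSION", PG_MAJORVERSION);
        len += snprintf(buf + len, buf_size - len, "%s=%d\n",
                        "SIZEOF_VOID_P", SIZEOF_VOID_P);
        return len + 1;                        /* include the terminating '\0' */
    }

    int
    main(void)
    {
        char local[1024];
        char remote[1024];

        build_compat_str(local, sizeof local);
        build_compat_str(remote, sizeof remote);  /* in reality received over ssh */

        if (strcmp(local, remote) != 0)
        {
            fprintf(stderr, "Incompatible remote agent params, expected:\n%s", local);
            return 1;
        }
        return 0;
    }

Because the comparison is a whole-string strcmp, adding, renaming or reordering a parameter on one side is enough to make otherwise compatible binaries reject each other, which is why the later revisions of these patches keep the parameter list, the "=" separator and the "\n" line separator identical on both ends.
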
From d808a16640be611e363b66900b18b2b6f8a52747 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Tue, 11 Oct 2022 16:09:57 +0300 Subject: [PATCH 336/525] [PBCKP-236] removed excessive brackets --- src/utils/remote.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/utils/remote.c b/src/utils/remote.c index 9feb44a9c..addd73dc8 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -254,7 +254,7 @@ bool launch_agent(void) static bool check_certified() { return strstr(PGPRO_VERSION_STR, "(certified)") || - strstr(PGPRO_VERSION_STR, ("(standard certified)")); + strstr(PGPRO_VERSION_STR, "(standard certified)"); } #endif From 96ad6e2eb9d0a9a6ef166d61d2bffa4098354cd7 Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Wed, 12 Oct 2022 17:11:58 +0300 Subject: [PATCH 337/525] version macro increment, Release 2.5.9 --- src/pg_probackup.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 533b05d58..27deeee9b 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -344,7 +344,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.8" +#define PROGRAM_VERSION "2.5.9" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20501 From 80efb85029a254c0a885931058d08c260c3c35d6 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 30 Sep 2022 11:06:16 +0300 Subject: [PATCH 338/525] [PBCKP-146] Small fix for remote_agent --- src/pg_probackup.c | 3 ++- src/pg_probackup.h | 2 +- src/utils/configuration.c | 1 - 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 1f6b6313e..849685278 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -88,7 +88,7 @@ bool perm_slot = false; /* backup options */ bool backup_logs = false; bool smooth_checkpoint; -char *remote_agent; +bool remote_agent = false; static char *backup_note = NULL; /* catchup options */ static char *catchup_source_pgdata = NULL; @@ -361,6 +361,7 @@ main(int argc, char *argv[]) elog(ERROR, "Version mismatch, pg_probackup binary with version '%s' " "is launched as an agent for pg_probackup binary with version '%s'", PROGRAM_VERSION, argv[2]); + remote_agent = true; fio_communicate(STDIN_FILENO, STDOUT_FILENO); return 0; case HELP_CMD: diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 533b05d58..495fbdcad 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -798,7 +798,7 @@ extern bool perm_slot; extern bool smooth_checkpoint; /* remote probackup options */ -extern char* remote_agent; +extern bool remote_agent; extern bool exclusive_backup; diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 98c3b2994..93f29c488 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -531,7 +531,6 @@ config_get_opt(int argc, char **argv, ConfigOption cmd_options[], opt = option_find(c, options); if (opt - && !remote_agent && opt->allowed < SOURCE_CMD && opt->allowed != SOURCE_CMD_STRICT) elog(ERROR, "Option %s cannot be specified in command line", opt->lname); From 4730857b7946e0fb136e66acb27de44ca08e4977 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 30 Sep 2022 12:22:14 +0300 Subject: [PATCH 339/525] [PBCKP-146] - fio_get_crc32 - add "missing_ok" parameter --- src/archive.c | 9 ++++----- src/data.c | 9 ++++++--- 
src/utils/file.c | 22 +++++++++++++++------- src/utils/file.h | 3 ++- 4 files changed, 27 insertions(+), 16 deletions(-) diff --git a/src/archive.c b/src/archive.c index 1a19c3d84..2ae86bd6a 100644 --- a/src/archive.c +++ b/src/archive.c @@ -512,8 +512,8 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d pg_crc32 crc32_src; pg_crc32 crc32_dst; - crc32_src = fio_get_crc32(from_fullpath, FIO_DB_HOST, false); - crc32_dst = fio_get_crc32(to_fullpath, FIO_BACKUP_HOST, false); + crc32_src = fio_get_crc32(from_fullpath, FIO_DB_HOST, false, false); + crc32_dst = fio_get_crc32(to_fullpath, FIO_BACKUP_HOST, false, false); if (crc32_src == crc32_dst) { @@ -760,9 +760,8 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, pg_crc32 crc32_src; pg_crc32 crc32_dst; - /* TODO: what if one of them goes missing? */ - crc32_src = fio_get_crc32(from_fullpath, FIO_DB_HOST, false); - crc32_dst = fio_get_crc32(to_fullpath_gz, FIO_BACKUP_HOST, true); + crc32_src = fio_get_crc32(from_fullpath, FIO_DB_HOST, false, false); + crc32_dst = fio_get_crc32(to_fullpath_gz, FIO_BACKUP_HOST, true, false); if (crc32_src == crc32_dst) { diff --git a/src/data.c b/src/data.c index 5c5fdf4f0..753f247f7 100644 --- a/src/data.c +++ b/src/data.c @@ -801,8 +801,11 @@ backup_non_data_file(pgFile *file, pgFile *prev_file, (prev_file && file->exists_in_prev && file->mtime <= parent_backup_time)) { - - file->crc = fio_get_crc32(from_fullpath, FIO_DB_HOST, false); + /* + * file could be deleted under our feets. + * But then backup_non_data_file_internal will handle it safely + */ + file->crc = fio_get_crc32(from_fullpath, FIO_DB_HOST, false, true); /* ...and checksum is the same... */ if (EQ_TRADITIONAL_CRC32(file->crc, prev_file->crc)) @@ -1327,7 +1330,7 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, if (already_exists) { /* compare checksums of already existing file and backup file */ - pg_crc32 file_crc = fio_get_crc32(to_fullpath, FIO_DB_HOST, false); + pg_crc32 file_crc = fio_get_crc32(to_fullpath, FIO_DB_HOST, false, false); if (file_crc == tmp_file->crc) { diff --git a/src/utils/file.c b/src/utils/file.c index 7103c8f1d..727b48c60 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1355,9 +1355,15 @@ fio_sync(char const* path, fio_location location) } } +enum { + GET_CRC32_DECOMPRESS = 1, + GET_CRC32_MISSING_OK = 2 +}; + /* Get crc32 of file */ pg_crc32 -fio_get_crc32(const char *file_path, fio_location location, bool decompress) +fio_get_crc32(const char *file_path, fio_location location, + bool decompress, bool missing_ok) { if (fio_is_remote(location)) { @@ -1370,7 +1376,9 @@ fio_get_crc32(const char *file_path, fio_location location, bool decompress) hdr.arg = 0; if (decompress) - hdr.arg = 1; + hdr.arg = GET_CRC32_DECOMPRESS; + if (missing_ok) + hdr.arg |= GET_CRC32_MISSING_OK; IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, file_path, path_len), path_len); @@ -1381,9 +1389,9 @@ fio_get_crc32(const char *file_path, fio_location location, bool decompress) else { if (decompress) - return pgFileGetCRCgz(file_path, true, true); + return pgFileGetCRCgz(file_path, true, missing_ok); else - return pgFileGetCRC(file_path, true, true); + return pgFileGetCRC(file_path, true, missing_ok); } } @@ -3380,10 +3388,10 @@ fio_communicate(int in, int out) break; case FIO_GET_CRC32: /* calculate crc32 for a file */ - if (hdr.arg == 1) - crc = pgFileGetCRCgz(buf, true, true); + if ((hdr.arg & 
GET_CRC32_DECOMPRESS)) + crc = pgFileGetCRCgz(buf, true, (hdr.arg & GET_CRC32_MISSING_OK) != 0); else - crc = pgFileGetCRC(buf, true, true); + crc = pgFileGetCRC(buf, true, (hdr.arg & GET_CRC32_MISSING_OK) != 0); IO_CHECK(fio_write_all(out, &crc, sizeof(crc)), sizeof(crc)); break; case FIO_GET_CHECKSUM_MAP: diff --git a/src/utils/file.h b/src/utils/file.h index a554b4ab0..ec478b451 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -120,7 +120,8 @@ extern int fio_truncate(int fd, off_t size); extern int fio_close(int fd); extern void fio_disconnect(void); extern int fio_sync(char const* path, fio_location location); -extern pg_crc32 fio_get_crc32(const char *file_path, fio_location location, bool decompress); +extern pg_crc32 fio_get_crc32(const char *file_path, fio_location location, + bool decompress, bool missing_ok); extern int fio_rename(char const* old_path, char const* new_path, fio_location location); extern int fio_symlink(char const* target, char const* link_path, bool overwrite, fio_location location); From e16c62e8fd705d65d2cc1a7fd3beb0ea40e31277 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 5 Oct 2022 12:16:10 +0300 Subject: [PATCH 340/525] [PBCKP-146] prettify forkname handling. --- src/catalog.c | 5 ++- src/dir.c | 87 ++++++++++++++++++++++------------------------ src/pg_probackup.h | 2 ++ 3 files changed, 48 insertions(+), 46 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 47513096c..03099d1a2 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1132,6 +1132,9 @@ get_backup_filelist(pgBackup *backup, bool strict) if (get_control_value_int64(buf, "hdr_size", &hdr_size, false)) file->hdr_size = (int) hdr_size; + if (file->external_dir_num == 0) + set_forkname(file); + parray_append(files, file); } @@ -2488,7 +2491,7 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, char control_path[MAXPGPATH]; char control_path_temp[MAXPGPATH]; size_t i = 0; - #define BUFFERSZ 1024*1024 + #define BUFFERSZ (1024*1024) char *buf; int64 backup_size_on_disk = 0; int64 uncompressed_size_on_disk = 0; diff --git a/src/dir.c b/src/dir.c index 561586f87..00e918d0f 100644 --- a/src/dir.c +++ b/src/dir.c @@ -758,57 +758,22 @@ dir_check_file(pgFile *file, bool backup_logs) return CHECK_FALSE; else if (isdigit(file->name[0])) { - char *fork_name; - int len; - char suffix[MAXPGPATH]; + set_forkname(file); - fork_name = strstr(file->name, "_"); - if (fork_name) - { - /* Auxiliary fork of the relfile */ - if (strcmp(fork_name, "_vm") == 0) - file->forkName = vm; - - else if (strcmp(fork_name, "_fsm") == 0) - file->forkName = fsm; - - else if (strcmp(fork_name, "_cfm") == 0) - file->forkName = cfm; - - else if (strcmp(fork_name, "_ptrack") == 0) - file->forkName = ptrack; - - else if (strcmp(fork_name, "_init") == 0) - file->forkName = init; - - // extract relOid for certain forks - if (file->forkName == vm || - file->forkName == fsm || - file->forkName == init || - file->forkName == cfm) - { - // sanity - if (sscanf(file->name, "%u_*", &(file->relOid)) != 1) - file->relOid = 0; - } + if (file->forkName == ptrack) /* Compatibility with left-overs from ptrack1 */ + return CHECK_FALSE; + else if (file->forkName != none) + return CHECK_TRUE; - /* Do not backup ptrack files */ - if (file->forkName == ptrack) - return CHECK_FALSE; - } - else + /* Set is_datafile flag */ { + char suffix[MAXFNAMELEN]; - len = strlen(file->name); - /* reloid.cfm */ - if (len > 3 && strcmp(file->name + len - 3, "cfm") == 0) - return CHECK_TRUE; - + /* check if file is datafile */ 
sscanf_res = sscanf(file->name, "%u.%d.%s", &(file->relOid), &(file->segno), suffix); - if (sscanf_res == 0) - elog(ERROR, "Cannot parse file name \"%s\"", file->name); - else if (sscanf_res == 1 || sscanf_res == 2) + Assert(sscanf_res > 0); /* since first char is digit */ + if (sscanf_res == 1 || sscanf_res == 2) file->is_datafile = true; } } @@ -1954,3 +1919,35 @@ pfilearray_clear_locks(parray *file_list) pg_atomic_clear_flag(&file->lock); } } + +/* Set forkName if possible */ +void +set_forkname(pgFile *file) +{ + int name_len = strlen(file->name); + + /* Auxiliary fork of the relfile */ + if (name_len > 3 && strcmp(file->name + name_len - 3, "_vm") == 0) + file->forkName = vm; + + else if (name_len > 4 && strcmp(file->name + name_len - 4, "_fsm") == 0) + file->forkName = fsm; + + else if (name_len > 4 && strcmp(file->name + name_len - 4, ".cfm") == 0) + file->forkName = cfm; + + else if (name_len > 5 && strcmp(file->name + name_len - 5, "_init") == 0) + file->forkName = init; + + else if (name_len > 7 && strcmp(file->name + name_len - 7, "_ptrack") == 0) + file->forkName = ptrack; + + // extract relOid for certain forks + + if ((file->forkName == vm || + file->forkName == fsm || + file->forkName == init || + file->forkName == cfm) && + (sscanf(file->name, "%u*", &(file->relOid)) != 1)) + file->relOid = 0; +} diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 495fbdcad..bc9f9b8a8 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -215,6 +215,7 @@ typedef enum CompressAlg typedef enum ForkName { + none, vm, fsm, cfm, @@ -1091,6 +1092,7 @@ extern int pgCompareString(const void *str1, const void *str2); extern int pgPrefixCompareString(const void *str1, const void *str2); extern int pgCompareOid(const void *f1, const void *f2); extern void pfilearray_clear_locks(parray *file_list); +extern void set_forkname(pgFile *file); /* in data.c */ extern bool check_data_file(ConnectionArgs *arguments, pgFile *file, From 8f504fc95cbace9297da363ac098126b4e75c6c8 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 10 Oct 2022 17:07:41 +0300 Subject: [PATCH 341/525] [PBCKP-146] stabilize couple of tests. 
--- tests/cfs_backup.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/cfs_backup.py b/tests/cfs_backup.py index d820360fe..436db31e7 100644 --- a/tests/cfs_backup.py +++ b/tests/cfs_backup.py @@ -995,6 +995,11 @@ def test_delete_random_cfm_file_from_tablespace_dir(self): "FROM generate_series(0,256) i".format('t1', tblspace_name) ) + self.node.safe_psql( + "postgres", + "CHECKPOINT" + ) + list_cmf = find_by_extensions( [self.get_tblspace_path(self.node, tblspace_name)], ['.cfm']) @@ -1044,6 +1049,11 @@ def test_delete_random_data_file_from_tablespace_dir(self): "FROM generate_series(0,256) i".format('t1', tblspace_name) ) + self.node.safe_psql( + "postgres", + "CHECKPOINT" + ) + list_data_files = find_by_pattern( [self.get_tblspace_path(self.node, tblspace_name)], '^.*/\d+$') From 51a141c4b0b07899c001f75c5c172492cbefa076 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 11 Oct 2022 18:59:48 +0300 Subject: [PATCH 342/525] [PBCKP-146] fix cfs test python3 compatibility --- tests/cfs_restore.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/cfs_restore.py b/tests/cfs_restore.py index 07cf891aa..611afc49e 100644 --- a/tests/cfs_restore.py +++ b/tests/cfs_restore.py @@ -103,6 +103,7 @@ def test_restore_empty_tablespace_from_fullbackup(self): "postgres", "SELECT * FROM pg_tablespace WHERE spcname='{0}'".format(tblspace_name) ) + tblspace = str(tblspace) self.assertTrue( tblspace_name in tblspace and "compression=true" in tblspace, "ERROR: The tablespace not restored or it restored without compressions" From 3e17c8c8daa21ba8d79fecc614c591c429dda7d1 Mon Sep 17 00:00:00 2001 From: "Andrew A. Bille" Date: Mon, 19 Sep 2022 12:25:23 +0700 Subject: [PATCH 343/525] Fix remembered check gdb flag in test with GDB --- tests/locking.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/locking.py b/tests/locking.py index 0fe954cae..4042a1462 100644 --- a/tests/locking.py +++ b/tests/locking.py @@ -419,6 +419,8 @@ def test_locking_concurrent_validate_and_backup(self): and stop it in the middle, take page backup. Expect PAGE backup to be successfully executed """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), From b568f2254a9f714760abe75c436a6476723463b8 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 12 Oct 2022 12:17:48 +0300 Subject: [PATCH 344/525] [PBCKP-146] truncate cfm files - store cfm files truncated to non-zero head with coarse granularity (64-4096 bytes) - and calculate crc for truncated file - during restoration calculate crc for cfm as if it was truncated --- src/archive.c | 8 +- src/catalog.c | 9 + src/data.c | 124 ++----- src/dir.c | 133 +------- src/merge.c | 2 +- src/pg_probackup.h | 13 +- src/utils/file.c | 519 +++++++++++++++++++++++++++++- src/utils/file.h | 4 +- tests/expected/option_version.out | 2 +- 9 files changed, 564 insertions(+), 250 deletions(-) diff --git a/src/archive.c b/src/archive.c index 2ae86bd6a..734602cac 100644 --- a/src/archive.c +++ b/src/archive.c @@ -1375,11 +1375,11 @@ get_wal_file(const char *filename, const char *from_fullpath, #ifdef HAVE_LIBZ /* If requested file is regular WAL segment, then try to open it with '.gz' suffix... */ if (IsXLogFileName(filename)) - rc = fio_send_file_gz(from_fullpath_gz, to_fullpath, out, &errmsg); + rc = fio_send_file_gz(from_fullpath_gz, out, &errmsg); if (rc == FILE_MISSING) #endif /* ... 
failing that, use uncompressed */ - rc = fio_send_file(from_fullpath, to_fullpath, out, NULL, &errmsg); + rc = fio_send_file(from_fullpath, out, false, NULL, &errmsg); /* When not in prefetch mode, try to use partial file */ if (rc == FILE_MISSING && !prefetch_mode && IsXLogFileName(filename)) @@ -1389,13 +1389,13 @@ get_wal_file(const char *filename, const char *from_fullpath, #ifdef HAVE_LIBZ /* '.gz.partial' goes first ... */ snprintf(from_partial, sizeof(from_partial), "%s.gz.partial", from_fullpath); - rc = fio_send_file_gz(from_partial, to_fullpath, out, &errmsg); + rc = fio_send_file_gz(from_partial, out, &errmsg); if (rc == FILE_MISSING) #endif { /* ... failing that, use '.partial' */ snprintf(from_partial, sizeof(from_partial), "%s.partial", from_fullpath); - rc = fio_send_file(from_partial, to_fullpath, out, NULL, &errmsg); + rc = fio_send_file(from_partial, out, false, NULL, &errmsg); } if (rc == SEND_OK) diff --git a/src/catalog.c b/src/catalog.c index 03099d1a2..9668427bb 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1069,6 +1069,7 @@ get_backup_filelist(pgBackup *backup, bool strict) char linked[MAXPGPATH]; char compress_alg_string[MAXPGPATH]; int64 write_size, + full_size, mode, /* bit length of mode_t depends on platforms */ is_datafile, is_cfs, @@ -1087,6 +1088,8 @@ get_backup_filelist(pgBackup *backup, bool strict) get_control_value_str(buf, "path", path, sizeof(path),true); get_control_value_int64(buf, "size", &write_size, true); + if (!get_control_value_int64(buf, "full_size", &full_size, false)) + full_size = write_size; get_control_value_int64(buf, "mode", &mode, true); get_control_value_int64(buf, "is_datafile", &is_datafile, true); get_control_value_int64(buf, "is_cfs", &is_cfs, false); @@ -1097,6 +1100,7 @@ get_backup_filelist(pgBackup *backup, bool strict) file = pgFileInit(path); file->write_size = (int64) write_size; + file->uncompressed_size = full_size; file->mode = (mode_t) mode; file->is_datafile = is_datafile ? true : false; file->is_cfs = is_cfs ? true : false; @@ -2561,6 +2565,11 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, file->external_dir_num, file->dbOid); + if (file->uncompressed_size != 0 && + file->uncompressed_size != file->write_size) + len += sprintf(line+len, ",\"full_size\":\"" INT64_FORMAT "\"", + file->uncompressed_size); + if (file->is_datafile) len += sprintf(line+len, ",\"segno\":\"%d\"", file->segno); diff --git a/src/data.c b/src/data.c index 753f247f7..a020c6efc 100644 --- a/src/data.c +++ b/src/data.c @@ -799,6 +799,7 @@ backup_non_data_file(pgFile *file, pgFile *prev_file, * and its mtime is less than parent backup start time ... 
*/ if ((pg_strcasecmp(file->name, RELMAPPER_FILENAME) != 0) && (prev_file && file->exists_in_prev && + file->size == prev_file->size && file->mtime <= parent_backup_time)) { /* @@ -1330,7 +1331,12 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, if (already_exists) { /* compare checksums of already existing file and backup file */ - pg_crc32 file_crc = fio_get_crc32(to_fullpath, FIO_DB_HOST, false, false); + pg_crc32 file_crc; + if (tmp_file->forkName == cfm && + tmp_file->uncompressed_size > tmp_file->write_size) + file_crc = fio_get_crc32_truncated(to_fullpath, FIO_DB_HOST); + else + file_crc = fio_get_crc32(to_fullpath, FIO_DB_HOST, false, false); if (file_crc == tmp_file->crc) { @@ -1387,10 +1393,12 @@ backup_non_data_file_internal(const char *from_fullpath, const char *to_fullpath, pgFile *file, bool missing_ok) { - FILE *in = NULL; FILE *out = NULL; - ssize_t read_len = 0; - char *buf = NULL; + char *errmsg = NULL; + int rc; + bool cut_zero_tail; + + cut_zero_tail = file->forkName == cfm; INIT_FILE_CRC32(true, file->crc); @@ -1412,107 +1420,43 @@ backup_non_data_file_internal(const char *from_fullpath, /* backup remote file */ if (fio_is_remote(FIO_DB_HOST)) - { - char *errmsg = NULL; - int rc = fio_send_file(from_fullpath, to_fullpath, out, file, &errmsg); + rc = fio_send_file(from_fullpath, out, cut_zero_tail, file, &errmsg); + else + rc = fio_send_file_local(from_fullpath, out, cut_zero_tail, file, &errmsg); - /* handle errors */ - if (rc == FILE_MISSING) - { - /* maybe deleted, it's not error in case of backup */ - if (missing_ok) - { - elog(LOG, "File \"%s\" is not found", from_fullpath); - file->write_size = FILE_NOT_FOUND; - goto cleanup; - } - else - elog(ERROR, "File \"%s\" is not found", from_fullpath); - } - else if (rc == WRITE_FAILED) - elog(ERROR, "Cannot write to \"%s\": %s", to_fullpath, strerror(errno)); - else if (rc != SEND_OK) + /* handle errors */ + if (rc == FILE_MISSING) + { + /* maybe deleted, it's not error in case of backup */ + if (missing_ok) { - if (errmsg) - elog(ERROR, "%s", errmsg); - else - elog(ERROR, "Cannot access remote file \"%s\"", from_fullpath); + elog(LOG, "File \"%s\" is not found", from_fullpath); + file->write_size = FILE_NOT_FOUND; + goto cleanup; } - - pg_free(errmsg); + else + elog(ERROR, "File \"%s\" is not found", from_fullpath); } - /* backup local file */ - else + else if (rc == WRITE_FAILED) + elog(ERROR, "Cannot write to \"%s\": %s", to_fullpath, strerror(errno)); + else if (rc != SEND_OK) { - /* open source file for read */ - in = fopen(from_fullpath, PG_BINARY_R); - if (in == NULL) - { - /* maybe deleted, it's not error in case of backup */ - if (errno == ENOENT) - { - if (missing_ok) - { - elog(LOG, "File \"%s\" is not found", from_fullpath); - file->write_size = FILE_NOT_FOUND; - goto cleanup; - } - else - elog(ERROR, "File \"%s\" is not found", from_fullpath); - } - - elog(ERROR, "Cannot open file \"%s\": %s", from_fullpath, - strerror(errno)); - } - - /* disable stdio buffering for local input/output files to avoid triple buffering */ - setvbuf(in, NULL, _IONBF, BUFSIZ); - setvbuf(out, NULL, _IONBF, BUFSIZ); - - /* allocate 64kB buffer */ - buf = pgut_malloc(CHUNK_SIZE); - - /* copy content and calc CRC */ - for (;;) - { - read_len = fread(buf, 1, CHUNK_SIZE, in); - - if (ferror(in)) - elog(ERROR, "Cannot read from file \"%s\": %s", - from_fullpath, strerror(errno)); - - if (read_len > 0) - { - if (fwrite(buf, 1, read_len, out) != read_len) - elog(ERROR, "Cannot write to file \"%s\": %s", to_fullpath, - 
strerror(errno)); - - /* update CRC */ - COMP_FILE_CRC32(true, file->crc, buf, read_len); - file->read_size += read_len; - } - - if (feof(in)) - break; - } + if (errmsg) + elog(ERROR, "%s", errmsg); + else + elog(ERROR, "Cannot access remote file \"%s\"", from_fullpath); } - file->write_size = (int64) file->read_size; + pg_free(errmsg); /* ????? */ - if (file->write_size > 0) - file->uncompressed_size = file->write_size; + file->uncompressed_size = file->read_size; cleanup: /* finish CRC calculation and store into pgFile */ FIN_FILE_CRC32(true, file->crc); - if (in && fclose(in)) - elog(ERROR, "Cannot close the file \"%s\": %s", from_fullpath, strerror(errno)); - if (out && fclose(out)) elog(ERROR, "Cannot close the file \"%s\": %s", to_fullpath, strerror(errno)); - - pg_free(buf); } /* diff --git a/src/dir.c b/src/dir.c index 00e918d0f..73d6db09b 100644 --- a/src/dir.c +++ b/src/dir.c @@ -262,137 +262,6 @@ pgFileDelete(mode_t mode, const char *full_path) } } -/* - * Read the local file to compute its CRC. - * We cannot make decision about file decompression because - * user may ask to backup already compressed files and we should be - * obvious about it. - */ -pg_crc32 -pgFileGetCRC(const char *file_path, bool use_crc32c, bool missing_ok) -{ - FILE *fp; - pg_crc32 crc = 0; - char *buf; - size_t len = 0; - - INIT_FILE_CRC32(use_crc32c, crc); - - /* open file in binary read mode */ - fp = fopen(file_path, PG_BINARY_R); - if (fp == NULL) - { - if (errno == ENOENT) - { - if (missing_ok) - { - FIN_FILE_CRC32(use_crc32c, crc); - return crc; - } - } - - elog(ERROR, "Cannot open file \"%s\": %s", - file_path, strerror(errno)); - } - - /* disable stdio buffering */ - setvbuf(fp, NULL, _IONBF, BUFSIZ); - buf = pgut_malloc(STDIO_BUFSIZE); - - /* calc CRC of file */ - for (;;) - { - if (interrupted) - elog(ERROR, "interrupted during CRC calculation"); - - len = fread(buf, 1, STDIO_BUFSIZE, fp); - - if (ferror(fp)) - elog(ERROR, "Cannot read \"%s\": %s", file_path, strerror(errno)); - - /* update CRC */ - COMP_FILE_CRC32(use_crc32c, crc, buf, len); - - if (feof(fp)) - break; - } - - FIN_FILE_CRC32(use_crc32c, crc); - fclose(fp); - pg_free(buf); - - return crc; -} - -/* - * Read the local file to compute its CRC. - * We cannot make decision about file decompression because - * user may ask to backup already compressed files and we should be - * obvious about it. 
- */ -pg_crc32 -pgFileGetCRCgz(const char *file_path, bool use_crc32c, bool missing_ok) -{ - gzFile fp; - pg_crc32 crc = 0; - int len = 0; - int err; - char *buf; - - INIT_FILE_CRC32(use_crc32c, crc); - - /* open file in binary read mode */ - fp = gzopen(file_path, PG_BINARY_R); - if (fp == NULL) - { - if (errno == ENOENT) - { - if (missing_ok) - { - FIN_FILE_CRC32(use_crc32c, crc); - return crc; - } - } - - elog(ERROR, "Cannot open file \"%s\": %s", - file_path, strerror(errno)); - } - - buf = pgut_malloc(STDIO_BUFSIZE); - - /* calc CRC of file */ - for (;;) - { - if (interrupted) - elog(ERROR, "interrupted during CRC calculation"); - - len = gzread(fp, buf, STDIO_BUFSIZE); - - if (len <= 0) - { - /* we either run into eof or error */ - if (gzeof(fp)) - break; - else - { - const char *err_str = NULL; - - err_str = gzerror(fp, &err); - elog(ERROR, "Cannot read from compressed file %s", err_str); - } - } - - /* update CRC */ - COMP_FILE_CRC32(use_crc32c, crc, buf, len); - } - - FIN_FILE_CRC32(use_crc32c, crc); - gzclose(fp); - pg_free(buf); - - return crc; -} - void pgFileFree(void *file) { @@ -1812,7 +1681,7 @@ write_database_map(pgBackup *backup, parray *database_map, parray *backup_files_ FIO_BACKUP_HOST); file->crc = pgFileGetCRC(database_map_path, true, false); file->write_size = file->size; - file->uncompressed_size = file->read_size; + file->uncompressed_size = file->size; parray_append(backup_files_list, file); } diff --git a/src/merge.c b/src/merge.c index 1ce49f9a2..79498f48c 100644 --- a/src/merge.c +++ b/src/merge.c @@ -1078,7 +1078,7 @@ merge_files(void *arg) tmp_file->hdr_crc = file->hdr_crc; } else - tmp_file->uncompressed_size = tmp_file->write_size; + tmp_file->uncompressed_size = tmp_file->uncompressed_size; /* Copy header metadata from old map into a new one */ tmp_file->n_headers = file->n_headers; diff --git a/src/pg_probackup.h b/src/pg_probackup.h index bc9f9b8a8..d1d912045 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -345,11 +345,11 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.8" +#define PROGRAM_VERSION "2.5.9" /* update when remote agent API or behaviour changes */ -#define AGENT_PROTOCOL_VERSION 20501 -#define AGENT_PROTOCOL_VERSION_STR "2.5.1" +#define AGENT_PROTOCOL_VERSION 20509 +#define AGENT_PROTOCOL_VERSION_STR "2.5.9" /* update only when changing storage format */ #define STORAGE_FORMAT_VERSION "2.4.4" @@ -1077,6 +1077,7 @@ extern void fio_pgFileDelete(pgFile *file, const char *full_path); extern void pgFileFree(void *file); extern pg_crc32 pgFileGetCRC(const char *file_path, bool use_crc32c, bool missing_ok); +extern pg_crc32 pgFileGetCRCTruncated(const char *file_path, bool use_crc32c); extern pg_crc32 pgFileGetCRCgz(const char *file_path, bool use_crc32c, bool missing_ok); extern int pgFileMapComparePath(const void *f1, const void *f2); @@ -1240,9 +1241,11 @@ extern int fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pg XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version, bool use_pagemap, BlockNumber *err_blknum, char **errormsg); /* return codes for fio_send_pages */ -extern int fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, char **errormsg); -extern int fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, +extern int 
fio_send_file_gz(const char *from_fullpath, FILE* out, char **errormsg); +extern int fio_send_file(const char *from_fullpath, FILE* out, bool cut_zero_tail, pgFile *file, char **errormsg); +extern int fio_send_file_local(const char *from_fullpath, FILE* out, bool cut_zero_tail, + pgFile *file, char **errormsg); extern void fio_list_dir(parray *files, const char *root, bool exclude, bool follow_symlink, bool add_root, bool backup_logs, bool skip_hidden, int external_dir_num); diff --git a/src/utils/file.c b/src/utils/file.c index 727b48c60..8e3701af6 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -18,6 +18,10 @@ static __thread int fio_stdin = 0; static __thread int fio_stderr = 0; static char *async_errormsg = NULL; +#define PAGE_ZEROSEARCH_COARSE_GRANULARITY 4096 +#define PAGE_ZEROSEARCH_FINE_GRANULARITY 64 +static const char zerobuf[PAGE_ZEROSEARCH_COARSE_GRANULARITY] = {0}; + fio_location MyLocation; typedef struct @@ -1357,14 +1361,20 @@ fio_sync(char const* path, fio_location location) enum { GET_CRC32_DECOMPRESS = 1, - GET_CRC32_MISSING_OK = 2 + GET_CRC32_MISSING_OK = 2, + GET_CRC32_TRUNCATED = 4 }; /* Get crc32 of file */ -pg_crc32 -fio_get_crc32(const char *file_path, fio_location location, - bool decompress, bool missing_ok) +static pg_crc32 +fio_get_crc32_ex(const char *file_path, fio_location location, + bool decompress, bool missing_ok, bool truncated) { + if (decompress && truncated) + elog(ERROR, "Could not calculate CRC for compressed truncated file"); + if (missing_ok && truncated) + elog(ERROR, "CRC calculation for missing truncated file is forbidden"); + if (fio_is_remote(location)) { fio_header hdr; @@ -1379,6 +1389,8 @@ fio_get_crc32(const char *file_path, fio_location location, hdr.arg = GET_CRC32_DECOMPRESS; if (missing_ok) hdr.arg |= GET_CRC32_MISSING_OK; + if (truncated) + hdr.arg |= GET_CRC32_TRUNCATED; IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, file_path, path_len), path_len); @@ -1390,11 +1402,26 @@ fio_get_crc32(const char *file_path, fio_location location, { if (decompress) return pgFileGetCRCgz(file_path, true, missing_ok); + else if (truncated) + return pgFileGetCRCTruncated(file_path, true); else return pgFileGetCRC(file_path, true, missing_ok); } } +pg_crc32 +fio_get_crc32(const char *file_path, fio_location location, + bool decompress, bool missing_ok) +{ + return fio_get_crc32_ex(file_path, location, decompress, missing_ok, false); +} + +pg_crc32 +fio_get_crc32_truncated(const char *file_path, fio_location location) +{ + return fio_get_crc32_ex(file_path, location, false, false, true); +} + /* Remove file */ int fio_unlink(char const* path, fio_location location) @@ -2455,7 +2482,7 @@ fio_send_pages_impl(int out, char* buf) * REMOTE_ERROR (-6) */ int -fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, char **errormsg) +fio_send_file_gz(const char *from_fullpath, FILE* out, char **errormsg) { fio_header hdr; int exit_code = SEND_OK; @@ -2604,6 +2631,105 @@ fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, return exit_code; } +typedef struct send_file_state { + bool calc_crc; + uint32_t crc; + int64_t read_size; + int64_t write_size; +} send_file_state; + +/* find page border of all-zero tail */ +static size_t +find_zero_tail(char *buf, size_t len) +{ + size_t i, l; + size_t granul = sizeof(zerobuf); + + if (len == 0) + return 0; + + /* fast check for last bytes */ + i = (len-1) & ~(PAGE_ZEROSEARCH_FINE_GRANULARITY-1); + l = len - 
i; + if (memcmp(buf + i, zerobuf, i) != 0) + return len; + + /* coarse search for zero tail */ + i = (len-1) & ~(granul-1); + l = len - i; + for (;;) + { + if (memcmp(buf+i, zerobuf, l) != 0) + { + i += l; + break; + } + if (i == 0) + break; + i -= granul; + l = granul; + } + + len = i; + /* search zero tail with finer granularity */ + for (granul = sizeof(zerobuf)/2; + len > 0 && granul >= PAGE_ZEROSEARCH_FINE_GRANULARITY; + granul /= 2) + { + if (granul > l) + continue; + i = (len-1) & ~(granul-1); + l = len - i; + if (memcmp(buf+i, zerobuf, l) == 0) + len = i; + } + + return len; +} + +static void +fio_send_file_crc(send_file_state* st, char *buf, size_t len) +{ + int64_t write_size; + + if (!st->calc_crc) + return; + + write_size = st->write_size; + while (st->read_size > write_size) + { + size_t crc_len = Min(st->read_size - write_size, sizeof(zerobuf)); + COMP_FILE_CRC32(true, st->crc, zerobuf, crc_len); + write_size += crc_len; + } + + if (len > 0) + COMP_FILE_CRC32(true, st->crc, buf, len); +} + +static bool +fio_send_file_write(FILE* out, send_file_state* st, char *buf, size_t len) +{ + if (len == 0) + return true; + + if (st->read_size > st->write_size && + fseeko(out, st->read_size, SEEK_SET) != 0) + { + return false; + } + + if (fwrite(buf, 1, len, out) != len) + { + return false; + } + + st->read_size += len; + st->write_size = st->read_size; + + return true; +} + /* Receive chunks of data and write them to destination file. * Return codes: * SEND_OK (0) @@ -2616,13 +2742,22 @@ fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, * If pgFile is not NULL then we must calculate crc and read_size for it. */ int -fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, +fio_send_file(const char *from_fullpath, FILE* out, bool cut_zero_tail, pgFile *file, char **errormsg) { fio_header hdr; int exit_code = SEND_OK; size_t path_len = strlen(from_fullpath) + 1; char *buf = pgut_malloc(CHUNK_SIZE); /* buffer */ + send_file_state st = {false, 0, 0, 0}; + + memset(&hdr, 0, sizeof(hdr)); + + if (file) + { + st.calc_crc = true; + st.crc = file->crc; + } hdr.cop = FIO_SEND_FILE; hdr.size = path_len; @@ -2640,6 +2775,37 @@ fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, if (hdr.cop == FIO_SEND_FILE_EOF) { + if (st.write_size < st.read_size) + { + if (!cut_zero_tail) + { + /* + * We still need to calc crc for zero tail. + */ + fio_send_file_crc(&st, NULL, 0); + + /* + * Let's write single zero byte to the end of file to restore + * logical size. + * Well, it would be better to use ftruncate here actually, + * but then we need to change interface. 
+ */ + st.read_size -= 1; + buf[0] = 0; + if (!fio_send_file_write(out, &st, buf, 1)) + { + exit_code = WRITE_FAILED; + break; + } + } + } + + if (file) + { + file->crc = st.crc; + file->read_size = st.read_size; + file->write_size = st.write_size; + } break; } else if (hdr.cop == FIO_ERROR) @@ -2660,17 +2826,23 @@ fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); /* We have received a chunk of data data, lets write it out */ - if (fwrite(buf, 1, hdr.size, out) != hdr.size) + fio_send_file_crc(&st, buf, hdr.size); + if (!fio_send_file_write(out, &st, buf, hdr.size)) { exit_code = WRITE_FAILED; break; } + } + else if (hdr.cop == FIO_PAGE_ZERO) + { + Assert(hdr.size == 0); + Assert(hdr.arg <= CHUNK_SIZE); - if (file) - { - file->read_size += hdr.size; - COMP_FILE_CRC32(true, file->crc, buf, hdr.size); - } + /* + * We have received a chunk of zero data, lets just think we + * wrote it. + */ + st.read_size += hdr.arg; } else { @@ -2686,6 +2858,117 @@ fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, return exit_code; } +int +fio_send_file_local(const char *from_fullpath, FILE* out, bool cut_zero_tail, + pgFile *file, char **errormsg) +{ + FILE* in; + char* buf; + size_t read_len, non_zero_len; + int exit_code = SEND_OK; + send_file_state st = {false, 0, 0, 0}; + + if (file) + { + st.calc_crc = true; + st.crc = file->crc; + } + + /* open source file for read */ + in = fopen(from_fullpath, PG_BINARY_R); + if (in == NULL) + { + /* maybe deleted, it's not error in case of backup */ + if (errno == ENOENT) + return FILE_MISSING; + + + *errormsg = psprintf("Cannot open file \"%s\": %s", from_fullpath, + strerror(errno)); + return OPEN_FAILED; + } + + /* disable stdio buffering for local input/output files to avoid triple buffering */ + setvbuf(in, NULL, _IONBF, BUFSIZ); + setvbuf(out, NULL, _IONBF, BUFSIZ); + + /* allocate 64kB buffer */ + buf = pgut_malloc(CHUNK_SIZE); + + /* copy content and calc CRC */ + for (;;) + { + read_len = fread(buf, 1, CHUNK_SIZE, in); + + if (ferror(in)) + { + *errormsg = psprintf("Cannot read from file \"%s\": %s", + from_fullpath, strerror(errno)); + exit_code = READ_FAILED; + goto cleanup; + } + + if (read_len > 0) + { + non_zero_len = find_zero_tail(buf, read_len); + if (non_zero_len > 0) + { + fio_send_file_crc(&st, buf, non_zero_len); + if (!fio_send_file_write(out, &st, buf, non_zero_len)) + { + exit_code = WRITE_FAILED; + goto cleanup; + } + } + if (non_zero_len < read_len) + { + /* Just pretend we wrote it. */ + st.read_size += read_len - non_zero_len; + } + } + + if (feof(in)) + break; + } + + if (st.write_size < st.read_size) + { + /* + * We still need to calc crc for zero tail. + */ + fio_send_file_crc(&st, NULL, 0); + + if (!cut_zero_tail) + { + /* + * Let's write single zero byte to the end of file to restore + * logical size. + * Well, it would be better to use ftruncate here actually, + * but then we need to change interface. 
+ */ + st.read_size -= 1; + buf[0] = 0; + if (!fio_send_file_write(out, &st, buf, 1)) + { + exit_code = WRITE_FAILED; + goto cleanup; + } + } + } + + if (file) + { + file->crc = st.crc; + file->read_size = st.read_size; + file->write_size = st.write_size; + } + +cleanup: + free(buf); + fclose(in); + return exit_code; +} + /* Send file content * On error we return FIO_ERROR message with following codes * FIO_ERROR: @@ -2746,6 +3029,7 @@ fio_send_file_impl(int out, char const* path) for (;;) { read_len = fread(buf, 1, CHUNK_SIZE, fp); + memset(&hdr, 0, sizeof(hdr)); /* report error */ if (ferror(fp)) @@ -2766,10 +3050,22 @@ fio_send_file_impl(int out, char const* path) if (read_len > 0) { /* send chunk */ - hdr.cop = FIO_PAGE; - hdr.size = read_len; - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(out, buf, read_len), read_len); + size_t non_zero_len = find_zero_tail(buf, read_len); + if (non_zero_len > 0) + { + hdr.cop = FIO_PAGE; + hdr.size = non_zero_len; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(out, buf, non_zero_len), non_zero_len); + } + + if (non_zero_len < read_len) + { + hdr.cop = FIO_PAGE_ZERO; + hdr.size = 0; + hdr.arg = read_len - non_zero_len; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + } } if (feof(fp)) @@ -2788,6 +3084,193 @@ fio_send_file_impl(int out, char const* path) return; } +/* + * Read the local file to compute its CRC. + * We cannot make decision about file decompression because + * user may ask to backup already compressed files and we should be + * obvious about it. + */ +pg_crc32 +pgFileGetCRC(const char *file_path, bool use_crc32c, bool missing_ok) +{ + FILE *fp; + pg_crc32 crc = 0; + char *buf; + size_t len = 0; + + INIT_FILE_CRC32(use_crc32c, crc); + + /* open file in binary read mode */ + fp = fopen(file_path, PG_BINARY_R); + if (fp == NULL) + { + if (errno == ENOENT) + { + if (missing_ok) + { + FIN_FILE_CRC32(use_crc32c, crc); + return crc; + } + } + + elog(ERROR, "Cannot open file \"%s\": %s", + file_path, strerror(errno)); + } + + /* disable stdio buffering */ + setvbuf(fp, NULL, _IONBF, BUFSIZ); + buf = pgut_malloc(STDIO_BUFSIZE); + + /* calc CRC of file */ + for (;;) + { + if (interrupted) + elog(ERROR, "interrupted during CRC calculation"); + + len = fread(buf, 1, STDIO_BUFSIZE, fp); + + if (ferror(fp)) + elog(ERROR, "Cannot read \"%s\": %s", file_path, strerror(errno)); + + /* update CRC */ + COMP_FILE_CRC32(use_crc32c, crc, buf, len); + + if (feof(fp)) + break; + } + + FIN_FILE_CRC32(use_crc32c, crc); + fclose(fp); + pg_free(buf); + + return crc; +} + +/* + * Read the local file to compute CRC for it extened to real_size. 
+ */ +pg_crc32 +pgFileGetCRCTruncated(const char *file_path, bool use_crc32c) +{ + FILE *fp; + char *buf; + size_t len = 0; + size_t non_zero_len; + send_file_state st = {true, 0, 0, 0}; + + INIT_FILE_CRC32(use_crc32c, st.crc); + + /* open file in binary read mode */ + fp = fopen(file_path, PG_BINARY_R); + if (fp == NULL) + { + elog(ERROR, "Cannot open file \"%s\": %s", + file_path, strerror(errno)); + } + + /* disable stdio buffering */ + setvbuf(fp, NULL, _IONBF, BUFSIZ); + buf = pgut_malloc(CHUNK_SIZE); + + /* calc CRC of file */ + for (;;) + { + if (interrupted) + elog(ERROR, "interrupted during CRC calculation"); + + len = fread(buf, 1, STDIO_BUFSIZE, fp); + + if (ferror(fp)) + elog(ERROR, "Cannot read \"%s\": %s", file_path, strerror(errno)); + + non_zero_len = find_zero_tail(buf, len); + if (non_zero_len) + { + fio_send_file_crc(&st, buf, non_zero_len); + st.write_size += st.read_size + non_zero_len; + } + st.read_size += len; + + if (feof(fp)) + break; + } + + FIN_FILE_CRC32(use_crc32c, st.crc); + fclose(fp); + pg_free(buf); + + return st.crc; +} + +/* + * Read the local file to compute its CRC. + * We cannot make decision about file decompression because + * user may ask to backup already compressed files and we should be + * obvious about it. + */ +pg_crc32 +pgFileGetCRCgz(const char *file_path, bool use_crc32c, bool missing_ok) +{ + gzFile fp; + pg_crc32 crc = 0; + int len = 0; + int err; + char *buf; + + INIT_FILE_CRC32(use_crc32c, crc); + + /* open file in binary read mode */ + fp = gzopen(file_path, PG_BINARY_R); + if (fp == NULL) + { + if (errno == ENOENT) + { + if (missing_ok) + { + FIN_FILE_CRC32(use_crc32c, crc); + return crc; + } + } + + elog(ERROR, "Cannot open file \"%s\": %s", + file_path, strerror(errno)); + } + + buf = pgut_malloc(STDIO_BUFSIZE); + + /* calc CRC of file */ + for (;;) + { + if (interrupted) + elog(ERROR, "interrupted during CRC calculation"); + + len = gzread(fp, buf, STDIO_BUFSIZE); + + if (len <= 0) + { + /* we either run into eof or error */ + if (gzeof(fp)) + break; + else + { + const char *err_str = NULL; + + err_str = gzerror(fp, &err); + elog(ERROR, "Cannot read from compressed file %s", err_str); + } + } + + /* update CRC */ + COMP_FILE_CRC32(use_crc32c, crc, buf, len); + } + + FIN_FILE_CRC32(use_crc32c, crc); + gzclose(fp); + pg_free(buf); + + return crc; +} + /* Compile the array of files located on remote machine in directory root */ static void fio_list_dir_internal(parray *files, const char *root, bool exclude, @@ -3387,9 +3870,13 @@ fio_communicate(int in, int out) IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); break; case FIO_GET_CRC32: + Assert((hdr.arg & GET_CRC32_TRUNCATED) == 0 || + (hdr.arg & GET_CRC32_TRUNCATED) == GET_CRC32_TRUNCATED); /* calculate crc32 for a file */ if ((hdr.arg & GET_CRC32_DECOMPRESS)) crc = pgFileGetCRCgz(buf, true, (hdr.arg & GET_CRC32_MISSING_OK) != 0); + else if ((hdr.arg & GET_CRC32_TRUNCATED)) + crc = pgFileGetCRCTruncated(buf, true); else crc = pgFileGetCRC(buf, true, (hdr.arg & GET_CRC32_MISSING_OK) != 0); IO_CHECK(fio_write_all(out, &crc, sizeof(crc)), sizeof(crc)); diff --git a/src/utils/file.h b/src/utils/file.h index ec478b451..890babf55 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -56,7 +56,8 @@ typedef enum FIO_CHECK_POSTMASTER, FIO_GET_ASYNC_ERROR, FIO_WRITE_ASYNC, - FIO_READLINK + FIO_READLINK, + FIO_PAGE_ZERO } fio_operations; typedef enum @@ -122,6 +123,7 @@ extern void fio_disconnect(void); extern int fio_sync(char const* path, fio_location location); extern 
pg_crc32 fio_get_crc32(const char *file_path, fio_location location, bool decompress, bool missing_ok); +extern pg_crc32 fio_get_crc32_truncated(const char *file_path, fio_location location); extern int fio_rename(char const* old_path, char const* new_path, fio_location location); extern int fio_symlink(char const* target, char const* link_path, bool overwrite, fio_location location); diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index 4de288907..7c9fcbfe0 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.5.8 +pg_probackup 2.5.9 From 202f2ade7f7ae7b1fdf1f7f8b413b2950d040fa4 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 24 Oct 2022 16:52:38 +0300 Subject: [PATCH 345/525] find_zero_tail: fix last bytes check --- src/utils/file.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 8e3701af6..b45fea0e7 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2649,9 +2649,9 @@ find_zero_tail(char *buf, size_t len) return 0; /* fast check for last bytes */ - i = (len-1) & ~(PAGE_ZEROSEARCH_FINE_GRANULARITY-1); - l = len - i; - if (memcmp(buf + i, zerobuf, i) != 0) + l = Min(len, PAGE_ZEROSEARCH_FINE_GRANULARITY); + i = len - l; + if (memcmp(buf + i, zerobuf, l) != 0) return len; /* coarse search for zero tail */ From 2eaeb942c68a6b149f6e9b04a9f094c57f3e8b96 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 24 Oct 2022 17:15:59 +0300 Subject: [PATCH 346/525] [PBCKP-146] add test for cfm size --- tests/cfs_backup.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/tests/cfs_backup.py b/tests/cfs_backup.py index 436db31e7..861c9f1ea 100644 --- a/tests/cfs_backup.py +++ b/tests/cfs_backup.py @@ -171,12 +171,16 @@ def test_fullbackup_after_create_table(self): "ERROR: File pg_compression not found in {0}".format( os.path.join(self.backup_dir, 'node', backup_id)) ) - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) + cfms = find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['.cfm']) + self.assertTrue(cfms, "ERROR: .cfm files not found in backup dir") + for cfm in cfms: + size = os.stat(cfm).st_size + self.assertLessEqual(size, 4096, + "ERROR: {0} is not truncated (has size {1} > 4096)".format( + cfm, size + )) # @unittest.expectedFailure # @unittest.skip("skip") From 5f71f7710fc4df684f53b99c575182b00f2c8a99 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 24 Oct 2022 17:28:07 +0300 Subject: [PATCH 347/525] fix fio_send_file_local --- src/utils/file.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index b45fea0e7..7f88a3ad0 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2933,13 +2933,13 @@ fio_send_file_local(const char *from_fullpath, FILE* out, bool cut_zero_tail, if (st.write_size < st.read_size) { - /* - * We still need to calc crc for zero tail. - */ - fio_send_file_crc(&st, NULL, 0); - if (!cut_zero_tail) { + /* + * We still need to calc crc for zero tail. + */ + fio_send_file_crc(&st, NULL, 0); + /* * Let's write single zero byte to the end of file to restore * logical size. 
From 302db1c49f49ff61662b2f201e0e4342b77de539 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 24 Oct 2022 20:35:11 +0300 Subject: [PATCH 348/525] [PGPRO-146] store at least cfs header size bytes. --- src/utils/file.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/utils/file.c b/src/utils/file.c index 7f88a3ad0..b4ba30594 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2911,6 +2911,17 @@ fio_send_file_local(const char *from_fullpath, FILE* out, bool cut_zero_tail, if (read_len > 0) { non_zero_len = find_zero_tail(buf, read_len); + /* + * It is dirty trick to silence warnings in CFS GC process: + * backup at least cfs header size bytes. + */ + if (st.read_size + non_zero_len < PAGE_ZEROSEARCH_FINE_GRANULARITY && + st.read_size + read_len > 0) + { + non_zero_len = Min(PAGE_ZEROSEARCH_FINE_GRANULARITY, + st.read_size + read_len); + non_zero_len -= st.read_size; + } if (non_zero_len > 0) { fio_send_file_crc(&st, buf, non_zero_len); From a20eb7bddb65578e5cfb2dff1ad73c67dd55ece2 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 24 Oct 2022 21:57:14 +0300 Subject: [PATCH 349/525] [PGPRO-146] pgdata_content: checksum for truncated cfm --- tests/helpers/ptrack_helpers.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index d800f0d3e..abb715b7e 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1709,8 +1709,18 @@ def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): file_relpath = os.path.relpath(file_fullpath, pgdata) directory_dict['files'][file_relpath] = {'is_datafile': False} with open(file_fullpath, 'rb') as f: - directory_dict['files'][file_relpath]['md5'] = hashlib.md5(f.read()).hexdigest() - f.close() + content = f.read() + # truncate cfm's content's zero tail + if file_relpath.endswith('.cfm'): + zero64 = b"\x00"*64 + l = len(content) + while l > 64: + s = (l - 1) & ~63 + if content[s:l] != zero64[:l-s]: + break + l = s + content = content[:l] + directory_dict['files'][file_relpath]['md5'] = hashlib.md5(content).hexdigest() # directory_dict['files'][file_relpath]['md5'] = hashlib.md5( # f = open(file_fullpath, 'rb').read()).hexdigest() From f2537982503d988cf960b8c5b5b37fa39b602d8e Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 24 Oct 2022 21:57:55 +0300 Subject: [PATCH 350/525] [PGPRO-146] cfs_catchup test Test full catchup and delta catchup. 
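For reference, the zero-tail handling in the patches above (find_zero_tail() on the backup side, the "store at least cfs header size bytes" tweak, and the .cfm trimming added to pgdata_content()) all rest on one rule: trailing 64-byte-aligned blocks that are entirely zero can be dropped before hashing or writing, while the logical size is tracked separately. The fragment below is a minimal, self-contained sketch of that trimming rule only; the helper name is hypothetical and it is not the committed implementation.

```c
/*
 * Sketch (hypothetical helper, not the committed find_zero_tail()): drop
 * trailing 64-byte-aligned blocks that are entirely zero and report how many
 * bytes of the buffer still need to be hashed or written.
 */
#include <stddef.h>
#include <string.h>

#define FINE_GRANULARITY 64

static size_t
nonzero_prefix_len(const char *buf, size_t len)
{
    static const char zeros[FINE_GRANULARITY] = {0};

    while (len > 0)
    {
        /* start of the last (possibly partial) 64-byte block */
        size_t block = (len - 1) & ~((size_t) (FINE_GRANULARITY - 1));

        if (memcmp(buf + block, zeros, len - block) != 0)
            break;          /* last block has data: keep everything */
        len = block;        /* block is all zeros: trim it and look again */
    }
    return len;
}
```

Hashing only the first nonzero_prefix_len(buf, read_len) bytes of a .cfm chunk is roughly what the pgdata_content() change above does, which is why the comparison no longer depends on whether the all-zero tail was physically stored.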
--- tests/cfs_catchup.py | 107 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 tests/cfs_catchup.py diff --git a/tests/cfs_catchup.py b/tests/cfs_catchup.py new file mode 100644 index 000000000..068311035 --- /dev/null +++ b/tests/cfs_catchup.py @@ -0,0 +1,107 @@ +import os +import unittest +import random +import shutil + +from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException + +module_name = 'cfs_catchup' +tblspace_name = 'cfs_tblspace' + + +class CfsCatchupNoEncTest(ProbackupTest, unittest.TestCase): + def setUp(self): + self.fname = self.id().split('.')[3] + + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_full_catchup_with_tablespace(self): + """ + Test tablespace transfers + """ + # preparation + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True + ) + src_pg.slow_start() + tblspace1_old_path = self.get_tblspace_path(src_pg, 'tblspace1_old') + self.create_tblspace_in_node(src_pg, 'tblspace1', tblspc_path = tblspace1_old_path, cfs=True) + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question TABLESPACE tblspace1 AS SELECT 42 AS answer") + src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_pg.safe_psql( + "postgres", + "CHECKPOINT") + + # do full catchup with tablespace mapping + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + tblspace1_new_path = self.get_tblspace_path(dst_pg, 'tblspace1_new') + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', + '-p', str(src_pg.port), + '--stream', + '-T', '{0}={1}'.format(tblspace1_old_path, tblspace1_new_path) + ] + ) + + # 1st check: compare data directories + self.compare_pgdata( + self.pgdata_content(src_pg.data_dir), + self.pgdata_content(dst_pg.data_dir) + ) + + # make changes in master tablespace + src_pg.safe_psql( + "postgres", + "UPDATE ultimate_question SET answer = -1") + src_pg.safe_psql( + "postgres", + "CHECKPOINT") + + # run&recover catchup'ed instance + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + + # 2nd check: run verification query + dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') + + # and now delta backup + dst_pg.stop() + + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', + '-p', str(src_pg.port), + '--stream', + '-T', '{0}={1}'.format(tblspace1_old_path, tblspace1_new_path) + ] + ) + + # run&recover catchup'ed instance + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + + + # 3rd check: run verification query + src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') + + # Cleanup + src_pg.stop() + dst_pg.stop() + self.del_test_dir(module_name, self.fname) From 26f9992b2ab484a0bdd605d9669b2c86a07c28ed Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Mon, 24 Oct 
2022 18:28:30 +0300 Subject: [PATCH 351/525] [PBCKP-236] added PGPROBACKUP_MANUAL testing and PGPROBACKUP_SSH_AGENT_PATH flags. --- tests/compatibility.py | 53 +++++++++++++++++++++++++++--------------- 1 file changed, 34 insertions(+), 19 deletions(-) diff --git a/tests/compatibility.py b/tests/compatibility.py index 04af1478f..6c2bc9204 100644 --- a/tests/compatibility.py +++ b/tests/compatibility.py @@ -8,32 +8,48 @@ module_name = 'compatibility' +def check_manual_tests_enabled(): + return 'PGPROBACKUP_MANUAL' in os.environ and os.environ['PGPROBACKUP_MANUAL'] == 'ON' + + +def check_ssh_agent_path_exists(): + return 'PGPROBACKUP_SSH_AGENT_PATH' in os.environ + + class CompatibilityTest(ProbackupTest, unittest.TestCase): def setUp(self): self.fname = self.id().split('.')[3] # @unittest.expectedFailure - @unittest.skip("skip") + @unittest.skipUnless(check_manual_tests_enabled(), 'skip manual test') + @unittest.skipUnless(check_ssh_agent_path_exists(), 'skip no ssh agent path exist') + # @unittest.skip("skip") def test_catchup_with_different_remote_major_pg(self): """ Decription in jira issue PBCKP-236 - This test requires builds both PGPROEE11 and PGPROEE9_6 + This test exposures ticket error using pg_probackup builds for both PGPROEE11 and PGPROEE9_6 + + Prerequisites: + - pg_probackup git tag for PBCKP 2.5.1 + - master pg_probackup build should be made for PGPROEE11 + - agent pg_probackup build should be made for PGPROEE9_6 - prerequisites: - - git tag for PBCKP 2.5.1 - - master probackup build should be inside PGPROEE11 - - agent probackup build is inside PGPROEE9_6 + Calling probackup PGPROEE9_6 pg_probackup agent from PGPROEE11 pg_probackup master for DELTA backup causes + the PBCKP-236 problem - calling probackup PGPROEE9_6 agent from PGPROEE11 probackup master for DELTA backup causes the PBCKP-236 problem + Please give env variables PROBACKUP_MANUAL=ON;PGPROBACKUP_SSH_AGENT_PATH= + for the test - please correct path for agent's pg_path_remote_version = '/home/avaness/postgres/postgres.build.ee.9.6/bin/' + Please make path for agent's pgprobackup_ssh_agent_path = '/home/avaness/postgres/postgres.build.ee.9.6/bin/' + without pg_probackup executable """ self.verbose = True self.remote = True - # please use your own local path - pg_path_remote_version = '/home/avaness/postgres/postgres.build.clean/bin' + # please use your own local path like + # pgprobackup_ssh_agent_path = '/home/avaness/postgres/postgres.build.clean/bin/' + pgprobackup_ssh_agent_path = os.environ['PGPROBACKUP_SSH_AGENT_PATH'] src_pg = self.make_simple_node( base_dir=os.path.join(module_name, self.fname, 'src'), @@ -47,14 +63,13 @@ def test_catchup_with_different_remote_major_pg(self): # do full catchup dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) self.catchup_node( - backup_mode = 'FULL', - source_pgdata = src_pg.data_dir, - destination_node = dst_pg, + backup_mode='FULL', + source_pgdata=src_pg.data_dir, + destination_node=dst_pg, options=['-d', 'postgres', '-p', str(src_pg.port), '--stream'] ) - dst_options = {} - dst_options['port'] = str(dst_pg.port) + dst_options = {'port': str(dst_pg.port)} self.set_auto_conf(dst_pg, dst_options) dst_pg.slow_start() dst_pg.stop() @@ -66,11 +81,11 @@ def test_catchup_with_different_remote_major_pg(self): # do delta catchup with remote pg_probackup agent with another postgres major version # this DELTA backup should fail without PBCKP-236 patch. 
self.catchup_node( - backup_mode = 'DELTA', - source_pgdata = src_pg.data_dir, - destination_node = dst_pg, + backup_mode='DELTA', + source_pgdata=src_pg.data_dir, + destination_node=dst_pg, # here's substitution of --remoge-path pg_probackup agent compiled with another postgres version - options=['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--remote-path=' + pg_path_remote_version] + options=['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--remote-path=' + pgprobackup_ssh_agent_path] ) # Clean after yourself From 97355f1562041136c952add40b5ccafc76eaff58 Mon Sep 17 00:00:00 2001 From: dlepikhova <43872363+dlepikhova@users.noreply.github.com> Date: Wed, 26 Oct 2022 18:34:36 +0500 Subject: [PATCH 352/525] release_2_5_9-pbckp-227 (#533) [PBCKP-227]: Fix some potential problems in pg_probackup code * Fix fwrite parameters in fio_open_stream * Remove unused get_system_dbstate function * Set actual sourse for fields in instance_options to SOURCE_DEFAULT * Remove get_system_dbstate declaration from header file --- src/configure.c | 132 ++++++++++++++++++++++----------------------- src/pg_probackup.h | 1 - src/util.c | 16 ------ src/utils/file.c | 2 +- 4 files changed, 67 insertions(+), 84 deletions(-) diff --git a/src/configure.c b/src/configure.c index 6e8700de1..f7befb0c5 100644 --- a/src/configure.c +++ b/src/configure.c @@ -53,7 +53,7 @@ ConfigOption instance_options[] = /* Instance options */ { 's', 'D', "pgdata", - &instance_config.pgdata, SOURCE_CMD, 0, + &instance_config.pgdata, SOURCE_CMD, SOURCE_DEFAULT, OPTION_INSTANCE_GROUP, 0, option_get_value }, { @@ -70,49 +70,49 @@ ConfigOption instance_options[] = #endif { 's', 'E', "external-dirs", - &instance_config.external_dir_str, SOURCE_CMD, 0, + &instance_config.external_dir_str, SOURCE_CMD, SOURCE_DEFAULT, OPTION_INSTANCE_GROUP, 0, option_get_value }, /* Connection options */ { 's', 'd', "pgdatabase", - &instance_config.conn_opt.pgdatabase, SOURCE_CMD, 0, + &instance_config.conn_opt.pgdatabase, SOURCE_CMD, SOURCE_DEFAULT, OPTION_CONN_GROUP, 0, option_get_value }, { 's', 'h', "pghost", - &instance_config.conn_opt.pghost, SOURCE_CMD, 0, + &instance_config.conn_opt.pghost, SOURCE_CMD, SOURCE_DEFAULT, OPTION_CONN_GROUP, 0, option_get_value }, { 's', 'p', "pgport", - &instance_config.conn_opt.pgport, SOURCE_CMD, 0, + &instance_config.conn_opt.pgport, SOURCE_CMD, SOURCE_DEFAULT, OPTION_CONN_GROUP, 0, option_get_value }, { 's', 'U', "pguser", - &instance_config.conn_opt.pguser, SOURCE_CMD, 0, + &instance_config.conn_opt.pguser, SOURCE_CMD, SOURCE_DEFAULT, OPTION_CONN_GROUP, 0, option_get_value }, /* Replica options */ { 's', 202, "master-db", - &instance_config.master_conn_opt.pgdatabase, SOURCE_CMD, 0, + &instance_config.master_conn_opt.pgdatabase, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REPLICA_GROUP, 0, option_get_value }, { 's', 203, "master-host", - &instance_config.master_conn_opt.pghost, SOURCE_CMD, 0, + &instance_config.master_conn_opt.pghost, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REPLICA_GROUP, 0, option_get_value }, { 's', 204, "master-port", - &instance_config.master_conn_opt.pgport, SOURCE_CMD, 0, + &instance_config.master_conn_opt.pgport, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REPLICA_GROUP, 0, option_get_value }, { 's', 205, "master-user", - &instance_config.master_conn_opt.pguser, SOURCE_CMD, 0, + &instance_config.master_conn_opt.pguser, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REPLICA_GROUP, 0, option_get_value }, { @@ -128,17 +128,17 @@ ConfigOption instance_options[] = }, { 's', 208, "archive-host", - 
&instance_config.archive.host, SOURCE_CMD, 0, + &instance_config.archive.host, SOURCE_CMD, SOURCE_DEFAULT, OPTION_ARCHIVE_GROUP, 0, option_get_value }, { 's', 209, "archive-port", - &instance_config.archive.port, SOURCE_CMD, 0, + &instance_config.archive.port, SOURCE_CMD, SOURCE_DEFAULT, OPTION_ARCHIVE_GROUP, 0, option_get_value }, { 's', 210, "archive-user", - &instance_config.archive.user, SOURCE_CMD, 0, + &instance_config.archive.user, SOURCE_CMD, SOURCE_DEFAULT, OPTION_ARCHIVE_GROUP, 0, option_get_value }, { @@ -149,37 +149,37 @@ ConfigOption instance_options[] = /* Logging options */ { 'f', 212, "log-level-console", - assign_log_level_console, SOURCE_CMD, 0, + assign_log_level_console, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, get_log_level_console }, { 'f', 213, "log-level-file", - assign_log_level_file, SOURCE_CMD, 0, + assign_log_level_file, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, get_log_level_file }, { 'f', 214, "log-format-console", - assign_log_format_console, SOURCE_CMD_STRICT, 0, + assign_log_format_console, SOURCE_CMD_STRICT, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, get_log_format_console }, { 'f', 215, "log-format-file", - assign_log_format_file, SOURCE_CMD, 0, + assign_log_format_file, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, get_log_format_file }, { 's', 216, "log-filename", - &instance_config.logger.log_filename, SOURCE_CMD, 0, + &instance_config.logger.log_filename, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { 's', 217, "error-log-filename", - &instance_config.logger.error_log_filename, SOURCE_CMD, 0, + &instance_config.logger.error_log_filename, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { 's', 218, "log-directory", - &instance_config.logger.log_directory, SOURCE_CMD, 0, + &instance_config.logger.log_directory, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { @@ -195,64 +195,64 @@ ConfigOption instance_options[] = /* Retention options */ { 'u', 221, "retention-redundancy", - &instance_config.retention_redundancy, SOURCE_CMD, 0, + &instance_config.retention_redundancy, SOURCE_CMD, SOURCE_DEFAULT, OPTION_RETENTION_GROUP, 0, option_get_value }, { 'u', 222, "retention-window", - &instance_config.retention_window, SOURCE_CMD, 0, + &instance_config.retention_window, SOURCE_CMD, SOURCE_DEFAULT, OPTION_RETENTION_GROUP, 0, option_get_value }, { 'u', 223, "wal-depth", - &instance_config.wal_depth, SOURCE_CMD, 0, + &instance_config.wal_depth, SOURCE_CMD, SOURCE_DEFAULT, OPTION_RETENTION_GROUP, 0, option_get_value }, /* Compression options */ { 'f', 224, "compress-algorithm", - assign_compress_alg, SOURCE_CMD, 0, + assign_compress_alg, SOURCE_CMD, SOURCE_DEFAULT, OPTION_COMPRESS_GROUP, 0, get_compress_alg }, { 'u', 225, "compress-level", - &instance_config.compress_level, SOURCE_CMD, 0, + &instance_config.compress_level, SOURCE_CMD, SOURCE_DEFAULT, OPTION_COMPRESS_GROUP, 0, option_get_value }, /* Remote backup options */ { 's', 226, "remote-proto", - &instance_config.remote.proto, SOURCE_CMD, 0, + &instance_config.remote.proto, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { 's', 227, "remote-host", - &instance_config.remote.host, SOURCE_CMD, 0, + &instance_config.remote.host, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { 's', 228, "remote-port", - &instance_config.remote.port, SOURCE_CMD, 0, + &instance_config.remote.port, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { 's', 229, "remote-path", - 
&instance_config.remote.path, SOURCE_CMD, 0, + &instance_config.remote.path, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { 's', 230, "remote-user", - &instance_config.remote.user, SOURCE_CMD, 0, + &instance_config.remote.user, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { 's', 231, "ssh-options", - &instance_config.remote.ssh_options, SOURCE_CMD, 0, + &instance_config.remote.ssh_options, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { 's', 232, "ssh-config", - &instance_config.remote.ssh_config, SOURCE_CMD, 0, + &instance_config.remote.ssh_config, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { 0 } @@ -412,7 +412,7 @@ readInstanceConfigFile(InstanceState *instanceState) /* Instance options */ { 's', 'D', "pgdata", - &instance->pgdata, SOURCE_CMD, 0, + &instance->pgdata, SOURCE_CMD, SOURCE_DEFAULT, OPTION_INSTANCE_GROUP, 0, option_get_value }, { @@ -429,49 +429,49 @@ readInstanceConfigFile(InstanceState *instanceState) #endif { 's', 'E', "external-dirs", - &instance->external_dir_str, SOURCE_CMD, 0, + &instance->external_dir_str, SOURCE_CMD, SOURCE_DEFAULT, OPTION_INSTANCE_GROUP, 0, option_get_value }, /* Connection options */ { 's', 'd', "pgdatabase", - &instance->conn_opt.pgdatabase, SOURCE_CMD, 0, + &instance->conn_opt.pgdatabase, SOURCE_CMD, SOURCE_DEFAULT, OPTION_CONN_GROUP, 0, option_get_value }, { 's', 'h', "pghost", - &instance->conn_opt.pghost, SOURCE_CMD, 0, + &instance->conn_opt.pghost, SOURCE_CMD, SOURCE_DEFAULT, OPTION_CONN_GROUP, 0, option_get_value }, { 's', 'p', "pgport", - &instance->conn_opt.pgport, SOURCE_CMD, 0, + &instance->conn_opt.pgport, SOURCE_CMD, SOURCE_DEFAULT, OPTION_CONN_GROUP, 0, option_get_value }, { 's', 'U', "pguser", - &instance->conn_opt.pguser, SOURCE_CMD, 0, + &instance->conn_opt.pguser, SOURCE_CMD, SOURCE_DEFAULT, OPTION_CONN_GROUP, 0, option_get_value }, /* Replica options */ { 's', 202, "master-db", - &instance->master_conn_opt.pgdatabase, SOURCE_CMD, 0, + &instance->master_conn_opt.pgdatabase, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REPLICA_GROUP, 0, option_get_value }, { 's', 203, "master-host", - &instance->master_conn_opt.pghost, SOURCE_CMD, 0, + &instance->master_conn_opt.pghost, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REPLICA_GROUP, 0, option_get_value }, { 's', 204, "master-port", - &instance->master_conn_opt.pgport, SOURCE_CMD, 0, + &instance->master_conn_opt.pgport, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REPLICA_GROUP, 0, option_get_value }, { 's', 205, "master-user", - &instance->master_conn_opt.pguser, SOURCE_CMD, 0, + &instance->master_conn_opt.pguser, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REPLICA_GROUP, 0, option_get_value }, { @@ -487,66 +487,66 @@ readInstanceConfigFile(InstanceState *instanceState) }, { 's', 208, "archive-host", - &instance_config.archive.host, SOURCE_CMD, 0, + &instance_config.archive.host, SOURCE_CMD, SOURCE_DEFAULT, OPTION_ARCHIVE_GROUP, 0, option_get_value }, { 's', 209, "archive-port", - &instance_config.archive.port, SOURCE_CMD, 0, + &instance_config.archive.port, SOURCE_CMD, SOURCE_DEFAULT, OPTION_ARCHIVE_GROUP, 0, option_get_value }, { 's', 210, "archive-user", - &instance_config.archive.user, SOURCE_CMD, 0, + &instance_config.archive.user, SOURCE_CMD, SOURCE_DEFAULT, OPTION_ARCHIVE_GROUP, 0, option_get_value }, { 's', 211, "restore-command", - &instance->restore_command, SOURCE_CMD, 0, + &instance->restore_command, SOURCE_CMD, SOURCE_DEFAULT, OPTION_ARCHIVE_GROUP, 0, option_get_value }, /* Instance options */ { 's', 
'D', "pgdata", - &instance->pgdata, SOURCE_CMD, 0, + &instance->pgdata, SOURCE_CMD, SOURCE_DEFAULT, OPTION_INSTANCE_GROUP, 0, option_get_value }, /* Logging options */ { 's', 212, "log-level-console", - &log_level_console, SOURCE_CMD, 0, + &log_level_console, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { 's', 213, "log-level-file", - &log_level_file, SOURCE_CMD, 0, + &log_level_file, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { 's', 214, "log-format-console", - &log_format_console, SOURCE_CMD_STRICT, 0, + &log_format_console, SOURCE_CMD_STRICT, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { 's', 215, "log-format-file", - &log_format_file, SOURCE_CMD, 0, + &log_format_file, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { 's', 216, "log-filename", - &instance->logger.log_filename, SOURCE_CMD, 0, + &instance->logger.log_filename, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { 's', 217, "error-log-filename", - &instance->logger.error_log_filename, SOURCE_CMD, 0, + &instance->logger.error_log_filename, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { 's', 218, "log-directory", - &instance->logger.log_directory, SOURCE_CMD, 0, + &instance->logger.log_directory, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { @@ -562,64 +562,64 @@ readInstanceConfigFile(InstanceState *instanceState) /* Retention options */ { 'u', 221, "retention-redundancy", - &instance->retention_redundancy, SOURCE_CMD, 0, + &instance->retention_redundancy, SOURCE_CMD, SOURCE_DEFAULT, OPTION_RETENTION_GROUP, 0, option_get_value }, { 'u', 222, "retention-window", - &instance->retention_window, SOURCE_CMD, 0, + &instance->retention_window, SOURCE_CMD, SOURCE_DEFAULT, OPTION_RETENTION_GROUP, 0, option_get_value }, { 'u', 223, "wal-depth", - &instance->wal_depth, SOURCE_CMD, 0, + &instance->wal_depth, SOURCE_CMD, SOURCE_DEFAULT, OPTION_RETENTION_GROUP, 0, option_get_value }, /* Compression options */ { 's', 224, "compress-algorithm", - &compress_alg, SOURCE_CMD, 0, + &compress_alg, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { 'u', 225, "compress-level", - &instance->compress_level, SOURCE_CMD, 0, + &instance->compress_level, SOURCE_CMD, SOURCE_DEFAULT, OPTION_COMPRESS_GROUP, 0, option_get_value }, /* Remote backup options */ { 's', 226, "remote-proto", - &instance->remote.proto, SOURCE_CMD, 0, + &instance->remote.proto, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { 's', 227, "remote-host", - &instance->remote.host, SOURCE_CMD, 0, + &instance->remote.host, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { 's', 228, "remote-port", - &instance->remote.port, SOURCE_CMD, 0, + &instance->remote.port, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { 's', 229, "remote-path", - &instance->remote.path, SOURCE_CMD, 0, + &instance->remote.path, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { 's', 230, "remote-user", - &instance->remote.user, SOURCE_CMD, 0, + &instance->remote.user, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { 's', 231, "ssh-options", - &instance->remote.ssh_options, SOURCE_CMD, 0, + &instance->remote.ssh_options, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { 's', 232, "ssh-config", - &instance->remote.ssh_config, SOURCE_CMD, 0, + &instance->remote.ssh_config, SOURCE_CMD, SOURCE_DEFAULT, 
OPTION_REMOTE_GROUP, 0, option_get_value }, { 0 } diff --git a/src/pg_probackup.h b/src/pg_probackup.h index bc9f9b8a8..ffb74da1a 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1175,7 +1175,6 @@ extern uint64 get_system_identifier(const char *pgdata_path, fio_location locati extern uint64 get_remote_system_identifier(PGconn *conn); extern uint32 get_data_checksum_version(bool safe); extern pg_crc32c get_pgcontrol_checksum(const char *pgdata_path); -extern DBState get_system_dbstate(const char *pgdata_path, fio_location location); extern uint32 get_xlog_seg_size(const char *pgdata_path); extern void get_redo(const char *pgdata_path, fio_location pgdata_location, RedoParams *redo); extern void set_min_recovery_point(pgFile *file, const char *backup_path, diff --git a/src/util.c b/src/util.c index 4d6c50a07..d19877f06 100644 --- a/src/util.c +++ b/src/util.c @@ -349,22 +349,6 @@ get_pgcontrol_checksum(const char *pgdata_path) return ControlFile.crc; } -DBState -get_system_dbstate(const char *pgdata_path, fio_location location) -{ - ControlFileData ControlFile; - char *buffer; - size_t size; - - buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, location); - if (buffer == NULL) - return 0; - digestControlFile(&ControlFile, buffer, size); - pg_free(buffer); - - return ControlFile.state; -} - void get_redo(const char *pgdata_path, fio_location pgdata_location, RedoParams *redo) { diff --git a/src/utils/file.c b/src/utils/file.c index 727b48c60..e32696f15 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -305,7 +305,7 @@ fio_open_stream(char const* path, fio_location location) IO_CHECK(fio_read_all(fio_stdin, fio_stdin_buffer, hdr.size), hdr.size); #ifdef WIN32 f = tmpfile(); - IO_CHECK(fwrite(f, 1, hdr.size, fio_stdin_buffer), hdr.size); + IO_CHECK(fwrite(fio_stdin_buffer, 1, hdr.size, f), hdr.size); SYS_CHECK(fseek(f, 0, SEEK_SET)); #else f = fmemopen(fio_stdin_buffer, hdr.size, "r"); From 4ac1b536a5f6be93901ad2f7fef216d80d868229 Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Fri, 14 Oct 2022 14:01:41 +0300 Subject: [PATCH 353/525] [PBCKP-235] README.md now contains correct package versions tags: pg_probackup --- .travis.yml | 5 ++-- README.md | 72 ++++++++++++++++++++++++++--------------------------- 2 files changed, 39 insertions(+), 38 deletions(-) diff --git a/.travis.yml b/.travis.yml index 5d0d786c4..bd3c8a09a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,7 +26,8 @@ notifications: # Default MODE is basic, i.e. 
all tests with PG_PROBACKUP_TEST_BASIC=ON env: - - PG_VERSION=15 PG_BRANCH=master PTRACK_PATCH_PG_BRANCH=master + - PG_VERSION=16 PG_BRANCH=master PTRACK_PATCH_PG_BRANCH=master + - PG_VERSION=15 PG_BRANCH=REL_15_STABLE PTRACK_PATCH_PG_BRANCH=REL_15_STABLE - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_BRANCH=REL_12_STABLE @@ -54,7 +55,7 @@ jobs: allow_failures: - if: env(PG_BRANCH) = master - if: env(PG_BRANCH) = REL9_5_STABLE -# - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) +# - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) # Only run CI for master branch commits to limit our travis usage #branches: diff --git a/README.md b/README.md index d1ccd9866..bae1171cb 100644 --- a/README.md +++ b/README.md @@ -42,8 +42,8 @@ Regardless of the chosen backup type, all backups taken with `pg_probackup` supp `PTRACK` backup support provided via following options: * vanilla PostgreSQL 11, 12, 13, 14, 15 with [ptrack extension](https://github.com/postgrespro/ptrack) -* Postgres Pro Standard 11, 12, 13, 14, 15 -* Postgres Pro Enterprise 11, 12, 13, 14, 15 +* Postgres Pro Standard 11, 12, 13, 14 +* Postgres Pro Enterprise 11, 12, 13, 14 ## Limitations @@ -74,62 +74,62 @@ Installers are available in release **assets**. [Latests](https://github.com/pos #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{15,14,13,12,11,10,9.6} -sudo apt-get install pg-probackup-{15,14,13,12,11,10,9.6}-dbg +sudo apt-get install pg-probackup-{14,13,12,11,10,9.6} +sudo apt-get install pg-probackup-{14,13,12,11,10,9.6}-dbg #DEB-SRC Packages sudo sh -c 'echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\ /etc/apt/sources.list.d/pg_probackup.list' && sudo apt-get update -sudo apt-get source pg-probackup-{15,14,13,12,11,10,9.6} +sudo apt-get source pg-probackup-{14,13,12,11,10,9.6} #DEB Astra Linix Orel sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{15,14,13,12,11,10,9.6}{-dbg,} +sudo apt-get install pg-probackup-{14,13,12,11,10,9.6}{-dbg,} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm -yum install pg_probackup-{15,14,13,12,11,10,9.6} -yum install pg_probackup-{15,14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{14,13,12,11,10,9.6} +yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-rhel.noarch.rpm -yum install pg_probackup-{15,14,13,12,11,10,9.6} -yum install 
pg_probackup-{15,14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{14,13,12,11,10,9.6} +yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo #RPM Oracle Linux Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-oraclelinux.noarch.rpm -yum install pg_probackup-{15,14,13,12,11,10,9.6} -yum install pg_probackup-{15,14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{14,13,12,11,10,9.6} +yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo #SRPM Centos|RHEL|OracleLinux Packages -yumdownloader --source pg_probackup-{15,14,13,12,11,10,9.6} +yumdownloader --source pg_probackup-{14,13,12,11,10,9.6} #RPM SUSE|SLES Packages zypper install --allow-unsigned-rpm -y https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-suse.noarch.rpm -zypper --gpg-auto-import-keys install -y pg_probackup-{15,14,13,12,11,10,9.6} -zypper install pg_probackup-{15,14,13,12,11,10,9.6}-debuginfo +zypper --gpg-auto-import-keys install -y pg_probackup-{14,13,12,11,10,9.6} +zypper install pg_probackup-{14,13,12,11,10,9.6}-debuginfo #SRPM SUSE|SLES Packages -zypper si pg_probackup-{15,14,13,12,11,10,9.6} +zypper si pg_probackup-{14,13,12,11,10,9.6} #RPM ALT Linux 7 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p7 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{15,14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{15,14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} +sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo #RPM ALT Linux 8 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p8 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{15,14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{15,14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} +sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p9 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{15,14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{15,14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} +sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo ``` #### pg_probackup for PostgresPro Standard and Enterprise @@ -137,8 +137,8 @@ sudo apt-get install pg_probackup-{15,14,13,12,11,10,9.6}-debuginfo #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup-forks.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{std,ent}-{15,14,13,12,11,10,9.6} -sudo apt-get install pg-probackup-{std,ent}-{15,14,13,12,11,10,9.6}-dbg +sudo apt-get install pg-probackup-{std,ent}-{14,13,12,11,10,9.6} +sudo apt-get install pg-probackup-{std,ent}-{14,13,12,11,10,9.6}-dbg #DEB Astra Linix Orel sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ stretch 
main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' @@ -148,35 +148,35 @@ sudo apt-get install pg-probackup-{std,ent}-{12,11,10,9.6}{-dbg,} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-centos.noarch.rpm -yum install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} +yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-rhel.noarch.rpm -yum install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} +yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo #RPM Oracle Linux Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-oraclelinux.noarch.rpm -yum install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} +yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo #RPM ALT Linux 7 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p7 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} +sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo #RPM ALT Linux 8 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p8 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} +sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p9 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' && sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{15,14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} +sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo ``` Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.github.io/pg_probackup/#pbk-install-and-setup). 
From 1a48b6c5959f75ceba18223bb7e597bce6035763 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Fri, 28 Oct 2022 19:40:15 +0300 Subject: [PATCH 354/525] [PBCKP-236] added remote shh agent path to log output --- src/utils/remote.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/utils/remote.c b/src/utils/remote.c index addd73dc8..8562c85e3 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -229,7 +229,7 @@ bool launch_agent(void) return false; } else { #endif - elog(LOG, "Start SSH client process, pid %d", child_pid); + elog(LOG, "Start SSH client process, pid %d, cmd \"%s\"", child_pid, cmd); SYS_CHECK(close(infd[1])); /* These are being used by the child */ SYS_CHECK(close(outfd[0])); SYS_CHECK(close(errfd[1])); From 79009c652cd7231edf9609837a669f7d20ae67f2 Mon Sep 17 00:00:00 2001 From: MetalDream666 <61190185+MetalDream666@users.noreply.github.com> Date: Sat, 29 Oct 2022 01:17:13 +0300 Subject: [PATCH 355/525] Revert "[PBCKP-120] skip partitioned indexes for checkdb --amcheck" --- src/checkdb.c | 10 +++------- tests/checkdb.py | 9 --------- 2 files changed, 3 insertions(+), 16 deletions(-) diff --git a/src/checkdb.c b/src/checkdb.c index 1133a7b5d..177fc3cc7 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -461,9 +461,7 @@ get_index_list(const char *dbname, bool first_db_with_amcheck, "LEFT JOIN pg_catalog.pg_class cls ON idx.indexrelid=cls.oid " "LEFT JOIN pg_catalog.pg_namespace nmspc ON cls.relnamespace=nmspc.oid " "LEFT JOIN pg_catalog.pg_am am ON cls.relam=am.oid " - "WHERE am.amname='btree' " - "AND cls.relpersistence != 't' " - "AND cls.relkind != 'I' " + "WHERE am.amname='btree' AND cls.relpersistence != 't' " "ORDER BY nmspc.nspname DESC", 0, NULL); } @@ -475,10 +473,8 @@ get_index_list(const char *dbname, bool first_db_with_amcheck, "LEFT JOIN pg_catalog.pg_class cls ON idx.indexrelid=cls.oid " "LEFT JOIN pg_catalog.pg_namespace nmspc ON cls.relnamespace=nmspc.oid " "LEFT JOIN pg_catalog.pg_am am ON cls.relam=am.oid " - "WHERE am.amname='btree' " - "AND cls.relpersistence != 't' " - "AND cls.relkind != 'I' " - "AND (cls.reltablespace IN " + "WHERE am.amname='btree' AND cls.relpersistence != 't' AND " + "(cls.reltablespace IN " "(SELECT oid from pg_catalog.pg_tablespace where spcname <> 'pg_global') " "OR cls.reltablespace = 0) " "ORDER BY nmspc.nspname DESC", diff --git a/tests/checkdb.py b/tests/checkdb.py index 07b55c6db..bcda0fb23 100644 --- a/tests/checkdb.py +++ b/tests/checkdb.py @@ -38,15 +38,6 @@ def test_checkdb_amcheck_only_sanity(self): node.safe_psql( "postgres", "create index on t_heap(id)") - - node.safe_psql( - "postgres", - "create table idxpart (a int) " - "partition by range (a)") - - node.safe_psql( - "postgres", - "create index on idxpart(a)") try: node.safe_psql( From 0b8cf419c344499d0540636a19ab944dcc167527 Mon Sep 17 00:00:00 2001 From: Sofia Kopikova Date: Mon, 20 Jun 2022 13:44:42 +0300 Subject: [PATCH 356/525] [PBCKP-120] skip partitioned indexes for checkdb --amcheck Tags: pg_probackup --- src/checkdb.c | 10 +++++++--- tests/checkdb.py | 9 +++++++++ 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/src/checkdb.c b/src/checkdb.c index 177fc3cc7..1133a7b5d 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -461,7 +461,9 @@ get_index_list(const char *dbname, bool first_db_with_amcheck, "LEFT JOIN pg_catalog.pg_class cls ON idx.indexrelid=cls.oid " "LEFT JOIN pg_catalog.pg_namespace nmspc ON cls.relnamespace=nmspc.oid " "LEFT JOIN pg_catalog.pg_am am ON cls.relam=am.oid " - "WHERE am.amname='btree' AND 
cls.relpersistence != 't' " + "WHERE am.amname='btree' " + "AND cls.relpersistence != 't' " + "AND cls.relkind != 'I' " "ORDER BY nmspc.nspname DESC", 0, NULL); } @@ -473,8 +475,10 @@ get_index_list(const char *dbname, bool first_db_with_amcheck, "LEFT JOIN pg_catalog.pg_class cls ON idx.indexrelid=cls.oid " "LEFT JOIN pg_catalog.pg_namespace nmspc ON cls.relnamespace=nmspc.oid " "LEFT JOIN pg_catalog.pg_am am ON cls.relam=am.oid " - "WHERE am.amname='btree' AND cls.relpersistence != 't' AND " - "(cls.reltablespace IN " + "WHERE am.amname='btree' " + "AND cls.relpersistence != 't' " + "AND cls.relkind != 'I' " + "AND (cls.reltablespace IN " "(SELECT oid from pg_catalog.pg_tablespace where spcname <> 'pg_global') " "OR cls.reltablespace = 0) " "ORDER BY nmspc.nspname DESC", diff --git a/tests/checkdb.py b/tests/checkdb.py index bcda0fb23..07b55c6db 100644 --- a/tests/checkdb.py +++ b/tests/checkdb.py @@ -38,6 +38,15 @@ def test_checkdb_amcheck_only_sanity(self): node.safe_psql( "postgres", "create index on t_heap(id)") + + node.safe_psql( + "postgres", + "create table idxpart (a int) " + "partition by range (a)") + + node.safe_psql( + "postgres", + "create index on idxpart(a)") try: node.safe_psql( From 0b474d261686f4554d5b853803444b3379b650ce Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Tue, 1 Nov 2022 11:53:17 +0300 Subject: [PATCH 357/525] [PBCKP-236] hotfix for C89 --- src/utils/remote.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/utils/remote.c b/src/utils/remote.c index 8562c85e3..9068c9406 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -321,7 +321,8 @@ size_t prepare_compatibility_str(char* compatibility_buf, size_t compatibility_b size_t result_size = 0; *compatibility_buf = '\0'; - for (int i = 0; i < (sizeof compatibility_params / sizeof(compatibility_param)); i++) + int i; + for (i = 0; i < (sizeof compatibility_params / sizeof(compatibility_param)); i++) { if (compatibility_params[i].strval != NULL) result_size += snprintf(compatibility_buf + result_size, compatibility_buf_size - result_size, From e36924a0fd5c42a0538421411a92377672bbf3c3 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 1 Nov 2022 12:53:46 +0300 Subject: [PATCH 358/525] [PBCKP-146] review fixes --- src/catalog.c | 10 ++++++---- src/data.c | 5 +++-- tests/cfs_backup.py | 2 ++ tests/cfs_catchup.py | 20 ++++++++++++++++++++ 4 files changed, 31 insertions(+), 6 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 9668427bb..561ab876e 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1069,7 +1069,7 @@ get_backup_filelist(pgBackup *backup, bool strict) char linked[MAXPGPATH]; char compress_alg_string[MAXPGPATH]; int64 write_size, - full_size, + uncompressed_size, mode, /* bit length of mode_t depends on platforms */ is_datafile, is_cfs, @@ -1088,8 +1088,6 @@ get_backup_filelist(pgBackup *backup, bool strict) get_control_value_str(buf, "path", path, sizeof(path),true); get_control_value_int64(buf, "size", &write_size, true); - if (!get_control_value_int64(buf, "full_size", &full_size, false)) - full_size = write_size; get_control_value_int64(buf, "mode", &mode, true); get_control_value_int64(buf, "is_datafile", &is_datafile, true); get_control_value_int64(buf, "is_cfs", &is_cfs, false); @@ -1100,7 +1098,6 @@ get_backup_filelist(pgBackup *backup, bool strict) file = pgFileInit(path); file->write_size = (int64) write_size; - file->uncompressed_size = full_size; file->mode = (mode_t) mode; file->is_datafile = is_datafile ? true : false; file->is_cfs = is_cfs ? 
true : false; @@ -1136,6 +1133,11 @@ get_backup_filelist(pgBackup *backup, bool strict) if (get_control_value_int64(buf, "hdr_size", &hdr_size, false)) file->hdr_size = (int) hdr_size; + if (get_control_value_int64(buf, "full_size", &uncompressed_size, false)) + file->uncompressed_size = uncompressed_size; + else + file->uncompressed_size = write_size; + if (file->external_dir_num == 0) set_forkname(file); diff --git a/src/data.c b/src/data.c index a020c6efc..2a8806cde 100644 --- a/src/data.c +++ b/src/data.c @@ -1447,11 +1447,12 @@ backup_non_data_file_internal(const char *from_fullpath, elog(ERROR, "Cannot access remote file \"%s\"", from_fullpath); } - pg_free(errmsg); /* ????? */ - file->uncompressed_size = file->read_size; cleanup: + if (errmsg != NULL) + pg_free(errmsg); + /* finish CRC calculation and store into pgFile */ FIN_FILE_CRC32(true, file->crc); diff --git a/tests/cfs_backup.py b/tests/cfs_backup.py index 861c9f1ea..306c2396c 100644 --- a/tests/cfs_backup.py +++ b/tests/cfs_backup.py @@ -171,6 +171,8 @@ def test_fullbackup_after_create_table(self): "ERROR: File pg_compression not found in {0}".format( os.path.join(self.backup_dir, 'node', backup_id)) ) + + # check cfm size cfms = find_by_extensions( [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], ['.cfm']) diff --git a/tests/cfs_catchup.py b/tests/cfs_catchup.py index 068311035..2cbb46729 100644 --- a/tests/cfs_catchup.py +++ b/tests/cfs_catchup.py @@ -56,6 +56,16 @@ def test_full_catchup_with_tablespace(self): self.pgdata_content(dst_pg.data_dir) ) + # check cfm size + cfms = find_by_extensions([os.path.join(dst_pg.data_dir)], ['.cfm']) + self.assertTrue(cfms, "ERROR: .cfm files not found in backup dir") + for cfm in cfms: + size = os.stat(cfm).st_size + self.assertLessEqual(size, 4096, + "ERROR: {0} is not truncated (has size {1} > 4096)".format( + cfm, size + )) + # make changes in master tablespace src_pg.safe_psql( "postgres", @@ -89,6 +99,16 @@ def test_full_catchup_with_tablespace(self): ] ) + # check cfm size again + cfms = find_by_extensions([os.path.join(dst_pg.data_dir)], ['.cfm']) + self.assertTrue(cfms, "ERROR: .cfm files not found in backup dir") + for cfm in cfms: + size = os.stat(cfm).st_size + self.assertLessEqual(size, 4096, + "ERROR: {0} is not truncated (has size {1} > 4096)".format( + cfm, size + )) + # run&recover catchup'ed instance dst_options = {} dst_options['port'] = str(dst_pg.port) From 64b84d0ca64a95db6154185aafc1291a6aa142df Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Tue, 1 Nov 2022 14:41:02 +0300 Subject: [PATCH 359/525] [PBCKP-236] hotfix-2 for C89 compatibility --- src/utils/remote.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/utils/remote.c b/src/utils/remote.c index 9068c9406..7ef8d3239 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -319,9 +319,9 @@ size_t prepare_compatibility_str(char* compatibility_buf, size_t compatibility_b }; size_t result_size = 0; + int i; *compatibility_buf = '\0'; - int i; for (i = 0; i < (sizeof compatibility_params / sizeof(compatibility_param)); i++) { if (compatibility_params[i].strval != NULL) From feacabd8ab15129743fdd9ef287dfccd793fdfe3 Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Wed, 2 Nov 2022 14:25:12 +0300 Subject: [PATCH 360/525] [PBCKP-308] Changed check_server_version function for postgresql version for 1c. 
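With this change, a pg_probackup binary built against the Postgres Pro 1C edition no
longer refuses to work with a vanilla PostgreSQL server; only a major-version mismatch
is rejected. Condensed, the new check in check_server_version() behaves like the sketch
below (same symbols as in the hunk that follows; an illustration, not the literal code):

    /* connected server reports no Postgres Pro version */
    if (strcmp(PGPRO_EDITION, "1C") != 0)
        elog(ERROR, "%s was built with Postgres Pro %s %s, "
             "but connection is made with PostgreSQL %s",
             PROGRAM_NAME, PG_MAJORVERSION, PGPRO_EDITION,
             nodeInfo->server_version_str);
    /* 1C build: plain PostgreSQL is acceptable if the major version matches */
    if (strcmp(nodeInfo->server_version_str, PG_MAJORVERSION) != 0)
        elog(ERROR, "%s was built with Postgres Pro %s %s, "
             "but connection is made with %s",
             PROGRAM_NAME, PG_MAJORVERSION, PGPRO_EDITION,
             nodeInfo->server_version_str);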
--- src/backup.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/src/backup.c b/src/backup.c index 31289978d..c73ee56c7 100644 --- a/src/backup.c +++ b/src/backup.c @@ -946,10 +946,21 @@ check_server_version(PGconn *conn, PGNodeInfo *nodeInfo) */ #ifdef PGPRO_VERSION if (!res) + { /* It seems we connected to PostgreSQL (not Postgres Pro) */ - elog(ERROR, "%s was built with Postgres Pro %s %s, " - "but connection is made with PostgreSQL %s", - PROGRAM_NAME, PG_MAJORVERSION, PGPRO_EDITION, nodeInfo->server_version_str); + if(strcmp(PGPRO_EDITION, "1C") != 0) + { + elog(ERROR, "%s was built with Postgres Pro %s %s, " + "but connection is made with PostgreSQL %s", + PROGRAM_NAME, PG_MAJORVERSION, PGPRO_EDITION, nodeInfo->server_version_str); + } + /* We have PostgresPro for 1C and connect to PostgreSQL or PostgresPro for 1C + * Check the major version + */ + if (strcmp(nodeInfo->server_version_str, PG_MAJORVERSION) != 0) + elog(ERROR, "%s was built with PostgrePro %s %s, but connection is made with %s", + PROGRAM_NAME, PG_MAJORVERSION, PGPRO_EDITION, nodeInfo->server_version_str); + } else { if (strcmp(nodeInfo->server_version_str, PG_MAJORVERSION) != 0 && From eaf3b14c22ec4cae50e5546539404232f56a9d7a Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 3 Nov 2022 02:37:29 +0300 Subject: [PATCH 361/525] fix set_forkname Fork detection were broken before set_forkname extraction, and its bug were copied into. Lets reimplement it to be like `parse_filename_for_nonetemp_relation` in PostgreSQL code. --- src/catalog.c | 11 ++++++ src/dir.c | 94 +++++++++++++++++++++++++++++----------------- src/pg_probackup.h | 2 +- 3 files changed, 72 insertions(+), 35 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 561ab876e..80cdacdc5 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1139,7 +1139,18 @@ get_backup_filelist(pgBackup *backup, bool strict) file->uncompressed_size = write_size; if (file->external_dir_num == 0) + { + bool is_datafile = file->is_datafile; set_forkname(file); + if (is_datafile != file->is_datafile) + { + elog(WARNING, "File '%s' was stored as datafile, but looks like it is not", + file->rel_path); + /* Lets fail in tests */ + Assert(file->is_datafile == file->is_datafile); + file->is_datafile = is_datafile; + } + } parray_append(files, file); } diff --git a/src/dir.c b/src/dir.c index 73d6db09b..b55f25e18 100644 --- a/src/dir.c +++ b/src/dir.c @@ -631,20 +631,6 @@ dir_check_file(pgFile *file, bool backup_logs) if (file->forkName == ptrack) /* Compatibility with left-overs from ptrack1 */ return CHECK_FALSE; - else if (file->forkName != none) - return CHECK_TRUE; - - /* Set is_datafile flag */ - { - char suffix[MAXFNAMELEN]; - - /* check if file is datafile */ - sscanf_res = sscanf(file->name, "%u.%d.%s", &(file->relOid), - &(file->segno), suffix); - Assert(sscanf_res > 0); /* since first char is digit */ - if (sscanf_res == 1 || sscanf_res == 2) - file->is_datafile = true; - } } } @@ -1789,34 +1775,74 @@ pfilearray_clear_locks(parray *file_list) } } +static inline bool +is_forkname(char *name, size_t *pos, const char *forkname) +{ + size_t fnlen = strlen(forkname); + if (strncmp(name + *pos, forkname, fnlen) != 0) + return false; + *pos += fnlen; + return true; +} + +#define OIDCHARS 10 + /* Set forkName if possible */ -void +bool set_forkname(pgFile *file) { - int name_len = strlen(file->name); - - /* Auxiliary fork of the relfile */ - if (name_len > 3 && strcmp(file->name + name_len - 3, "_vm") == 0) - file->forkName = vm; + size_t 
i = 0; + uint64_t oid = 0; /* use 64bit to not check for overflow in a loop */ - else if (name_len > 4 && strcmp(file->name + name_len - 4, "_fsm") == 0) - file->forkName = fsm; + /* pretend it is not relation file */ + file->relOid = 0; + file->forkName = none; + file->is_datafile = false; - else if (name_len > 4 && strcmp(file->name + name_len - 4, ".cfm") == 0) - file->forkName = cfm; + for (i = 0; isdigit(file->name[i]); i++) + { + if (i == 0 && file->name[i] == '0') + return false; + oid = oid * 10 + file->name[i] - '0'; + } + if (i == 0 || i > OIDCHARS || oid > UINT32_MAX) + return false; - else if (name_len > 5 && strcmp(file->name + name_len - 5, "_init") == 0) + /* usual fork name */ + /* /^\d+_(vm|fsm|init|ptrack)$/ */ + if (is_forkname(file->name, &i, "_vm")) + file->forkName = vm; + else if (is_forkname(file->name, &i, "_fsm")) + file->forkName = fsm; + else if (is_forkname(file->name, &i, "_init")) file->forkName = init; - - else if (name_len > 7 && strcmp(file->name + name_len - 7, "_ptrack") == 0) + else if (is_forkname(file->name, &i, "_ptrack")) file->forkName = ptrack; - // extract relOid for certain forks + /* segment number */ + /* /^\d+(_(vm|fsm|init|ptrack))?\.\d+$/ */ + if (file->name[i] == '.' && isdigit(file->name[i+1])) + { + for (i++; isdigit(file->name[i]); i++) + ; + } + + /* CFS "fork name" */ + if (file->forkName == none && + is_forkname(file->name, &i, ".cfm")) + { + /* /^\d+(\.\d+)?.cfm$/ */ + file->forkName = cfm; + } + + /* If there are excess characters, it is not relation file */ + if (file->name[i] != 0) + { + file->forkName = none; + return false; + } - if ((file->forkName == vm || - file->forkName == fsm || - file->forkName == init || - file->forkName == cfm) && - (sscanf(file->name, "%u*", &(file->relOid)) != 1)) - file->relOid = 0; + file->relOid = oid; + file->is_datafile = file->forkName == none; + return true; } diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 82504bb9a..f2201ebdd 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1098,7 +1098,7 @@ extern int pgCompareString(const void *str1, const void *str2); extern int pgPrefixCompareString(const void *str1, const void *str2); extern int pgCompareOid(const void *f1, const void *f2); extern void pfilearray_clear_locks(parray *file_list); -extern void set_forkname(pgFile *file); +extern bool set_forkname(pgFile *file); /* in data.c */ extern bool check_data_file(ConnectionArgs *arguments, pgFile *file, From a8ee334c3fbf1dda7c6c5ad58a8845672666a643 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 3 Nov 2022 02:37:58 +0300 Subject: [PATCH 362/525] [PBCKP-235] fix one test for <15.0 --- tests/backup.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tests/backup.py b/tests/backup.py index 4f447c9bd..6028a3ff6 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -3441,10 +3441,15 @@ def test_backup_atexit(self): self.assertIn( 'WARNING: backup in progress, stop backup', log_content) - - self.assertIn( - 'FROM pg_catalog.pg_backup_stop', - log_content) + + if self.get_version(node) < 150000: + self.assertIn( + 'FROM pg_catalog.pg_stop_backup', + log_content) + else: + self.assertIn( + 'FROM pg_catalog.pg_backup_stop', + log_content) self.assertIn( 'setting its status to ERROR', From 85708251bb623ed2ba55e2158adcb5c85838393a Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 3 Nov 2022 03:21:27 +0300 Subject: [PATCH 363/525] fix for forkname detection in get_backup_filelist --- src/catalog.c | 10 +++++++--- 1 file changed, 7 insertions(+), 
3 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 80cdacdc5..60bf4184d 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1138,14 +1138,18 @@ get_backup_filelist(pgBackup *backup, bool strict) else file->uncompressed_size = write_size; - if (file->external_dir_num == 0) + if (file->external_dir_num == 0 && S_ISREG(file->mode)) { bool is_datafile = file->is_datafile; set_forkname(file); if (is_datafile != file->is_datafile) { - elog(WARNING, "File '%s' was stored as datafile, but looks like it is not", - file->rel_path); + if (is_datafile) + elog(WARNING, "File '%s' was stored as datafile, but looks like it is not", + file->rel_path); + else + elog(WARNING, "File '%s' was stored as non-datafile, but looks like it is", + file->rel_path); /* Lets fail in tests */ Assert(file->is_datafile == file->is_datafile); file->is_datafile = is_datafile; From 03f210b2becaa1577e950a8b49258a008a3644d0 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Tue, 18 Oct 2022 12:51:57 +0300 Subject: [PATCH 364/525] [PBCKP-304] cfs tests moved back to build --- tests/__init__.py | 6 +++--- tests/cfs_backup.py | 12 +++++++----- tests/cfs_restore.py | 9 ++++++--- tests/helpers/ptrack_helpers.py | 25 +++++++++++++++++++++++++ 4 files changed, 41 insertions(+), 11 deletions(-) diff --git a/tests/__init__.py b/tests/__init__.py index 79537ad78..c02788e29 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -35,9 +35,9 @@ def load_tests(loader, tests, pattern): suite.addTests(loader.loadTestsFromModule(compatibility)) suite.addTests(loader.loadTestsFromModule(checkdb)) suite.addTests(loader.loadTestsFromModule(config)) -# suite.addTests(loader.loadTestsFromModule(cfs_backup)) -# suite.addTests(loader.loadTestsFromModule(cfs_restore)) -# suite.addTests(loader.loadTestsFromModule(cfs_validate_backup)) + suite.addTests(loader.loadTestsFromModule(cfs_backup)) + suite.addTests(loader.loadTestsFromModule(cfs_restore)) + suite.addTests(loader.loadTestsFromModule(cfs_validate_backup)) suite.addTests(loader.loadTestsFromModule(compression)) suite.addTests(loader.loadTestsFromModule(delete)) suite.addTests(loader.loadTestsFromModule(delta)) diff --git a/tests/cfs_backup.py b/tests/cfs_backup.py index 436db31e7..8e625e534 100644 --- a/tests/cfs_backup.py +++ b/tests/cfs_backup.py @@ -4,7 +4,7 @@ import shutil from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, is_test_result_ok module_name = 'cfs_backup' tblspace_name = 'cfs_tblspace' @@ -1159,10 +1159,12 @@ def test_broken_file_pg_compression_into_tablespace_dir(self): ) # # --- End ---# -# @unittest.skipUnless(ProbackupTest.enterprise, 'skip') -# def tearDown(self): -# self.node.cleanup() -# self.del_test_dir(module_name, self.fname) + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def tearDown(self): + module_name = self.id().split('.')[1] + fname = self.id().split('.')[3] + if is_test_result_ok(self): + self.del_test_dir(module_name, fname) #class CfsBackupEncTest(CfsBackupNoEncTest): diff --git a/tests/cfs_restore.py b/tests/cfs_restore.py index 611afc49e..0b1bb886f 100644 --- a/tests/cfs_restore.py +++ b/tests/cfs_restore.py @@ -13,7 +13,7 @@ import shutil from .helpers.cfs_helpers import find_by_name -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, is_test_result_ok 
module_name = 'cfs_restore' @@ -60,9 +60,12 @@ def setUp(self): def add_data_in_cluster(self): pass + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def tearDown(self): - self.node.cleanup() - self.del_test_dir(module_name, self.fname) + module_name = self.id().split('.')[1] + fname = self.id().split('.')[3] + if is_test_result_ok(self): + self.del_test_dir(module_name, fname) class CfsRestoreNoencEmptyTablespaceTest(CfsRestoreBase): diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index d800f0d3e..e19cde7d0 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1,6 +1,7 @@ # you need os for unittest to work import os import gc +import unittest from sys import exit, argv, version_info import subprocess import shutil @@ -172,6 +173,30 @@ def slow_start(self, replica=False): sleep(0.5) + +def is_test_result_ok(test_case): + # sources of solution: + # 1. python versions 2.7 - 3.10, verified on 3.10, 3.7, 2.7, taken from: + # https://tousu.in/qa/?qa=555402/unit-testing-getting-pythons-unittest-results-in-a-teardown-method&show=555403#a555403 + # + # 2. python versions 3.11+ mixin, verified on 3.11, taken from: https://stackoverflow.com/a/39606065 + + if hasattr(test_case, '_outcome'): # Python 3.4+ + if hasattr(test_case._outcome, 'errors'): + # Python 3.4 - 3.10 (These two methods have no side effects) + result = test_case.defaultTestResult() # These two methods have no side effects + test_case._feedErrorsToResult(result, test_case._outcome.errors) + else: + # Python 3.11+ + result = test_case._outcome.result + else: # Python 2.7, 3.0-3.3 + result = getattr(test_case, '_outcomeForDoCleanups', test_case._resultForDoCleanups) + + ok = all(test != test_case for test, text in result.errors + result.failures) + + return ok + + class ProbackupTest(object): # Class attributes enterprise = is_enterprise() From fc8b89079b83ce221ac7327a4e74e91fb1033ffa Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Tue, 25 Oct 2022 12:24:22 +0300 Subject: [PATCH 365/525] [PBCKP-304] fix cfs_restore test --- tests/cfs_restore.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/cfs_restore.py b/tests/cfs_restore.py index 0b1bb886f..cadbff8a1 100644 --- a/tests/cfs_restore.py +++ b/tests/cfs_restore.py @@ -105,8 +105,7 @@ def test_restore_empty_tablespace_from_fullbackup(self): tblspace = self.node.safe_psql( "postgres", "SELECT * FROM pg_tablespace WHERE spcname='{0}'".format(tblspace_name) - ) - tblspace = str(tblspace) + ).decode("UTF-8") self.assertTrue( tblspace_name in tblspace and "compression=true" in tblspace, "ERROR: The tablespace not restored or it restored without compressions" From bc945994def2b41034f8a29f2470b98e6c81d1f7 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Tue, 25 Oct 2022 12:27:01 +0300 Subject: [PATCH 366/525] [PBCKP-304] auto tests cleanup added to ptrack_helper.py --- tests/cfs_backup.py | 14 +++----------- tests/cfs_restore.py | 21 +++++---------------- tests/helpers/ptrack_helpers.py | 12 ++++++++++++ 3 files changed, 20 insertions(+), 27 deletions(-) diff --git a/tests/cfs_backup.py b/tests/cfs_backup.py index 8e625e534..4509b7e7b 100644 --- a/tests/cfs_backup.py +++ b/tests/cfs_backup.py @@ -4,9 +4,8 @@ import shutil from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, is_test_result_ok +from .helpers.ptrack_helpers import 
ProbackupTest, ProbackupException -module_name = 'cfs_backup' tblspace_name = 'cfs_tblspace' @@ -14,11 +13,10 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): # --- Begin --- # @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def setUp(self): - self.fname = self.id().split('.')[3] self.backup_dir = os.path.join( - self.tmp_path, module_name, self.fname, 'backup') + self.tmp_path, self.module_name, self.fname, 'backup') self.node = self.make_simple_node( - base_dir="{0}/{1}/node".format(module_name, self.fname), + base_dir="{0}/{1}/node".format(self.module_name, self.fname), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1159,12 +1157,6 @@ def test_broken_file_pg_compression_into_tablespace_dir(self): ) # # --- End ---# - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def tearDown(self): - module_name = self.id().split('.')[1] - fname = self.id().split('.')[3] - if is_test_result_ok(self): - self.del_test_dir(module_name, fname) #class CfsBackupEncTest(CfsBackupNoEncTest): diff --git a/tests/cfs_restore.py b/tests/cfs_restore.py index cadbff8a1..660cef9c6 100644 --- a/tests/cfs_restore.py +++ b/tests/cfs_restore.py @@ -13,10 +13,7 @@ import shutil from .helpers.cfs_helpers import find_by_name -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, is_test_result_ok - - -module_name = 'cfs_restore' +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException tblspace_name = 'cfs_tblspace' tblspace_name_new = 'cfs_tblspace_new' @@ -24,11 +21,10 @@ class CfsRestoreBase(ProbackupTest, unittest.TestCase): def setUp(self): - self.fname = self.id().split('.')[3] - self.backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + self.backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.node = self.make_simple_node( - base_dir="{0}/{1}/node".format(module_name, self.fname), + base_dir="{0}/{1}/node".format(self.module_name, self.fname), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -60,13 +56,6 @@ def setUp(self): def add_data_in_cluster(self): pass - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def tearDown(self): - module_name = self.id().split('.')[1] - fname = self.id().split('.')[3] - if is_test_result_ok(self): - self.del_test_dir(module_name, fname) - class CfsRestoreNoencEmptyTablespaceTest(CfsRestoreBase): # @unittest.expectedFailure @@ -214,7 +203,7 @@ def test_restore_from_fullbackup_to_new_location(self): self.node.cleanup() shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(module_name, self.fname)) + node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(self.module_name, self.fname)) node_new.cleanup() try: @@ -257,7 +246,7 @@ def test_restore_from_fullbackup_to_new_location_5_jobs(self): self.node.cleanup() shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(module_name, self.fname)) + node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(self.module_name, self.fname)) node_new.cleanup() try: diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index e19cde7d0..bd5ea01fd 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -204,6 +204,11 @@ class ProbackupTest(object): def __init__(self, *args, **kwargs): 
super(ProbackupTest, self).__init__(*args, **kwargs) + + if isinstance(self, unittest.TestCase): + self.module_name = self.id().split('.')[1] + self.fname = self.id().split('.')[3] + if '-v' in argv or '--verbose' in argv: self.verbose = True else: @@ -367,6 +372,13 @@ def __init__(self, *args, **kwargs): os.environ["PGAPPNAME"] = "pg_probackup" + def tearDown(self): + if isinstance(self, unittest.TestCase): + module_name = self.id().split('.')[1] + fname = self.id().split('.')[3] + if is_test_result_ok(self): + self.del_test_dir(module_name, fname) + @property def pg_config_version(self): return self.version_to_num( From 693bffe08ded725de24569a8fa20b6e16b547d6e Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 3 Nov 2022 11:47:08 +0300 Subject: [PATCH 367/525] [PBCKP-146] fix cfm truncated crc calculation in delta/page backup - On backup we should compare truncated crc with previous version. - copying remote file didn't honor "don't truncate first 64 bytes" rule. - crc calculation didn't honoer "don't truncate first 64 bytes" rule. --- src/data.c | 7 +++++-- src/pg_probackup.h | 2 +- src/utils/file.c | 49 +++++++++++++++++++++++++++++++++++++--------- src/utils/file.h | 3 ++- 4 files changed, 48 insertions(+), 13 deletions(-) diff --git a/src/data.c b/src/data.c index 2a8806cde..08727d41c 100644 --- a/src/data.c +++ b/src/data.c @@ -806,7 +806,10 @@ backup_non_data_file(pgFile *file, pgFile *prev_file, * file could be deleted under our feets. * But then backup_non_data_file_internal will handle it safely */ - file->crc = fio_get_crc32(from_fullpath, FIO_DB_HOST, false, true); + if (file->forkName != cfm) + file->crc = fio_get_crc32(from_fullpath, FIO_DB_HOST, false, true); + else + file->crc = fio_get_crc32_truncated(from_fullpath, FIO_DB_HOST, true); /* ...and checksum is the same... 
*/ if (EQ_TRADITIONAL_CRC32(file->crc, prev_file->crc)) @@ -1334,7 +1337,7 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, pg_crc32 file_crc; if (tmp_file->forkName == cfm && tmp_file->uncompressed_size > tmp_file->write_size) - file_crc = fio_get_crc32_truncated(to_fullpath, FIO_DB_HOST); + file_crc = fio_get_crc32_truncated(to_fullpath, FIO_DB_HOST, false); else file_crc = fio_get_crc32(to_fullpath, FIO_DB_HOST, false, false); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index f2201ebdd..6aeba189e 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1082,7 +1082,7 @@ extern void fio_pgFileDelete(pgFile *file, const char *full_path); extern void pgFileFree(void *file); extern pg_crc32 pgFileGetCRC(const char *file_path, bool use_crc32c, bool missing_ok); -extern pg_crc32 pgFileGetCRCTruncated(const char *file_path, bool use_crc32c); +extern pg_crc32 pgFileGetCRCTruncated(const char *file_path, bool use_crc32c, bool missing_ok); extern pg_crc32 pgFileGetCRCgz(const char *file_path, bool use_crc32c, bool missing_ok); extern int pgFileMapComparePath(const void *f1, const void *f2); diff --git a/src/utils/file.c b/src/utils/file.c index 627fbbad7..c4ed9c721 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1377,8 +1377,6 @@ fio_get_crc32_ex(const char *file_path, fio_location location, { if (decompress && truncated) elog(ERROR, "Could not calculate CRC for compressed truncated file"); - if (missing_ok && truncated) - elog(ERROR, "CRC calculation for missing truncated file is forbidden"); if (fio_is_remote(location)) { @@ -1408,7 +1406,7 @@ fio_get_crc32_ex(const char *file_path, fio_location location, if (decompress) return pgFileGetCRCgz(file_path, true, missing_ok); else if (truncated) - return pgFileGetCRCTruncated(file_path, true); + return pgFileGetCRCTruncated(file_path, true, missing_ok); else return pgFileGetCRC(file_path, true, missing_ok); } @@ -1422,9 +1420,10 @@ fio_get_crc32(const char *file_path, fio_location location, } pg_crc32 -fio_get_crc32_truncated(const char *file_path, fio_location location) +fio_get_crc32_truncated(const char *file_path, fio_location location, + bool missing_ok) { - return fio_get_crc32_ex(file_path, location, false, false, true); + return fio_get_crc32_ex(file_path, location, false, missing_ok, true); } /* Remove file */ @@ -3003,6 +3002,7 @@ fio_send_file_impl(int out, char const* path) fio_header hdr; char *buf = pgut_malloc(CHUNK_SIZE); size_t read_len = 0; + int64_t read_size = 0; char *errormsg = NULL; /* open source file for read */ @@ -3066,7 +3066,19 @@ fio_send_file_impl(int out, char const* path) if (read_len > 0) { /* send chunk */ - size_t non_zero_len = find_zero_tail(buf, read_len); + int64_t non_zero_len = find_zero_tail(buf, read_len); + /* + * It is dirty trick to silence warnings in CFS GC process: + * backup at least cfs header size bytes. + */ + if (read_size + non_zero_len < PAGE_ZEROSEARCH_FINE_GRANULARITY && + read_size + read_len > 0) + { + non_zero_len = Min(PAGE_ZEROSEARCH_FINE_GRANULARITY, + read_size + read_len); + non_zero_len -= read_size; + } + if (non_zero_len > 0) { hdr.cop = FIO_PAGE; @@ -3082,6 +3094,8 @@ fio_send_file_impl(int out, char const* path) hdr.arg = read_len - non_zero_len; IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } + + read_size += read_len; } if (feof(fp)) @@ -3166,7 +3180,7 @@ pgFileGetCRC(const char *file_path, bool use_crc32c, bool missing_ok) * Read the local file to compute CRC for it extened to real_size. 
*/ pg_crc32 -pgFileGetCRCTruncated(const char *file_path, bool use_crc32c) +pgFileGetCRCTruncated(const char *file_path, bool use_crc32c, bool missing_ok) { FILE *fp; char *buf; @@ -3180,6 +3194,15 @@ pgFileGetCRCTruncated(const char *file_path, bool use_crc32c) fp = fopen(file_path, PG_BINARY_R); if (fp == NULL) { + if (errno == ENOENT) + { + if (missing_ok) + { + FIN_FILE_CRC32(use_crc32c, st.crc); + return st.crc; + } + } + elog(ERROR, "Cannot open file \"%s\": %s", file_path, strerror(errno)); } @@ -3200,6 +3223,14 @@ pgFileGetCRCTruncated(const char *file_path, bool use_crc32c) elog(ERROR, "Cannot read \"%s\": %s", file_path, strerror(errno)); non_zero_len = find_zero_tail(buf, len); + /* same trick as in fio_send_file */ + if (st.read_size + non_zero_len < PAGE_ZEROSEARCH_FINE_GRANULARITY && + st.read_size + len > 0) + { + non_zero_len = Min(PAGE_ZEROSEARCH_FINE_GRANULARITY, + st.read_size + len); + non_zero_len -= st.read_size; + } if (non_zero_len) { fio_send_file_crc(&st, buf, non_zero_len); @@ -3894,12 +3925,12 @@ fio_communicate(int in, int out) break; case FIO_GET_CRC32: Assert((hdr.arg & GET_CRC32_TRUNCATED) == 0 || - (hdr.arg & GET_CRC32_TRUNCATED) == GET_CRC32_TRUNCATED); + (hdr.arg & (GET_CRC32_TRUNCATED|GET_CRC32_DECOMPRESS)) == GET_CRC32_TRUNCATED); /* calculate crc32 for a file */ if ((hdr.arg & GET_CRC32_DECOMPRESS)) crc = pgFileGetCRCgz(buf, true, (hdr.arg & GET_CRC32_MISSING_OK) != 0); else if ((hdr.arg & GET_CRC32_TRUNCATED)) - crc = pgFileGetCRCTruncated(buf, true); + crc = pgFileGetCRCTruncated(buf, true, (hdr.arg & GET_CRC32_MISSING_OK) != 0); else crc = pgFileGetCRC(buf, true, (hdr.arg & GET_CRC32_MISSING_OK) != 0); IO_CHECK(fio_write_all(out, &crc, sizeof(crc)), sizeof(crc)); diff --git a/src/utils/file.h b/src/utils/file.h index 621a4bf9f..01e5a24f4 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -123,7 +123,8 @@ extern void fio_disconnect(void); extern int fio_sync(char const* path, fio_location location); extern pg_crc32 fio_get_crc32(const char *file_path, fio_location location, bool decompress, bool missing_ok); -extern pg_crc32 fio_get_crc32_truncated(const char *file_path, fio_location location); +extern pg_crc32 fio_get_crc32_truncated(const char *file_path, fio_location location, + bool missing_ok); extern int fio_rename(char const* old_path, char const* new_path, fio_location location); extern int fio_symlink(char const* target, char const* link_path, bool overwrite, fio_location location); From 7cadc3378c11250c393dfe7415a0e28b8fc32ccf Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 3 Nov 2022 11:48:46 +0300 Subject: [PATCH 368/525] [PBCKP-146] fix filesize filling file->size were not filled while reading backup filelist. That lead to excess non-data file backups. 
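The fix, mirrored in the hunk below, is a single assignment in get_backup_filelist():
for non-data files (and cfs files) the size recovered from the backup file list is
propagated into file->size, so it is no longer left unset.

    /* sketch of the added lines (same as the hunk below) */
    if (!file->is_datafile || file->is_cfs)
        file->size = file->uncompressed_size;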
--- src/catalog.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/catalog.c b/src/catalog.c index 60bf4184d..488d7349f 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1137,6 +1137,8 @@ get_backup_filelist(pgBackup *backup, bool strict) file->uncompressed_size = uncompressed_size; else file->uncompressed_size = write_size; + if (!file->is_datafile || file->is_cfs) + file->size = file->uncompressed_size; if (file->external_dir_num == 0 && S_ISREG(file->mode)) { From b17669c96920ac8056454d61595412caca12cd57 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 3 Nov 2022 11:49:33 +0300 Subject: [PATCH 369/525] [PBCKP-146] add test for "unchanged cfm is not backuped" --- tests/cfs_backup.py | 63 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/tests/cfs_backup.py b/tests/cfs_backup.py index 306c2396c..fe2af20e3 100644 --- a/tests/cfs_backup.py +++ b/tests/cfs_backup.py @@ -419,6 +419,69 @@ def test_fullbackup_empty_tablespace_page_after_create_table(self): "ERROR: .cfm files not found in backup dir" ) + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_page_doesnt_store_unchanged_cfm(self): + """ + Case: Test page backup doesn't store cfm file if table were not modified + """ + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) + ) + + try: + backup_id_full = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') + except ProbackupException as e: + self.fail( + "ERROR: Full backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + self.assertTrue( + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id_full)], + ['.cfm']), + "ERROR: .cfm files not found in backup dir" + ) + + try: + backup_id = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='page') + except ProbackupException as e: + self.fail( + "ERROR: Incremental backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + show_backup = self.show_pb(self.backup_dir, 'node', backup_id) + self.assertEqual( + "OK", + show_backup["status"], + "ERROR: Incremental backup status is not valid. 
\n " + "Current backup status={0}".format(show_backup["status"]) + ) + self.assertTrue( + find_by_name( + [self.get_tblspace_path(self.node, tblspace_name)], + ['pg_compression']), + "ERROR: File pg_compression not found" + ) + self.assertFalse( + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['.cfm']), + "ERROR: .cfm files is found in backup dir" + ) + # @unittest.expectedFailure # @unittest.skip("skip") @unittest.skipUnless(ProbackupTest.enterprise, 'skip') From 9f3f530ec7999b47e49763217f6bb62c6cfd5d5c Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Sun, 23 Oct 2022 05:30:13 +0300 Subject: [PATCH 370/525] [PBCKP-304] extracted all cleanup routines to ptrack.py.TearDown() --- tests/CVE_2018_1058.py | 24 +-- tests/archive.py | 280 ++++++++----------------- tests/auth_test.py | 11 +- tests/backup.py | 434 +++++++++++---------------------------- tests/catchup.py | 138 +++++-------- tests/checkdb.py | 39 ++-- tests/compatibility.py | 158 +++++--------- tests/compression.py | 51 ++--- tests/config.py | 14 +- tests/delete.py | 102 +++------ tests/delta.py | 145 ++++--------- tests/exclude.py | 54 ++--- tests/external.py | 286 ++++++++------------------ tests/false_positive.py | 66 ++---- tests/incr_restore.py | 305 ++++++++------------------- tests/init.py | 35 +--- tests/locking.py | 72 ++----- tests/logging.py | 56 ++--- tests/merge.py | 287 ++++++++------------------ tests/option.py | 20 +- tests/page.py | 155 ++++---------- tests/pgpro2068.py | 13 +- tests/pgpro560.py | 20 +- tests/pgpro589.py | 11 +- tests/ptrack.py | 430 +++++++++++++------------------------- tests/remote.py | 11 +- tests/replica.py | 220 +++++++------------- tests/restore.py | 444 ++++++++++++---------------------------- tests/retention.py | 212 ++++++------------- tests/set_backup.py | 65 ++---- tests/show.py | 83 ++------ tests/time_consuming.py | 9 +- tests/time_stamp.py | 41 +--- tests/validate.py | 334 ++++++++---------------------- 34 files changed, 1321 insertions(+), 3304 deletions(-) diff --git a/tests/CVE_2018_1058.py b/tests/CVE_2018_1058.py index 3da41f116..cfd55cc60 100644 --- a/tests/CVE_2018_1058.py +++ b/tests/CVE_2018_1058.py @@ -2,17 +2,14 @@ import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -module_name = 'CVE-2018-1058' - class CVE_2018_1058(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_basic_default_search_path(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True) self.init_pb(backup_dir) @@ -31,16 +28,12 @@ def test_basic_default_search_path(self): self.backup_node(backup_dir, 'node', node, backup_type='full', options=['--stream']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_backup_modified_search_path(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True) self.set_auto_conf(node, options={'search_path': 
'public,pg_catalog'}) @@ -77,15 +70,11 @@ def test_basic_backup_modified_search_path(self): self.assertFalse( 'pg_probackup vulnerable!' in log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_checkdb_modified_search_path(self): """""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.set_auto_conf(node, options={'search_path': 'public,pg_catalog'}) node.slow_start() @@ -138,6 +127,3 @@ def test_basic_checkdb_modified_search_path(self): e.message, "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/archive.py b/tests/archive.py index 81d013f6b..f40cf3c5d 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -10,19 +10,15 @@ from distutils.dir_util import copy_tree -module_name = 'archive' - - class ArchiveTest(ProbackupTest, unittest.TestCase): # @unittest.expectedFailure # @unittest.skip("skip") def test_pgpro434_1(self): """Description in jira issue PGPRO-434""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -64,8 +60,6 @@ def test_pgpro434_1(self): self.assertEqual( result, node.safe_psql("postgres", "SELECT * FROM t_heap"), 'data after restore not equal to original data') - # Clean after yourself - self.del_test_dir(module_name, fname) # @unittest.skip("skip") # @unittest.expectedFailure @@ -74,10 +68,9 @@ def test_pgpro434_2(self): Check that timelines are correct. 
WAITING PGPRO-1053 for --immediate """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -85,7 +78,7 @@ def test_pgpro434_2(self): ) if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because pg_control_checkpoint() is not supported in PG 9.5') @@ -219,9 +212,6 @@ def test_pgpro434_2(self): "SELECT * FROM t_heap"), 'data after restore not equal to original data') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_pgpro434_3(self): """ @@ -230,10 +220,9 @@ def test_pgpro434_3(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -284,9 +273,6 @@ def test_pgpro434_3(self): log_content, 'PostgreSQL crashed because of a failed assert') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_pgpro434_4(self): """ @@ -295,10 +281,9 @@ def test_pgpro434_4(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -361,16 +346,12 @@ def test_pgpro434_4(self): log_content, 'PostgreSQL crashed because of a failed assert') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_archive_push_file_exists(self): """Archive-push if file exists""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -454,16 +435,12 @@ def test_archive_push_file_exists(self): print(log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_archive_push_file_exists_overwrite(self): """Archive-push if file exists""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={'checkpoint_timeout': '30s'}) 
@@ -530,16 +507,12 @@ def test_archive_push_file_exists_overwrite(self): 'WAL file already exists in archive with ' 'different checksum, overwriting', log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_archive_push_partial_file_exists(self): """Archive-push if stale '.part' file exists""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -613,16 +586,12 @@ def test_archive_push_partial_file_exists(self): 'Reusing stale temp WAL file', log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_archive_push_part_file_exists_not_stale(self): """Archive-push if .part file exists and it is not stale""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -693,9 +662,6 @@ def test_archive_push_part_file_exists_not_stale(self): # 'is not stale', # log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_replica_archive(self): @@ -704,10 +670,9 @@ def test_replica_archive(self): turn it into replica, set replica with archiving, make archive backup from replica """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -716,7 +681,7 @@ def test_replica_archive(self): 'max_wal_size': '32MB'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -726,7 +691,7 @@ def test_replica_archive(self): master.slow_start() replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() master.psql( @@ -775,7 +740,7 @@ def test_replica_archive(self): # RESTORE FULL BACKUP TAKEN FROM replica node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node')) + base_dir=os.path.join(self.module_name, self.fname, 'node')) node.cleanup() self.restore_node(backup_dir, 'replica', data_dir=node.data_dir) @@ -824,9 +789,6 @@ def test_replica_archive(self): after = node.safe_psql("postgres", "SELECT * FROM t_heap") self.assertEqual(before, after) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_master_and_replica_parallel_archiving(self): @@ -836,10 +798,9 @@ def 
test_master_and_replica_parallel_archiving(self): set replica with archiving, make archive backup from replica, make archive backup from master """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -847,12 +808,12 @@ def test_master_and_replica_parallel_archiving(self): ) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.init_pb(backup_dir) @@ -916,9 +877,6 @@ def test_master_and_replica_parallel_archiving(self): self.assertEqual( 'OK', self.show_pb(backup_dir, 'master', backup_id)['status']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_basic_master_and_replica_concurrent_archiving(self): @@ -931,10 +889,9 @@ def test_basic_master_and_replica_concurrent_archiving(self): if self.pg_config_version < self.version_to_num('9.6.0'): return unittest.skip('You need PostgreSQL >= 9.6 for this test') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -942,12 +899,12 @@ def test_basic_master_and_replica_concurrent_archiving(self): 'archive_timeout': '10s'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.init_pb(backup_dir) @@ -1020,10 +977,6 @@ def test_basic_master_and_replica_concurrent_archiving(self): self.backup_node(backup_dir, 'master', master) self.backup_node(backup_dir, 'master', replica) - # Clean after yourself - self.del_test_dir(module_name, fname) - - # @unittest.expectedFailure # @unittest.skip("skip") def test_concurrent_archiving(self): @@ -1037,10 +990,9 @@ def test_concurrent_archiving(self): if self.pg_config_version < self.version_to_num('11.0'): return unittest.skip('You need PostgreSQL >= 11 for this test') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums']) @@ -1056,7 +1008,7 @@ def test_concurrent_archiving(self): 
# Settings for Replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'node', replica) @@ -1067,7 +1019,7 @@ def test_concurrent_archiving(self): # create cascade replicas replica1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica1')) + base_dir=os.path.join(self.module_name, self.fname, 'replica1')) replica1.cleanup() # Settings for casaced replica @@ -1103,17 +1055,13 @@ def test_concurrent_archiving(self): log_content = f.read() self.assertNotIn('different checksum', log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_archive_pg_receivexlog(self): """Test backup with pg_receivexlog wal delivary method""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1177,16 +1125,14 @@ def test_archive_pg_receivexlog(self): # Clean after yourself pg_receivexlog.kill() - self.del_test_dir(module_name, fname) # @unittest.expectedFailure # @unittest.skip("skip") def test_archive_pg_receivexlog_compression_pg10(self): """Test backup with pg_receivewal compressed wal delivary method""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1245,7 +1191,6 @@ def test_archive_pg_receivexlog_compression_pg10(self): # Clean after yourself pg_receivexlog.kill() - self.del_test_dir(module_name, fname) # @unittest.expectedFailure # @unittest.skip("skip") @@ -1266,10 +1211,9 @@ def test_archive_catalog(self): ARCHIVE master: t1 -Z1--Z2--- """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1277,7 +1221,7 @@ def test_archive_catalog(self): 'checkpoint_timeout': '30s'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -1307,7 +1251,7 @@ def test_archive_catalog(self): backup_dir, 'master', master, backup_type='page') replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) self.set_replica(master, replica) @@ -1576,8 +1520,6 @@ def test_archive_catalog(self): self.assertEqual(timeline_2['parent-tli'], 1) 
self.assertEqual(timeline_1['parent-tli'], 0) - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_archive_catalog_1(self): @@ -1588,10 +1530,9 @@ def test_archive_catalog_1(self): self.skipTest('You need to enable ARCHIVE_COMPRESSION ' 'for this test to run') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1631,8 +1572,6 @@ def test_archive_catalog_1(self): '000000010000000000000001') self.assertEqual(timeline['status'], 'OK') - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_archive_catalog_2(self): @@ -1643,10 +1582,9 @@ def test_archive_catalog_2(self): self.skipTest('You need to enable ARCHIVE_COMPRESSION ' 'for this test to run') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1688,8 +1626,6 @@ def test_archive_catalog_2(self): '000000010000000000000002') self.assertEqual(timeline['status'], 'OK') - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_archive_options(self): @@ -1700,10 +1636,9 @@ def test_archive_options(self): if not self.remote: self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1766,8 +1701,6 @@ def test_archive_options(self): 'postgres', 'select 1') - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_archive_options_1(self): @@ -1775,10 +1708,9 @@ def test_archive_options_1(self): check that '--archive-host', '--archive-user', '--archiver-port' and '--restore-command' are working as expected with set-config """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1837,8 +1769,6 @@ def test_archive_options_1(self): self.probackup_path, backup_dir, 'node', self.user), recovery_content) - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_undefined_wal_file_path(self): @@ -1846,10 +1776,9 @@ def test_undefined_wal_file_path(self): check that archive-push works correct with undefined --wal-file-path """ - fname = self.id().split('.')[3] - backup_dir 
= os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1879,19 +1808,16 @@ def test_undefined_wal_file_path(self): # check self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], '000000010000000000000001') - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_intermediate_archiving(self): """ check that archive-push works correct with --wal-file-path setting by user """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) node_pg_options = {} @@ -1904,7 +1830,7 @@ def test_intermediate_archiving(self): self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) - wal_dir = os.path.join(self.tmp_path, module_name, fname, 'intermediate_dir') + wal_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'intermediate_dir') shutil.rmtree(wal_dir, ignore_errors=True) os.makedirs(wal_dir) if os.name == 'posix': @@ -1929,8 +1855,6 @@ def test_intermediate_archiving(self): self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], wal_segment) - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_waldir_outside_pgdata_archiving(self): @@ -1941,13 +1865,12 @@ def test_waldir_outside_pgdata_archiving(self): return unittest.skip( 'Skipped because waldir outside pgdata is supported since PG 10') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - external_wal_dir = os.path.join(self.tmp_path, module_name, fname, 'ext_wal_dir') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + external_wal_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'ext_wal_dir') shutil.rmtree(external_wal_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums', '--waldir={0}'.format(external_wal_dir)]) self.init_pb(backup_dir) @@ -1964,18 +1887,15 @@ def test_waldir_outside_pgdata_archiving(self): # check self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], '000000010000000000000001') - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_hexadecimal_timeline(self): """ Check that timelines are correct. 
""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2023,9 +1943,6 @@ def test_hexadecimal_timeline(self): '0000000D000000000000001C', tli13['max-segno']) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") # @unittest.expectedFailure def test_archiving_and_slots(self): @@ -2033,10 +1950,9 @@ def test_archiving_and_slots(self): Check that archiving don`t break slot guarantee. """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -2087,15 +2003,11 @@ def test_archiving_and_slots(self): exit(1) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_archive_push_sanity(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -2125,14 +2037,14 @@ def test_archive_push_sanity(self): self.assertNotIn('WARNING', postgres_log_content) replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node( backup_dir, 'node', replica, data_dir=replica.data_dir, options=['-R']) - #self.set_archiving(backup_dir, 'replica', replica, replica=True) + # self.set_archiving(backup_dir, 'replica', replica, replica=True) self.set_auto_conf(replica, {'port': replica.port}) self.set_auto_conf(replica, {'archive_mode': 'always'}) self.set_auto_conf(replica, {'hot_standby': 'on'}) @@ -2160,22 +2072,18 @@ def test_archive_push_sanity(self): self.assertNotIn('WARNING', output) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_archive_pg_receivexlog_partial_handling(self): """check that archive-get delivers .partial and .gz.partial files""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -2234,7 +2142,7 @@ def test_archive_pg_receivexlog_partial_handling(self): pg_receivexlog.kill() node_restored = self.make_simple_node( - 
base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -2255,16 +2163,12 @@ def test_archive_pg_receivexlog_partial_handling(self): self.assertEqual(result, result_new) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_multi_timeline_recovery_prefetching(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2367,24 +2271,20 @@ def test_multi_timeline_recovery_prefetching(self): 'WAL segment 000000010000000000000006, prefetch state: 5/10', postgres_log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_archive_get_batching_sanity(self): """ Make sure that batching works. .gz file is corrupted and uncompressed is not, check that both corruption detected and uncompressed file is used. """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -2399,7 +2299,7 @@ def test_archive_get_batching_sanity(self): node.pgbench_init(scale=50) replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node( @@ -2440,18 +2340,14 @@ def test_archive_get_batching_sanity(self): self.assertIn('prefetch state: 9/10', postgres_log_content) self.assertIn('prefetch state: 8/10', postgres_log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_archive_get_prefetch_corruption(self): """ Make sure that WAL corruption is detected. And --prefetch-dir is honored. 
""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2466,7 +2362,7 @@ def test_archive_get_prefetch_corruption(self): node.pgbench_init(scale=50) replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node( @@ -2576,19 +2472,15 @@ def test_archive_get_prefetch_corruption(self): 'LOG: restored log file "{0}" from archive'.format(filename), postgres_log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_archive_show_partial_files_handling(self): """ check that files with '.part', '.part.gz', '.partial' and '.partial.gz' siffixes are handled correctly """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2707,19 +2599,15 @@ def test_archive_show_partial_files_handling(self): 'WARNING', log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_archive_empty_history_file(self): """ https://github.com/postgrespro/pg_probackup/issues/326 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2800,8 +2688,6 @@ def test_archive_empty_history_file(self): 'WARNING: History file is corrupted or missing: "{0}"'.format(os.path.join(wal_dir, '00000004.history')), log_content) - self.del_test_dir(module_name, fname) - # TODO test with multiple not archived segments. # TODO corrupted file in archive. 
diff --git a/tests/auth_test.py b/tests/auth_test.py index 39786d7a9..4b0c4a5b2 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -30,13 +30,12 @@ def test_backup_via_unprivileged_user(self): run a backups without EXECUTE rights on certain functions """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -146,7 +145,6 @@ def test_backup_via_unprivileged_user(self): "postgres", "GRANT EXECUTE ON FUNCTION pg_backup_stop() TO backup") - self.backup_node( backup_dir, 'node', node, options=['-U', 'backup']) @@ -176,14 +174,12 @@ def test_backup_via_unprivileged_user(self): # backup_dir, 'node', node, # backup_type='ptrack', options=['-U', 'backup']) - # Clean after yourself - self.del_test_dir(module_name, fname) - class AuthTest(unittest.TestCase): pb = None node = None + # TODO move to object scope, replace module_name @classmethod def setUpClass(cls): @@ -240,6 +236,7 @@ def setUpClass(cls): cls.pgpass_file = os.path.join(os.path.expanduser('~'), '.pgpass') + # TODO move to object scope, replace module_name @classmethod def tearDownClass(cls): cls.node.cleanup() diff --git a/tests/backup.py b/tests/backup.py index 4f447c9bd..b7bb1b8b4 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -8,9 +8,6 @@ import subprocess -module_name = 'backup' - - class BackupTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -18,12 +15,11 @@ class BackupTest(ProbackupTest, unittest.TestCase): # PGPRO-707 def test_backup_modes_archive(self): """standart backup modes with ARCHIVE WAL method""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -80,18 +76,14 @@ def test_backup_modes_archive(self): backup_dir, 'node', backup_id=show_backup_2['id'])["parent-backup-id"]) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_smooth_checkpoint(self): """full backup with smooth checkpoint""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -103,18 +95,14 @@ def test_smooth_checkpoint(self): self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") node.stop() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incremental_backup_without_full(self): """page backup without 
validated full backup""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -139,18 +127,14 @@ def test_incremental_backup_without_full(self): self.show_pb(backup_dir, 'node')[0]['status'], "ERROR") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incremental_backup_corrupt_full(self): """page-level backup with corrupted full backup""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -200,19 +184,15 @@ def test_incremental_backup_corrupt_full(self): self.assertEqual( self.show_pb(backup_dir, 'node')[1]['status'], "ERROR") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delta_threads_stream(self): """delta multi thread backup mode and stream""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -227,21 +207,17 @@ def test_delta_threads_stream(self): backup_type="delta", options=["-j", "4", "--stream"]) self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_detect_corruption(self): """make node, corrupt some page, check that backup failed""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -292,21 +268,16 @@ def test_page_detect_corruption(self): 'ERROR', "Backup Status should be ERROR") - # Clean after yourself - self.del_test_dir(module_name, fname) - - # @unittest.skip("skip") def test_backup_detect_corruption(self): """make node, corrupt some page, check that backup failed""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) - 
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -439,20 +410,16 @@ def test_backup_detect_corruption(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_backup_detect_invalid_block_header(self): """make node, corrupt some page, check that backup failed""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -580,20 +547,16 @@ def test_backup_detect_invalid_block_header(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_backup_detect_missing_permissions(self): """make node, corrupt some page, check that backup failed""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -721,22 +684,18 @@ def test_backup_detect_missing_permissions(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_backup_truncate_misaligned(self): """ make node, truncate file to size not even to BLCKSIZE, take backup """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -772,19 +731,15 @@ def test_backup_truncate_misaligned(self): self.assertIn("WARNING: File", output) self.assertIn("invalid file size", output) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_tablespace_in_pgdata_pgpro_1376(self): """PGPRO-1376 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -866,9 +821,6 @@ def 
test_tablespace_in_pgdata_pgpro_1376(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_tablespace_handling(self): """ @@ -877,13 +829,12 @@ def test_basic_tablespace_handling(self): check that restore with tablespace mapping will end with success """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -925,7 +876,7 @@ def test_basic_tablespace_handling(self): tblspace2_new_path = self.get_tblspace_path(node, 'tblspace2_new') node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() try: @@ -979,22 +930,18 @@ def test_basic_tablespace_handling(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_tablespace_handling_1(self): """ make node with tablespace A, take full backup, check that restore with tablespace mapping of tablespace B will end with error """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1014,7 +961,7 @@ def test_tablespace_handling_1(self): options=["-j", "4", "--stream"]) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() try: @@ -1037,22 +984,18 @@ def test_tablespace_handling_1(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_tablespace_handling_2(self): """ make node without tablespaces, take full backup, check that restore with tablespace mapping will end with error """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1066,7 +1009,7 @@ def test_tablespace_handling_2(self): options=["-j", "4", "--stream"]) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() try: @@ 
-1089,18 +1032,14 @@ def test_tablespace_handling_2(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_drop_rel_during_full_backup(self): """""" self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1172,16 +1111,12 @@ def test_drop_rel_during_full_backup(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_drop_db_during_full_backup(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1240,18 +1175,14 @@ def test_drop_db_during_full_backup(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_drop_rel_during_backup_delta(self): """""" self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1311,18 +1242,14 @@ def test_drop_rel_during_backup_delta(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_drop_rel_during_backup_page(self): """""" self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1379,16 +1306,12 @@ def test_drop_rel_during_backup_page(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_persistent_slot_for_stream_backup(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), 
set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1413,16 +1336,12 @@ def test_persistent_slot_for_stream_backup(self): backup_dir, 'node', node, options=['--stream', '--slot=slot_1']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_temp_slot_for_stream_backup(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={'max_wal_size': '40MB'}) @@ -1445,18 +1364,14 @@ def test_basic_temp_slot_for_stream_backup(self): backup_dir, 'node', node, options=['--stream', '--slot=slot_1', '--temp-slot']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_backup_concurrent_drop_table(self): """""" self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1493,19 +1408,15 @@ def test_backup_concurrent_drop_table(self): self.assertEqual(show_backup['status'], "OK") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_pg_11_adjusted_wal_segment_size(self): """""" if self.pg_config_version < self.version_to_num('11.0'): return unittest.skip('You need PostgreSQL >= 11 for this test') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=[ '--data-checksums', @@ -1581,18 +1492,14 @@ def test_pg_11_adjusted_wal_segment_size(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_sigint_handling(self): """""" self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1622,18 +1529,14 @@ def test_sigint_handling(self): self.show_pb(backup_dir, 'node', backup_id)['status'], 'Backup STATUS should be "ERROR"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_sigterm_handling(self): """""" self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = 
self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1662,18 +1565,14 @@ def test_sigterm_handling(self): self.show_pb(backup_dir, 'node', backup_id)['status'], 'Backup STATUS should be "ERROR"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_sigquit_handling(self): """""" self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1701,16 +1600,12 @@ def test_sigquit_handling(self): self.show_pb(backup_dir, 'node', backup_id)['status'], 'Backup STATUS should be "ERROR"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_drop_table(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1736,19 +1631,15 @@ def test_drop_table(self): self.backup_node( backup_dir, 'node', node, options=['--stream']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_missing_file_permissions(self): """""" if os.name == 'nt': return unittest.skip('Skipped because it is POSIX only test') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1783,19 +1674,15 @@ def test_basic_missing_file_permissions(self): os.chmod(full_path, 700) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_missing_dir_permissions(self): """""" if os.name == 'nt': return unittest.skip('Skipped because it is POSIX only test') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1826,16 +1713,12 @@ def test_basic_missing_dir_permissions(self): os.rmdir(full_path) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_backup_with_least_privileges_role(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums'], @@ -2055,9 +1938,6 @@ def test_backup_with_least_privileges_role(self): backup_dir, 'node', node, backup_type='ptrack', datname='backupdb', options=['--stream', '-U', 'backup']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_parent_choosing(self): """ @@ -2066,10 +1946,9 @@ def test_parent_choosing(self): PAGE1 <- CORRUPT FULL """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2119,9 +1998,6 @@ def test_parent_choosing(self): backup_dir, 'node', backup_id=page3_id)['parent-backup-id'], full_id) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_parent_choosing_1(self): """ @@ -2130,10 +2006,9 @@ def test_parent_choosing_1(self): PAGE1 <- (missing) FULL """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2179,9 +2054,6 @@ def test_parent_choosing_1(self): backup_dir, 'node', backup_id=page3_id)['parent-backup-id'], full_id) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_parent_choosing_2(self): """ @@ -2190,10 +2062,9 @@ def test_parent_choosing_2(self): PAGE1 <- OK FULL <- (missing) """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2239,19 +2110,15 @@ def test_parent_choosing_2(self): backup_dir, 'node')[2]['status'], 'ERROR') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_backup_with_less_privileges_role(self): """ check permissions correctness from documentation: https://github.com/postgrespro/pg_probackup/blob/master/Documentation.md#configuring-the-database-cluster """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums'], @@ -2386,12 +2253,12 @@ def test_backup_with_less_privileges_role(self): datname='backupdb', options=['--stream', '-U', 'backup']) if self.get_version(node) < 90600: - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return # Restore as replica replica = 
self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'node', replica) @@ -2456,18 +2323,14 @@ def test_backup_with_less_privileges_role(self): backup_dir, 'replica', replica, backup_type='ptrack', datname='backupdb', options=['--stream', '-U', 'backup']) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_issue_132(self): """ https://github.com/postgrespro/pg_probackup/issues/132 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2494,18 +2357,14 @@ def test_issue_132(self): exit(1) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_issue_132_1(self): """ https://github.com/postgrespro/pg_probackup/issues/132 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2654,17 +2513,13 @@ def test_issue_132_1(self): 'INFO: Restore of backup {0} completed.'.format(delta_id), output) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_note_sanity(self): """ test that adding note to backup works as expected """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2692,18 +2547,14 @@ def test_note_sanity(self): 'note', backup_meta) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_parent_backup_made_by_newer_version(self): """incremental backup with parent made by newer version""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2745,20 +2596,16 @@ def test_parent_backup_made_by_newer_version(self): self.assertEqual( self.show_pb(backup_dir, 'node')[1]['status'], "ERROR") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_issue_289(self): """ https://github.com/postgrespro/pg_probackup/issues/289 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2790,20 +2637,16 @@ def test_issue_289(self): self.assertEqual( self.show_pb(backup_dir, 'node')[0]['status'], "ERROR") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_issue_290(self): """ https://github.com/postgrespro/pg_probackup/issues/290 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2839,18 +2682,14 @@ def test_issue_290(self): self.assertEqual( self.show_pb(backup_dir, 'node')[0]['status'], "ERROR") - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_issue_203(self): """ https://github.com/postgrespro/pg_probackup/issues/203 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2870,7 +2709,7 @@ def test_issue_203(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', @@ -2879,18 +2718,14 @@ def test_issue_203(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_issue_231(self): """ https://github.com/postgrespro/pg_probackup/issues/231 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2911,17 +2746,13 @@ def test_issue_231(self): # it is a bit racy self.assertIn("WARNING: Cannot create directory", out) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_incr_backup_filenode_map(self): """ https://github.com/postgrespro/pg_probackup/issues/320 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -2930,7 +2761,7 @@ def test_incr_backup_filenode_map(self): node.slow_start() node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), + base_dir=os.path.join(self.module_name, self.fname, 'node1'), initdb_params=['--data-checksums']) node1.cleanup() @@ -2962,18 +2793,14 @@ def test_incr_backup_filenode_map(self): 'postgres', 'select 1') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_missing_wal_segment(self): """""" self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums'], @@ -3039,16 +2866,12 @@ def test_missing_wal_segment(self): # TODO: check the same for PAGE backup - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_missing_replication_permission(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -3063,7 +2886,7 @@ def test_missing_replication_permission(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'node', replica) @@ -3193,16 +3016,12 @@ def test_missing_replication_permission(self): "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_missing_replication_permission_1(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -3217,7 +3036,7 @@ def test_missing_replication_permission_1(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'node', replica) @@ -3347,16 +3166,12 @@ def test_missing_replication_permission_1(self): r'WARNING: could not connect to database backupdb: (connection to server (on socket "/tmp/.s.PGSQL.\d+"|at "localhost" \(127.0.0.1\), port \d+) failed: ){0,1}' 'FATAL: must be superuser or replication role to start walsender') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_backup_default_transaction_read_only(self): """""" - fname = 
self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={'default_transaction_read_only': 'on'}) @@ -3395,18 +3210,14 @@ def test_basic_backup_default_transaction_read_only(self): # PAGE backup self.backup_node(backup_dir, 'node', node, backup_type='page') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_backup_atexit(self): """""" self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -3450,16 +3261,12 @@ def test_backup_atexit(self): 'setting its status to ERROR', log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_pg_stop_backup_missing_permissions(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -3529,20 +3336,16 @@ def test_pg_stop_backup_missing_permissions(self): "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_start_time(self): """Test, that option --start-time allows to set backup_id and restore""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -3556,7 +3359,7 @@ def test_start_time(self): # restore FULL backup by backup_id calculated from start-time self.restore_node( backup_dir, 'node', - data_dir=os.path.join(self.tmp_path, module_name, fname, 'node_restored_full'), + data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_full'), backup_id=base36enc(startTime)) #FULL backup with incorrect start time @@ -3586,7 +3389,7 @@ def test_start_time(self): # restore DELTA backup by backup_id calculated from start-time self.restore_node( backup_dir, 'node', - data_dir=os.path.join(self.tmp_path, module_name, fname, 'node_restored_delta'), + data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_delta'), backup_id=base36enc(startTime)) # PAGE backup @@ -3597,7 +3400,7 @@ def test_start_time(self): # 
restore PAGE backup by backup_id calculated from start-time self.restore_node( backup_dir, 'node', - data_dir=os.path.join(self.tmp_path, module_name, fname, 'node_restored_page'), + data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_page'), backup_id=base36enc(startTime)) # PTRACK backup @@ -3613,35 +3416,31 @@ def test_start_time(self): # restore PTRACK backup by backup_id calculated from start-time self.restore_node( backup_dir, 'node', - data_dir=os.path.join(self.tmp_path, module_name, fname, 'node_restored_ptrack'), + data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_ptrack'), backup_id=base36enc(startTime)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_start_time_few_nodes(self): """Test, that we can synchronize backup_id's for different DBs""" - fname = self.id().split('.')[3] node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), + base_dir=os.path.join(self.module_name, self.fname, 'node1'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) - backup_dir1 = os.path.join(self.tmp_path, module_name, fname, 'backup1') + backup_dir1 = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup1') self.init_pb(backup_dir1) self.add_instance(backup_dir1, 'node1', node1) self.set_archiving(backup_dir1, 'node1', node1) node1.slow_start() node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2'), + base_dir=os.path.join(self.module_name, self.fname, 'node2'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) - backup_dir2 = os.path.join(self.tmp_path, module_name, fname, 'backup2') + backup_dir2 = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup2') self.init_pb(backup_dir2) self.add_instance(backup_dir2, 'node2', node2) self.set_archiving(backup_dir2, 'node2', node2) @@ -3703,6 +3502,3 @@ def test_start_time_few_nodes(self): show_backup2 = self.show_pb(backup_dir2, 'node2')[3] self.assertEqual(show_backup1['id'], show_backup2['id']) - # Clean after yourself - self.del_test_dir(module_name, fname) - diff --git a/tests/catchup.py b/tests/catchup.py index 7ecd84697..12622207a 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -4,11 +4,7 @@ import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -module_name = 'catchup' - class CatchupTest(ProbackupTest, unittest.TestCase): - def setUp(self): - self.fname = self.id().split('.')[3] ######################################### # Basic tests @@ -19,7 +15,7 @@ def test_basic_full_catchup(self): """ # preparation src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True ) src_pg.slow_start() @@ -29,7 +25,7 @@ def test_basic_full_catchup(self): src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") # do full catchup - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -57,7 +53,7 @@ def test_basic_full_catchup(self): # Cleanup dst_pg.stop() #self.assertEqual(1, 0, 'Stop test') - self.del_test_dir(module_name, self.fname) + self.del_test_dir(self.module_name, self.fname) def test_full_catchup_with_tablespace(self): """ @@ -65,7 
+61,7 @@ def test_full_catchup_with_tablespace(self): """ # preparation src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True ) src_pg.slow_start() @@ -77,7 +73,7 @@ def test_full_catchup_with_tablespace(self): src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") # do full catchup with tablespace mapping - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) tblspace1_new_path = self.get_tblspace_path(dst_pg, 'tblspace1_new') self.catchup_node( backup_mode = 'FULL', @@ -115,7 +111,6 @@ def test_full_catchup_with_tablespace(self): # Cleanup dst_pg.stop() - self.del_test_dir(module_name, self.fname) def test_basic_delta_catchup(self): """ @@ -123,7 +118,7 @@ def test_basic_delta_catchup(self): """ # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) @@ -133,7 +128,7 @@ def test_basic_delta_catchup(self): "CREATE TABLE ultimate_question(answer int)") # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -183,7 +178,6 @@ def test_basic_delta_catchup(self): # Cleanup dst_pg.stop() #self.assertEqual(1, 0, 'Stop test') - self.del_test_dir(module_name, self.fname) def test_basic_ptrack_catchup(self): """ @@ -194,7 +188,7 @@ def test_basic_ptrack_catchup(self): # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, ptrack_enable = True, initdb_params = ['--data-checksums'] @@ -206,7 +200,7 @@ def test_basic_ptrack_catchup(self): "CREATE TABLE ultimate_question(answer int)") # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -256,7 +250,6 @@ def test_basic_ptrack_catchup(self): # Cleanup dst_pg.stop() #self.assertEqual(1, 0, 'Stop test') - self.del_test_dir(module_name, self.fname) def test_tli_delta_catchup(self): """ @@ -264,14 +257,14 @@ def test_tli_delta_catchup(self): """ # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) src_pg.slow_start() # preparation 2: destination - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -329,7 +322,6 @@ def test_tli_delta_catchup(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) def test_tli_ptrack_catchup(self): """ @@ -340,7 +332,7 @@ def test_tli_ptrack_catchup(self): # preparation 1: 
source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, ptrack_enable = True, initdb_params = ['--data-checksums'] @@ -349,7 +341,7 @@ def test_tli_ptrack_catchup(self): src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack") # preparation 2: destination - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -412,7 +404,6 @@ def test_tli_ptrack_catchup(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) ######################################### # Test various corner conditions @@ -423,7 +414,7 @@ def test_table_drop_with_delta(self): """ # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) @@ -433,7 +424,7 @@ def test_table_drop_with_delta(self): "CREATE TABLE ultimate_question AS SELECT 42 AS answer") # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -468,7 +459,6 @@ def test_table_drop_with_delta(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) def test_table_drop_with_ptrack(self): """ @@ -479,7 +469,7 @@ def test_table_drop_with_ptrack(self): # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, ptrack_enable = True, initdb_params = ['--data-checksums'] @@ -491,7 +481,7 @@ def test_table_drop_with_ptrack(self): "CREATE TABLE ultimate_question AS SELECT 42 AS answer") # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -526,7 +516,6 @@ def test_table_drop_with_ptrack(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) def test_tablefile_truncation_with_delta(self): """ @@ -534,7 +523,7 @@ def test_tablefile_truncation_with_delta(self): """ # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) @@ -549,7 +538,7 @@ def test_tablefile_truncation_with_delta(self): src_pg.safe_psql("postgres", "VACUUM t_heap") # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -583,7 +572,6 @@ def test_tablefile_truncation_with_delta(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) def test_tablefile_truncation_with_ptrack(self): """ @@ -594,7 +582,7 
@@ def test_tablefile_truncation_with_ptrack(self): # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, ptrack_enable = True, initdb_params = ['--data-checksums'] @@ -611,7 +599,7 @@ def test_tablefile_truncation_with_ptrack(self): src_pg.safe_psql("postgres", "VACUUM t_heap") # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -645,7 +633,6 @@ def test_tablefile_truncation_with_ptrack(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) ######################################### # Test reaction on user errors @@ -657,7 +644,7 @@ def test_local_tablespace_without_mapping(self): if self.remote: return unittest.skip('Skipped because this test tests local catchup error handling') - src_pg = self.make_simple_node(base_dir = os.path.join(module_name, self.fname, 'src')) + src_pg = self.make_simple_node(base_dir = os.path.join(self.module_name, self.fname, 'src')) src_pg.slow_start() tblspace_path = self.get_tblspace_path(src_pg, 'tblspace') @@ -669,7 +656,7 @@ def test_local_tablespace_without_mapping(self): "postgres", "CREATE TABLE ultimate_question TABLESPACE tblspace AS SELECT 42 AS answer") - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) try: self.catchup_node( backup_mode = 'FULL', @@ -691,7 +678,6 @@ def test_local_tablespace_without_mapping(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) def test_running_dest_postmaster(self): """ @@ -699,14 +685,14 @@ def test_running_dest_postmaster(self): """ # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) src_pg.slow_start() # preparation 2: destination - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -738,7 +724,6 @@ def test_running_dest_postmaster(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) def test_same_db_id(self): """ @@ -747,12 +732,12 @@ def test_same_db_id(self): # preparation: # source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True ) src_pg.slow_start() # destination - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -765,9 +750,9 @@ def test_same_db_id(self): dst_pg.slow_start() dst_pg.stop() # fake destination - fake_dst_pg = self.make_simple_node(base_dir = os.path.join(module_name, self.fname, 'fake_dst')) + fake_dst_pg = self.make_simple_node(base_dir = os.path.join(self.module_name, self.fname, 'fake_dst')) # fake source - fake_src_pg = 
self.make_simple_node(base_dir = os.path.join(module_name, self.fname, 'fake_src')) + fake_src_pg = self.make_simple_node(base_dir = os.path.join(self.module_name, self.fname, 'fake_src')) # try delta catchup (src (with correct src conn), fake_dst) try: @@ -803,7 +788,6 @@ def test_same_db_id(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) def test_tli_destination_mismatch(self): """ @@ -811,14 +795,14 @@ def test_tli_destination_mismatch(self): """ # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) src_pg.slow_start() # preparation 2: destination - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -860,7 +844,6 @@ def test_tli_destination_mismatch(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) def test_tli_source_mismatch(self): """ @@ -868,14 +851,14 @@ def test_tli_source_mismatch(self): """ # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) src_pg.slow_start() # preparation 2: fake source (promouted copy) - fake_src_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'fake_src')) + fake_src_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'fake_src')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -899,7 +882,7 @@ def test_tli_source_mismatch(self): fake_src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 'trash' AS garbage") # preparation 3: destination - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -940,7 +923,6 @@ def test_tli_source_mismatch(self): # Cleanup src_pg.stop() fake_src_pg.stop() - self.del_test_dir(module_name, self.fname) ######################################### # Test unclean destination @@ -951,7 +933,7 @@ def test_unclean_delta_catchup(self): """ # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) @@ -961,7 +943,7 @@ def test_unclean_delta_catchup(self): "CREATE TABLE ultimate_question(answer int)") # preparation 2: destination - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -1028,7 +1010,6 @@ def test_unclean_delta_catchup(self): # Cleanup dst_pg.stop() - self.del_test_dir(module_name, self.fname) def test_unclean_ptrack_catchup(self): """ @@ -1039,7 +1020,7 @@ def test_unclean_ptrack_catchup(self): # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, 
ptrack_enable = True, pg_options = { 'wal_log_hints': 'on' } @@ -1051,7 +1032,7 @@ def test_unclean_ptrack_catchup(self): "CREATE TABLE ultimate_question(answer int)") # preparation 2: destination - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -1118,7 +1099,6 @@ def test_unclean_ptrack_catchup(self): # Cleanup dst_pg.stop() - self.del_test_dir(module_name, self.fname) ######################################### # Test replication slot logic @@ -1139,13 +1119,13 @@ def test_catchup_with_replication_slot(self): """ # preparation src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True ) src_pg.slow_start() # 1a. --slot option - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_1a')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst_1a')) try: self.catchup_node( backup_mode = 'FULL', @@ -1165,7 +1145,7 @@ def test_catchup_with_replication_slot(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) # 1b. --slot option - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_1b')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst_1b')) src_pg.safe_psql("postgres", "SELECT pg_catalog.pg_create_physical_replication_slot('existentslot_1b')") self.catchup_node( backup_mode = 'FULL', @@ -1178,7 +1158,7 @@ def test_catchup_with_replication_slot(self): ) # 2a. --slot --perm-slot - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_2a')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst_2a')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -1191,7 +1171,7 @@ def test_catchup_with_replication_slot(self): ) # 2b. and 4. --slot --perm-slot - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_2b')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst_2b')) src_pg.safe_psql("postgres", "SELECT pg_catalog.pg_create_physical_replication_slot('existentslot_2b')") try: self.catchup_node( @@ -1213,7 +1193,7 @@ def test_catchup_with_replication_slot(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) # 3. --perm-slot --slot - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_3')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst_3')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -1233,7 +1213,7 @@ def test_catchup_with_replication_slot(self): # 5. 
--perm-slot --temp-slot (PG>=10) if self.get_version(src_pg) >= self.version_to_num('10.0'): - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_5')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst_5')) try: self.catchup_node( backup_mode = 'FULL', @@ -1254,7 +1234,6 @@ def test_catchup_with_replication_slot(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) #self.assertEqual(1, 0, 'Stop test') - self.del_test_dir(module_name, self.fname) ######################################### # --exclude-path @@ -1265,7 +1244,7 @@ def test_catchup_with_exclude_path(self): """ # preparation src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True ) src_pg.slow_start() @@ -1282,7 +1261,7 @@ def test_catchup_with_exclude_path(self): f.flush() f.close - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -1333,7 +1312,6 @@ def test_catchup_with_exclude_path(self): #self.assertEqual(1, 0, 'Stop test') src_pg.stop() - self.del_test_dir(module_name, self.fname) def test_config_exclusion(self): """ @@ -1341,7 +1319,7 @@ def test_config_exclusion(self): """ # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) @@ -1351,7 +1329,7 @@ def test_config_exclusion(self): "CREATE TABLE ultimate_question(answer int)") # preparation 2: make lagging behind replica - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -1458,7 +1436,6 @@ def test_config_exclusion(self): src_pg.stop() dst_pg.stop() #self.assertEqual(1, 0, 'Stop test') - self.del_test_dir(module_name, self.fname) ######################################### # --dry-run @@ -1469,13 +1446,13 @@ def test_dry_run_catchup_full(self): """ # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True ) src_pg.slow_start() # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) src_pg.pgbench_init(scale = 10) pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) @@ -1500,7 +1477,6 @@ def test_dry_run_catchup_full(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) def test_dry_run_catchup_ptrack(self): """ @@ -1511,7 +1487,7 @@ def test_dry_run_catchup_ptrack(self): # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, ptrack_enable = True, initdb_params = ['--data-checksums'] @@ -1524,7 +1500,7 @@ def test_dry_run_catchup_ptrack(self): pgbench.wait() # preparation 2: make clean shutdowned lagging behind replica - dst_pg = 
self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -1557,7 +1533,7 @@ def test_dry_run_catchup_ptrack(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) + self.del_test_dir(self.module_name, self.fname) def test_dry_run_catchup_delta(self): """ @@ -1566,7 +1542,7 @@ def test_dry_run_catchup_delta(self): # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, initdb_params = ['--data-checksums'], pg_options = { 'wal_log_hints': 'on' } @@ -1578,7 +1554,7 @@ def test_dry_run_catchup_delta(self): pgbench.wait() # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -1611,5 +1587,3 @@ def test_dry_run_catchup_delta(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) - diff --git a/tests/checkdb.py b/tests/checkdb.py index 07b55c6db..2caf4fcb2 100644 --- a/tests/checkdb.py +++ b/tests/checkdb.py @@ -9,9 +9,6 @@ import time -module_name = 'checkdb' - - class CheckdbTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -19,10 +16,9 @@ def test_checkdb_amcheck_only_sanity(self): """""" self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir="{0}/{1}/node".format(module_name, fname), + base_dir="{0}/{1}/node".format(self.module_name, self.fname), set_replication=True, initdb_params=['--data-checksums']) @@ -223,15 +219,13 @@ def test_checkdb_amcheck_only_sanity(self): # Clean after yourself gdb.kill() node.stop() - self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_basic_checkdb_amcheck_only_sanity(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir="{0}/{1}/node".format(module_name, fname), + base_dir="{0}/{1}/node".format(self.module_name, self.fname), set_replication=True, initdb_params=['--data-checksums']) @@ -362,18 +356,16 @@ def test_basic_checkdb_amcheck_only_sanity(self): # Clean after yourself node.stop() - self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_checkdb_block_validation_sanity(self): """make node, corrupt some pages, check that checkdb failed""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -459,14 +451,12 @@ def test_checkdb_block_validation_sanity(self): # Clean after yourself node.stop() - 
self.del_test_dir(module_name, fname) def test_checkdb_checkunique(self): """Test checkunique parameter of amcheck.bt_index_check function""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) node.slow_start() @@ -550,17 +540,15 @@ def test_checkdb_checkunique(self): # Clean after yourself node.stop() - self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_checkdb_sigint_handling(self): """""" self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -605,15 +593,13 @@ def test_checkdb_sigint_handling(self): # Clean after yourself gdb.kill() node.stop() - self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_checkdb_with_least_privileges(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -861,4 +847,3 @@ def test_checkdb_with_least_privileges(self): # Clean after yourself node.stop() - self.del_test_dir(module_name, fname) diff --git a/tests/compatibility.py b/tests/compatibility.py index 6c2bc9204..8a7812c57 100644 --- a/tests/compatibility.py +++ b/tests/compatibility.py @@ -5,8 +5,6 @@ from sys import exit import shutil -module_name = 'compatibility' - def check_manual_tests_enabled(): return 'PGPROBACKUP_MANUAL' in os.environ and os.environ['PGPROBACKUP_MANUAL'] == 'ON' @@ -52,7 +50,7 @@ def test_catchup_with_different_remote_major_pg(self): pgprobackup_ssh_agent_path = os.environ['PGPROBACKUP_SSH_AGENT_PATH'] src_pg = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'src'), + base_dir=os.path.join(self.module_name, self.fname, 'src'), set_replication=True, ) src_pg.slow_start() @@ -61,7 +59,7 @@ def test_catchup_with_different_remote_major_pg(self): "CREATE TABLE ultimate_question AS SELECT 42 AS answer") # do full catchup - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode='FULL', source_pgdata=src_pg.data_dir, @@ -89,16 +87,15 @@ def test_catchup_with_different_remote_major_pg(self): ) # Clean after yourself - self.del_test_dir(module_name, self.fname) + self.del_test_dir(self.module_name, self.fname) # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_page(self): """Description in jira issue PGPRO-434""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = 
self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -126,7 +123,7 @@ def test_backward_compatibility_page(self): # RESTORE old FULL with new binary node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -223,17 +220,13 @@ def test_backward_compatibility_page(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_delta(self): """Description in jira issue PGPRO-434""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -261,7 +254,7 @@ def test_backward_compatibility_delta(self): # RESTORE old FULL with new binary node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -357,9 +350,6 @@ def test_backward_compatibility_delta(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_ptrack(self): @@ -368,10 +358,9 @@ def test_backward_compatibility_ptrack(self): if not self.ptrack: return unittest.skip('Skipped because ptrack support is disabled') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -404,7 +393,7 @@ def test_backward_compatibility_ptrack(self): # RESTORE old FULL with new binary node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -471,17 +460,13 @@ def test_backward_compatibility_ptrack(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_compression(self): """Description in jira issue PGPRO-434""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ 
-504,7 +489,7 @@ def test_backward_compatibility_compression(self): # restore OLD FULL with new binary node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -630,9 +615,6 @@ def test_backward_compatibility_compression(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_merge(self): @@ -640,10 +622,9 @@ def test_backward_compatibility_merge(self): Create node, take FULL and PAGE backups with old binary, merge them with new binary """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -674,7 +655,7 @@ def test_backward_compatibility_merge(self): # restore OLD FULL with new binary node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -685,9 +666,6 @@ def test_backward_compatibility_merge(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_merge_1(self): @@ -696,10 +674,9 @@ def test_backward_compatibility_merge_1(self): merge them with new binary. old binary version =< 2.2.7 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -749,7 +726,7 @@ def test_backward_compatibility_merge_1(self): # restore merged backup node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -757,9 +734,6 @@ def test_backward_compatibility_merge_1(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_merge_2(self): @@ -768,10 +742,9 @@ def test_backward_compatibility_merge_2(self): merge them with new binary. 
old binary version =< 2.2.7 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -788,7 +761,7 @@ def test_backward_compatibility_merge_2(self): 'VACUUM pgbench_accounts') node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) # FULL backup with OLD binary self.backup_node(backup_dir, 'node', node, old_binary=True) @@ -879,9 +852,6 @@ def test_backward_compatibility_merge_2(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata4, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_merge_3(self): @@ -890,10 +860,9 @@ def test_backward_compatibility_merge_3(self): merge them with new binary. old binary version =< 2.2.7 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -910,7 +879,7 @@ def test_backward_compatibility_merge_3(self): 'VACUUM pgbench_accounts') node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) # FULL backup with OLD binary self.backup_node( @@ -1002,9 +971,6 @@ def test_backward_compatibility_merge_3(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata4, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_merge_4(self): @@ -1016,10 +982,9 @@ def test_backward_compatibility_merge_4(self): self.assertTrue( False, 'You need pg_probackup old_binary =< 2.4.0 for this test') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1036,7 +1001,7 @@ def test_backward_compatibility_merge_4(self): 'VACUUM pgbench_accounts') node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) # FULL backup with OLD binary self.backup_node( @@ -1079,9 +1044,6 @@ def test_backward_compatibility_merge_4(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_merge_5(self): @@ -1098,10 +1060,9 @@ def 
test_backward_compatibility_merge_5(self): self.version_to_num(self.old_probackup_version), self.version_to_num(self.probackup_version)) - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1151,7 +1112,7 @@ def test_backward_compatibility_merge_5(self): # restore merged backup node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -1159,9 +1120,6 @@ def test_backward_compatibility_merge_5(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_vacuum_truncate(self): """ @@ -1173,10 +1131,9 @@ def test_page_vacuum_truncate(self): and check data correctness old binary should be 2.2.x version """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1224,7 +1181,7 @@ def test_page_vacuum_truncate(self): pgdata3 = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -1263,9 +1220,6 @@ def test_page_vacuum_truncate(self): node_restored.slow_start() node_restored.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_vacuum_truncate_compression(self): """ @@ -1277,10 +1231,9 @@ def test_page_vacuum_truncate_compression(self): and check data correctness old binary should be 2.2.x version """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1330,7 +1283,7 @@ def test_page_vacuum_truncate_compression(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -1342,9 +1295,6 @@ def test_page_vacuum_truncate_compression(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_vacuum_truncate_compressed_1(self): """ @@ -1356,10 +1306,9 @@ def 
test_page_vacuum_truncate_compressed_1(self): and check data correctness old binary should be 2.2.x version """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1411,7 +1360,7 @@ def test_page_vacuum_truncate_compressed_1(self): pgdata3 = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -1450,9 +1399,6 @@ def test_page_vacuum_truncate_compressed_1(self): node_restored.slow_start() node_restored.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_hidden_files(self): """ @@ -1461,10 +1407,9 @@ def test_hidden_files(self): with old binary, then try to delete backup with new binary """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1479,21 +1424,17 @@ def test_hidden_files(self): self.delete_pb(backup_dir, 'node', backup_id) - # Clean after yourself - self.del_test_dir(module_name, fname) - - # @unittest.skip("skip") + # @unittest.skip("skip") def test_compatibility_tablespace(self): """ https://github.com/postgrespro/pg_probackup/issues/348 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node, old_binary=True) @@ -1519,7 +1460,7 @@ def test_compatibility_tablespace(self): tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new') node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() try: @@ -1560,6 +1501,3 @@ def test_compatibility_tablespace(self): if self.paranoia: pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/compression.py b/tests/compression.py index c10a59489..94f2dffff 100644 --- a/tests/compression.py +++ b/tests/compression.py @@ -5,9 +5,6 @@ import subprocess -module_name = 'compression' - - class CompressionTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -18,10 +15,9 @@ def test_basic_compression_stream_zlib(self): check data correctness in restored instance """ self.maxDiff = None - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir 
= os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -116,19 +112,15 @@ def test_basic_compression_stream_zlib(self): delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") self.assertEqual(delta_result, delta_result_new) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_compression_archive_zlib(self): """ make archive node, make full and page backups, check data correctness in restored instance """ self.maxDiff = None - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -219,19 +211,15 @@ def test_compression_archive_zlib(self): self.assertEqual(delta_result, delta_result_new) node.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_compression_stream_pglz(self): """ make archive node, make full and page stream backups, check data correctness in restored instance """ self.maxDiff = None - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -324,19 +312,15 @@ def test_compression_stream_pglz(self): self.assertEqual(delta_result, delta_result_new) node.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_compression_archive_pglz(self): """ make archive node, make full and page backups, check data correctness in restored instance """ self.maxDiff = None - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -429,19 +413,15 @@ def test_compression_archive_pglz(self): self.assertEqual(delta_result, delta_result_new) node.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_compression_wrong_algorithm(self): """ make archive node, make full and page backups, check data correctness in restored instance """ self.maxDiff = None - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -467,9 +447,6 @@ def test_compression_wrong_algorithm(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # 
@unittest.skip("skip") def test_incompressible_pages(self): """ @@ -477,10 +454,9 @@ def test_incompressible_pages(self): take backup with compression, make sure that page was not compressed, restore backup and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -517,6 +493,3 @@ def test_incompressible_pages(self): self.compare_pgdata(pgdata, pgdata_restored) node.slow_start() - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/config.py b/tests/config.py index b41382204..b1a0f9295 100644 --- a/tests/config.py +++ b/tests/config.py @@ -5,19 +5,16 @@ from sys import exit from shutil import copyfile -module_name = 'config' - class ConfigTest(ProbackupTest, unittest.TestCase): # @unittest.expectedFailure # @unittest.skip("skip") def test_remove_instance_config(self): - """remove pg_probackup.conf""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + """remove pg_probackup.conself.f""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -57,10 +54,9 @@ def test_remove_instance_config(self): # @unittest.skip("skip") def test_corrupt_backup_content(self): """corrupt backup_content.control""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) diff --git a/tests/delete.py b/tests/delete.py index 345a70284..6b30cc712 100644 --- a/tests/delete.py +++ b/tests/delete.py @@ -2,10 +2,6 @@ import os from .helpers.ptrack_helpers import ProbackupTest, ProbackupException import subprocess -from sys import exit - - -module_name = 'delete' class DeleteTest(ProbackupTest, unittest.TestCase): @@ -14,12 +10,11 @@ class DeleteTest(ProbackupTest, unittest.TestCase): # @unittest.expectedFailure def test_delete_full_backups(self): """delete full backups""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -51,19 +46,15 @@ def test_delete_full_backups(self): self.assertEqual(show_backups[0]['id'], id_1) self.assertEqual(show_backups[1]['id'], id_3) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_del_instance_archive(self): """delete full backups""" - fname = self.id().split('.')[3] 
node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -83,19 +74,15 @@ def test_del_instance_archive(self): # Delete instance self.del_instance(backup_dir, 'node') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_delete_archive_mix_compress_and_non_compressed_segments(self): """delete full backups""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir="{0}/{1}/node".format(module_name, fname), + base_dir="{0}/{1}/node".format(self.module_name, self.fname), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving( @@ -142,18 +129,14 @@ def test_delete_archive_mix_compress_and_non_compressed_segments(self): '--retention-redundancy=3', '--delete-expired']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delete_increment_page(self): """delete increment and all after him""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -182,22 +165,18 @@ def test_delete_increment_page(self): self.assertEqual(show_backups[1]['backup-mode'], "FULL") self.assertEqual(show_backups[1]['status'], "OK") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delete_increment_ptrack(self): """delete increment and all after him""" if not self.ptrack: return unittest.skip('Skipped because ptrack support is disabled') - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -230,9 +209,6 @@ def test_delete_increment_ptrack(self): self.assertEqual(show_backups[1]['backup-mode'], "FULL") self.assertEqual(show_backups[1]['status'], "OK") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delete_orphaned_wal_segments(self): """ @@ -240,12 +216,11 @@ def test_delete_orphaned_wal_segments(self): delete second backup without --wal option, then delete orphaned wals via --wal option """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, 
fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -302,9 +277,6 @@ def test_delete_orphaned_wal_segments(self): wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f))] self.assertEqual (0, len(wals), "Number of wals should be equal to 0") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delete_wal_between_multiple_timelines(self): """ @@ -315,12 +287,11 @@ def test_delete_wal_between_multiple_timelines(self): [A1, B1) are deleted and backups B1 and A2 keep their WAL """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -332,7 +303,7 @@ def test_delete_wal_between_multiple_timelines(self): node.pgbench_init(scale=3) node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2')) + base_dir=os.path.join(self.module_name, self.fname, 'node2')) node2.cleanup() self.restore_node(backup_dir, 'node', node2) @@ -356,22 +327,18 @@ def test_delete_wal_between_multiple_timelines(self): self.validate_pb(backup_dir) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delete_backup_with_empty_control_file(self): """ take backup, truncate its control file, try to delete it via 'delete' command """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], set_replication=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -397,18 +364,14 @@ def test_delete_backup_with_empty_control_file(self): self.delete_pb(backup_dir, 'node', backup_id=backup_id) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delete_interleaved_incremental_chains(self): """complicated case of interleaved backup chains""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -521,9 +484,6 @@ def test_delete_interleaved_incremental_chains(self): print(self.show_pb( backup_dir, 'node', as_json=False, as_text=True)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # 
@unittest.skip("skip") def test_delete_multiple_descendants(self): """ @@ -536,12 +496,11 @@ def test_delete_multiple_descendants(self): FULLb | FULLa should be deleted """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -693,9 +652,6 @@ def test_delete_multiple_descendants(self): self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delete_multiple_descendants_dry_run(self): """ @@ -706,12 +662,11 @@ def test_delete_multiple_descendants_dry_run(self): | FULLa """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -798,17 +753,13 @@ def test_delete_multiple_descendants_dry_run(self): self.validate_pb(backup_dir, 'node') - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_delete_error_backups(self): """delete increment and all after him""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -869,6 +820,3 @@ def test_delete_error_backups(self): self.assertEqual(show_backups[1]['status'], "OK") self.assertEqual(show_backups[2]['status'], "OK") self.assertEqual(show_backups[3]['status'], "OK") - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/delta.py b/tests/delta.py index 82fb714f7..386403151 100644 --- a/tests/delta.py +++ b/tests/delta.py @@ -8,9 +8,6 @@ from threading import Thread -module_name = 'delta' - - class DeltaTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -21,15 +18,14 @@ def test_basic_delta_vacuum_truncate(self): take delta backup, take second delta backup, restore latest delta backup and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) self.init_pb(backup_dir) 
self.add_instance(backup_dir, 'node', node) @@ -77,9 +73,6 @@ def test_basic_delta_vacuum_truncate(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delta_vacuum_truncate_1(self): """ @@ -88,15 +81,14 @@ def test_delta_vacuum_truncate_1(self): take delta backup, take second delta backup, restore latest delta backup and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], ) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored'), + base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), ) self.init_pb(backup_dir) @@ -161,9 +153,6 @@ def test_delta_vacuum_truncate_1(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delta_vacuum_truncate_2(self): """ @@ -172,15 +161,14 @@ def test_delta_vacuum_truncate_2(self): take delta backup, take second delta backup, restore latest delta backup and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], ) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored'), + base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), ) self.init_pb(backup_dir) @@ -223,19 +211,15 @@ def test_delta_vacuum_truncate_2(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delta_stream(self): """ make archive node, take full and delta stream backups, restore them and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -306,9 +290,6 @@ def test_delta_stream(self): self.assertEqual(delta_result, delta_result_new) node.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delta_archive(self): """ @@ -316,10 +297,9 @@ def test_delta_archive(self): restore them and check data correctness """ self.maxDiff = None - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -381,19 +361,15 @@ def test_delta_archive(self): self.assertEqual(delta_result, delta_result_new) node.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delta_multiple_segments(self): """ Make node, create table with multiple segments, write some data to it, check delta and data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -434,7 +410,7 @@ def test_delta_multiple_segments(self): # RESTORE NODE restored_node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'restored_node')) + base_dir=os.path.join(self.module_name, self.fname, 'restored_node')) restored_node.cleanup() tblspc_path = self.get_tblspace_path(node, 'somedata') tblspc_path_new = self.get_tblspace_path( @@ -463,9 +439,6 @@ def test_delta_multiple_segments(self): if self.paranoia: self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delta_vacuum_full(self): """ @@ -474,15 +447,14 @@ def test_delta_vacuum_full(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.init_pb(backup_dir) @@ -542,19 +514,15 @@ def test_delta_vacuum_full(self): node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_create_db(self): """ Make node, take full backup, create database db1, take delta backup, restore database and check it presense """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -596,7 +564,7 @@ def test_create_db(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored') + base_dir=os.path.join(self.module_name, self.fname, 'node_restored') ) node_restored.cleanup() @@ -667,19 +635,15 @@ def test_create_db(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_exists_in_previous_backup(self): """ Make node, take full backup, create table, take page backup, take delta backup, check 
that file is no fully copied to delta backup """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -750,7 +714,7 @@ def test_exists_in_previous_backup(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored') + base_dir=os.path.join(self.module_name, self.fname, 'node_restored') ) node_restored.cleanup() @@ -773,19 +737,15 @@ def test_exists_in_previous_backup(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_alter_table_set_tablespace_delta(self): """ Make node, create tablespace with table, take full backup, alter tablespace location, take delta backup, restore database. """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', @@ -826,7 +786,7 @@ def test_alter_table_set_tablespace_delta(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -858,9 +818,6 @@ def test_alter_table_set_tablespace_delta(self): self.assertEqual(result, result_new, 'lost some data after restore') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_alter_database_set_tablespace_delta(self): """ @@ -868,10 +825,9 @@ def test_alter_database_set_tablespace_delta(self): take delta backup, alter database tablespace location, take delta backup restore last delta backup. """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], ) @@ -919,7 +875,7 @@ def test_alter_database_set_tablespace_delta(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored') + base_dir=os.path.join(self.module_name, self.fname, 'node_restored') ) node_restored.cleanup() @@ -947,19 +903,15 @@ def test_alter_database_set_tablespace_delta(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delta_delete(self): """ Make node, create tablespace with table, take full backup, alter tablespace location, take delta backup, restore database. 
""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', @@ -1005,7 +957,7 @@ def test_delta_delete(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored') + base_dir=os.path.join(self.module_name, self.fname, 'node_restored') ) node_restored.cleanup() @@ -1029,20 +981,16 @@ def test_delta_delete(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_delta_nullified_heap_page_backup(self): """ make node, take full backup, nullify some heap block, take delta backup, restore, physically compare pgdata`s """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1093,7 +1041,7 @@ def test_delta_nullified_heap_page_backup(self): # Restore DELTA backup node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -1103,21 +1051,17 @@ def test_delta_nullified_heap_page_backup(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_delta_backup_from_past(self): """ make node, take FULL stream backup, take DELTA stream backup, restore FULL backup, try to take second DELTA stream backup """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -1158,22 +1102,18 @@ def test_delta_backup_from_past(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") # @unittest.expectedFailure def test_delta_pg_resetxlog(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'shared_buffers': '512MB', 'max_wal_size': '3GB'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) 
self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -1259,6 +1199,3 @@ def test_delta_pg_resetxlog(self): # # pgdata_restored = self.pgdata_content(node_restored.data_dir) # self.compare_pgdata(pgdata, pgdata_restored) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/exclude.py b/tests/exclude.py index 2c4925881..cb3530cd5 100644 --- a/tests/exclude.py +++ b/tests/exclude.py @@ -3,19 +3,15 @@ from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -module_name = 'exclude' - - class ExcludeTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_exclude_temp_files(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -53,9 +49,6 @@ def test_exclude_temp_files(self): # TODO check temporary tablespaces - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_exclude_temp_tables(self): @@ -63,10 +56,9 @@ def test_exclude_temp_tables(self): make node without archiving, create temp table, take full backup, check that temp table not present in backup catalogue """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -139,9 +131,6 @@ def test_exclude_temp_tables(self): "Found temp table file in backup catalogue.\n " "Filepath: {0}".format(file)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_exclude_unlogged_tables_1(self): """ @@ -149,10 +138,9 @@ def test_exclude_unlogged_tables_1(self): alter table to unlogged, take delta backup, restore delta backup, check that PGDATA`s are physically the same """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -186,7 +174,7 @@ def test_exclude_unlogged_tables_1(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -197,9 +185,6 @@ def test_exclude_unlogged_tables_1(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_exclude_unlogged_tables_2(self): """ @@ -208,10 +193,9 @@ def test_exclude_unlogged_tables_2(self): 2. 
restore FULL, DELTA, PAGE to empty db, ensure unlogged table exist and is epmty """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -279,19 +263,14 @@ def test_exclude_unlogged_tables_2(self): 'select count(*) from test')[0][0], 0) - # Clean after yourself - self.del_test_dir(module_name, fname) - - # @unittest.skip("skip") def test_exclude_log_dir(self): """ check that by default 'log' and 'pg_log' directories are not backed up """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -321,18 +300,14 @@ def test_exclude_log_dir(self): self.assertTrue(os.path.exists(path)) self.assertFalse(os.path.exists(log_file)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_exclude_log_dir_1(self): """ check that "--backup-pg-log" works correctly """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -361,6 +336,3 @@ def test_exclude_log_dir_1(self): log_file = os.path.join(path, 'postgresql.log') self.assertTrue(os.path.exists(path)) self.assertTrue(os.path.exists(log_file)) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/external.py b/tests/external.py index 530e7fb26..27928a43c 100644 --- a/tests/external.py +++ b/tests/external.py @@ -6,8 +6,6 @@ import shutil -module_name = 'external' - # TODO: add some ptrack tests class ExternalTest(ProbackupTest, unittest.TestCase): @@ -19,15 +17,14 @@ def test_basic_external(self): with external directory, restore backup, check that external directory was successfully copied """ - fname = self.id().split('.')[3] - core_dir = os.path.join(self.tmp_path, module_name, fname) + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], set_replication=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') external_dir = self.get_tblspace_path(node, 'somedirectory') # create directory in external_directory @@ -91,9 +88,6 @@ def test_basic_external(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_external_none(self): @@ -103,13 +97,12 @@ def 
test_external_none(self): restore delta backup, check that external directory was not copied """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], set_replication=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') external_dir = self.get_tblspace_path(node, 'somedirectory') # create directory in external_directory @@ -153,9 +146,6 @@ def test_external_none(self): node.base_dir, exclude_dirs=['logs']) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_external_dirs_overlapping(self): @@ -164,13 +154,12 @@ def test_external_dirs_overlapping(self): take backup with two external directories pointing to the same directory, backup should fail """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], set_replication=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') external_dir1 = self.get_tblspace_path(node, 'external_dir1') external_dir2 = self.get_tblspace_path(node, 'external_dir2') @@ -207,9 +196,6 @@ def test_external_dirs_overlapping(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_external_dir_mapping(self): """ @@ -218,13 +204,12 @@ def test_external_dir_mapping(self): check that restore with external-dir mapping will end with success """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -247,7 +232,7 @@ def test_external_dir_mapping(self): data_dir=external_dir2, options=["-j", "4"]) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() external_dir1_new = self.get_tblspace_path(node_restored, 'external_dir1') @@ -300,20 +285,16 @@ def test_external_dir_mapping(self): node_restored.base_dir, exclude_dirs=['logs']) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_backup_multiple_external(self): """check that cmdline has priority over config""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, 
self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -361,9 +342,6 @@ def test_backup_multiple_external(self): node.base_dir, exclude_dirs=['logs']) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_backward_compatibility(self): @@ -376,10 +354,9 @@ def test_external_backward_compatibility(self): if not self.probackup_old_path: self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -447,7 +424,7 @@ def test_external_backward_compatibility(self): # RESTORE chain with new binary node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -466,9 +443,6 @@ def test_external_backward_compatibility(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_backward_compatibility_merge_1(self): @@ -480,10 +454,9 @@ def test_external_backward_compatibility_merge_1(self): if not self.probackup_old_path: self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -542,7 +515,7 @@ def test_external_backward_compatibility_merge_1(self): # Restore merged backup node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -561,9 +534,6 @@ def test_external_backward_compatibility_merge_1(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_backward_compatibility_merge_2(self): @@ -575,10 +545,9 @@ def test_external_backward_compatibility_merge_2(self): if not self.probackup_old_path: self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -666,7 +635,7 @@ def test_external_backward_compatibility_merge_2(self): # Restore merged backup node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + 
base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -689,9 +658,6 @@ def test_external_backward_compatibility_merge_2(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_merge(self): @@ -699,10 +665,9 @@ def test_external_merge(self): if not self.probackup_old_path: self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -779,17 +744,13 @@ def test_external_merge(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_merge_skip_external_dirs(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -877,17 +838,13 @@ def test_external_merge_skip_external_dirs(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_merge_1(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -957,17 +914,13 @@ def test_external_merge_1(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_merge_3(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1050,17 +1003,13 @@ def test_external_merge_3(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_merge_2(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, 
initdb_params=['--data-checksums']) @@ -1144,17 +1093,13 @@ def test_external_merge_2(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_restore_external_changed_data(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1242,17 +1187,13 @@ def test_restore_external_changed_data(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_restore_external_changed_data_1(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1349,17 +1290,13 @@ def test_restore_external_changed_data_1(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_merge_external_changed_data(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1452,19 +1389,15 @@ def test_merge_external_changed_data(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_restore_skip_external(self): """ Check that --skip-external-dirs works correctly """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1523,9 +1456,6 @@ def test_restore_skip_external(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_dir_is_symlink(self): @@ -1537,12 +1467,11 @@ def test_external_dir_is_symlink(self): if os.name == 'nt': return unittest.skip('Skipped for Windows') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) 
shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1557,7 +1486,7 @@ def test_external_dir_is_symlink(self): backup_dir, 'node', node, options=["-j", "4", "--stream"]) # fill some directory with data - core_dir = os.path.join(self.tmp_path, module_name, fname) + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) symlinked_dir = os.path.join(core_dir, 'symlinked') self.restore_node( @@ -1581,7 +1510,7 @@ def test_external_dir_is_symlink(self): node.base_dir, exclude_dirs=['logs']) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) # RESTORE node_restored.cleanup() @@ -1606,9 +1535,6 @@ def test_external_dir_is_symlink(self): backup_dir, 'node', backup_id=backup_id)['external-dirs']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_dir_contain_symlink_on_dir(self): @@ -1620,12 +1546,11 @@ def test_external_dir_contain_symlink_on_dir(self): if os.name == 'nt': return unittest.skip('Skipped for Windows') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1641,7 +1566,7 @@ def test_external_dir_contain_symlink_on_dir(self): backup_dir, 'node', node, options=["-j", "4", "--stream"]) # fill some directory with data - core_dir = os.path.join(self.tmp_path, module_name, fname) + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) symlinked_dir = os.path.join(core_dir, 'symlinked') self.restore_node( @@ -1666,7 +1591,7 @@ def test_external_dir_contain_symlink_on_dir(self): node.base_dir, exclude_dirs=['logs']) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) # RESTORE node_restored.cleanup() @@ -1691,9 +1616,6 @@ def test_external_dir_contain_symlink_on_dir(self): backup_dir, 'node', backup_id=backup_id)['external-dirs']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_dir_contain_symlink_on_file(self): @@ -1705,12 +1627,11 @@ def test_external_dir_contain_symlink_on_file(self): if os.name == 'nt': return unittest.skip('Skipped for Windows') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 
'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1726,7 +1647,7 @@ def test_external_dir_contain_symlink_on_file(self): backup_dir, 'node', node, options=["-j", "4", "--stream"]) # fill some directory with data - core_dir = os.path.join(self.tmp_path, module_name, fname) + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) symlinked_dir = os.path.join(core_dir, 'symlinked') self.restore_node( @@ -1753,7 +1674,7 @@ def test_external_dir_contain_symlink_on_file(self): node.base_dir, exclude_dirs=['logs']) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) # RESTORE node_restored.cleanup() @@ -1778,9 +1699,6 @@ def test_external_dir_contain_symlink_on_file(self): backup_dir, 'node', backup_id=backup_id)['external-dirs']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_dir_is_tablespace(self): @@ -1788,12 +1706,11 @@ def test_external_dir_is_tablespace(self): Check that backup fails with error if external directory points to tablespace """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1828,21 +1745,17 @@ def test_external_dir_is_tablespace(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_restore_external_dir_not_empty(self): """ Check that backup fails with error if external directory point to not empty tablespace and if remapped directory also isn`t empty """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1908,9 +1821,6 @@ def test_restore_external_dir_not_empty(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_restore_external_dir_is_missing(self): """ take FULL backup with not empty external directory @@ -1918,12 +1828,11 @@ def test_restore_external_dir_is_missing(self): take DELTA backup with external directory, which should fail """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, 
ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1990,9 +1899,6 @@ def test_restore_external_dir_is_missing(self): node.base_dir, exclude_dirs=['logs']) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_merge_external_dir_is_missing(self): """ take FULL backup with not empty external directory @@ -2003,12 +1909,11 @@ def test_merge_external_dir_is_missing(self): merge it into FULL, restore and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2078,9 +1983,6 @@ def test_merge_external_dir_is_missing(self): node.base_dir, exclude_dirs=['logs']) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_restore_external_dir_is_empty(self): """ take FULL backup with not empty external directory @@ -2089,12 +1991,11 @@ def test_restore_external_dir_is_empty(self): restore DELRA backup, check that restored external directory is empty """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2142,9 +2043,6 @@ def test_restore_external_dir_is_empty(self): node.base_dir, exclude_dirs=['logs']) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_merge_external_dir_is_empty(self): """ take FULL backup with not empty external directory @@ -2153,12 +2051,11 @@ def test_merge_external_dir_is_empty(self): merge backups and restore FULL, check that restored external directory is empty """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2209,9 +2106,6 @@ def test_merge_external_dir_is_empty(self): node.base_dir, exclude_dirs=['logs']) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - def 
test_restore_external_dir_string_order(self): """ take FULL backup with not empty external directory @@ -2220,12 +2114,11 @@ def test_restore_external_dir_string_order(self): restore DELRA backup, check that restored external directory is empty """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2289,9 +2182,6 @@ def test_restore_external_dir_string_order(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_merge_external_dir_string_order(self): """ take FULL backup with not empty external directory @@ -2300,12 +2190,11 @@ def test_merge_external_dir_string_order(self): restore DELRA backup, check that restored external directory is empty """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2372,9 +2261,6 @@ def test_merge_external_dir_string_order(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_smart_restore_externals(self): """ @@ -2383,13 +2269,12 @@ def test_smart_restore_externals(self): make sure that files from externals are not copied during restore https://github.com/postgrespro/pg_probackup/issues/63 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2451,9 +2336,6 @@ def test_smart_restore_externals(self): for file in filelist_diff: self.assertNotIn(file, logfile_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_external_validation(self): """ @@ -2462,13 +2344,12 @@ def test_external_validation(self): corrupt external file in backup, run validate which should fail """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, 
self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2522,6 +2403,3 @@ def test_external_validation(self): 'CORRUPT', self.show_pb(backup_dir, 'node', full_id)['status'], 'Backup STATUS should be "CORRUPT"') - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/false_positive.py b/tests/false_positive.py index 2ededdf12..8e2e74cc0 100644 --- a/tests/false_positive.py +++ b/tests/false_positive.py @@ -1,13 +1,12 @@ import unittest import os +from asyncio import sleep + from .helpers.ptrack_helpers import ProbackupTest, ProbackupException from datetime import datetime, timedelta import subprocess -module_name = 'false_positive' - - class FalsePositive(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -16,13 +15,12 @@ def test_validate_wal_lost_segment(self): """ Loose segment located between backups. ExpectedFailure. This is BUG """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -47,19 +45,15 @@ def test_validate_wal_lost_segment(self): backup_dir, 'node')) ######## - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.expectedFailure # Need to force validation of ancestor-chain def test_incremental_backup_corrupt_full_1(self): """page-level backup with corrupted full backup""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -104,9 +98,6 @@ def test_incremental_backup_corrupt_full_1(self): self.assertEqual( self.show_pb(backup_dir, 'node')[0]['Status'], "ERROR") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") @unittest.expectedFailure def test_pg_10_waldir(self): @@ -116,18 +107,18 @@ def test_pg_10_waldir(self): if self.pg_config_version < self.version_to_num('10.0'): return unittest.skip('You need PostgreSQL >= 10 for this test') - fname = self.id().split('.')[3] wal_dir = os.path.join( - os.path.join(self.tmp_path, module_name, fname), 'wal_dir') + os.path.join(self.tmp_path, self.module_name, self.fname), 'wal_dir') + import shutil shutil.rmtree(wal_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=[ '--data-checksums', '--waldir={0}'.format(wal_dir)]) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -140,7 +131,7 @@ def test_pg_10_waldir(self): # restore backup node_restored = 
self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -154,9 +145,6 @@ def test_pg_10_waldir(self): os.path.islink(os.path.join(node_restored.data_dir, 'pg_wal')), 'pg_wal should be symlink') - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.expectedFailure # @unittest.skip("skip") def test_recovery_target_time_backup_victim(self): @@ -165,10 +153,9 @@ def test_recovery_target_time_backup_victim(self): probackup chooses valid backup https://github.com/postgrespro/pg_probackup/issues/104 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -216,9 +203,6 @@ def test_recovery_target_time_backup_victim(self): backup_dir, 'node', options=['--recovery-target-time={0}'.format(target_time)]) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.expectedFailure # @unittest.skip("skip") def test_recovery_target_lsn_backup_victim(self): @@ -227,10 +211,9 @@ def test_recovery_target_lsn_backup_victim(self): probackup chooses valid backup https://github.com/postgrespro/pg_probackup/issues/104 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -280,9 +263,6 @@ def test_recovery_target_lsn_backup_victim(self): backup_dir, 'node', options=['--recovery-target-lsn={0}'.format(target_lsn)]) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") @unittest.expectedFailure def test_streaming_timeout(self): @@ -291,10 +271,9 @@ def test_streaming_timeout(self): message because our WAL streaming engine is "borrowed" from pg_receivexlog """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -331,20 +310,16 @@ def test_streaming_timeout(self): 'ERROR: Problem in receivexlog', log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") @unittest.expectedFailure def test_validate_all_empty_catalog(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) try: @@ -360,6 +335,3 @@ def 
test_validate_all_empty_catalog(self): e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/incr_restore.py b/tests/incr_restore.py index cb684a23a..55d59fa99 100644 --- a/tests/incr_restore.py +++ b/tests/incr_restore.py @@ -12,20 +12,16 @@ from testgres import QueryException -module_name = 'incr_restore' - - class IncrRestoreTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_basic_incr_restore(self): """incremental restore in CHECKSUM mode""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -76,18 +72,14 @@ def test_basic_incr_restore(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_incr_restore_into_missing_directory(self): """""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -124,19 +116,15 @@ def test_basic_incr_restore_into_missing_directory(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_checksum_corruption_detection(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -181,20 +169,16 @@ def test_checksum_corruption_detection(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_restore_with_tablespace(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -224,19 +208,15 @@ def test_incr_restore_with_tablespace(self): 
pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_restore_with_tablespace_1(self): """recovery to target timeline""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], set_replication=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -282,22 +262,18 @@ def test_incr_restore_with_tablespace_1(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_restore_with_tablespace_2(self): """ If "--tablespace-mapping" option is used with incremental restore, then new directory must be empty. """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], set_replication=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -305,7 +281,7 @@ def test_incr_restore_with_tablespace_2(self): self.backup_node(backup_dir, 'node', node, options=['--stream']) node_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_1')) # fill node1 with data out = self.restore_node( @@ -355,20 +331,16 @@ def test_incr_restore_with_tablespace_2(self): pgdata_restored = self.pgdata_content(node_1.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_restore_with_tablespace_3(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -396,21 +368,17 @@ def test_incr_restore_with_tablespace_3(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_restore_with_tablespace_4(self): """ Check that system ID mismatch is detected, """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) 
self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -427,7 +395,7 @@ def test_incr_restore_with_tablespace_4(self): # recreate node node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) node.slow_start() @@ -469,9 +437,6 @@ def test_incr_restore_with_tablespace_4(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure @unittest.skip("skip") def test_incr_restore_with_tablespace_5(self): @@ -481,13 +446,12 @@ def test_incr_restore_with_tablespace_5(self): with some old content, that belongs to an instance with different system id. """ - fname = self.id().split('.')[3] node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), + base_dir=os.path.join(self.module_name, self.fname, 'node1'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node1) node1.slow_start() @@ -503,7 +467,7 @@ def test_incr_restore_with_tablespace_5(self): # recreate node node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2'), + base_dir=os.path.join(self.module_name, self.fname, 'node2'), set_replication=True, initdb_params=['--data-checksums']) node2.slow_start() @@ -530,21 +494,17 @@ def test_incr_restore_with_tablespace_5(self): pgdata_restored = self.pgdata_content(node1.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_restore_with_tablespace_6(self): """ Empty pgdata, not empty tablespace """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -591,22 +551,18 @@ def test_incr_restore_with_tablespace_6(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_restore_with_tablespace_7(self): """ Restore backup without tablespace into PGDATA with tablespace. 
""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -647,19 +603,15 @@ def test_incr_restore_with_tablespace_7(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_incr_restore_sanity(self): """recovery to target timeline""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], set_replication=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -690,7 +642,7 @@ def test_basic_incr_restore_sanity(self): repr(e.message), self.cmd)) node_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_1')) try: self.restore_node( @@ -714,9 +666,6 @@ def test_basic_incr_restore_sanity(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_checksum_restore(self): """ @@ -725,13 +674,12 @@ def test_incr_checksum_restore(self): X - is instance, we want to return it to C state. """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], pg_options={'wal_log_hints': 'on'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -758,7 +706,7 @@ def test_incr_checksum_restore(self): node.stop(['-m', 'immediate', '-D', node.data_dir]) node_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_1')) node_1.cleanup() self.restore_node( @@ -803,9 +751,6 @@ def test_incr_checksum_restore(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_lsn_restore(self): @@ -815,13 +760,12 @@ def test_incr_lsn_restore(self): X - is instance, we want to return it to C state. 
""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], pg_options={'wal_log_hints': 'on'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -848,7 +792,7 @@ def test_incr_lsn_restore(self): node.stop(['-m', 'immediate', '-D', node.data_dir]) node_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_1')) node_1.cleanup() self.restore_node( @@ -892,9 +836,6 @@ def test_incr_lsn_restore(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_lsn_sanity(self): """ @@ -904,13 +845,12 @@ def test_incr_lsn_sanity(self): X - is instance, we want to return it to state B. fail is expected behaviour in case of lsn restore. """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], pg_options={'wal_log_hints': 'on'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -920,7 +860,7 @@ def test_incr_lsn_sanity(self): node.pgbench_init(scale=10) node_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_1')) node_1.cleanup() self.restore_node( @@ -961,9 +901,6 @@ def test_incr_lsn_sanity(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_checksum_sanity(self): """ @@ -972,12 +909,11 @@ def test_incr_checksum_sanity(self): X - is instance, we want to return it to state B. 
""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -987,7 +923,7 @@ def test_incr_checksum_sanity(self): node.pgbench_init(scale=20) node_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_1')) node_1.cleanup() self.restore_node( @@ -1019,22 +955,17 @@ def test_incr_checksum_sanity(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - - # @unittest.skip("skip") def test_incr_checksum_corruption_detection(self): """ check that corrupted page got detected and replaced """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), # initdb_params=['--data-checksums'], pg_options={'wal_log_hints': 'on'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1078,21 +1009,17 @@ def test_incr_checksum_corruption_detection(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_lsn_corruption_detection(self): """ check that corrupted page got detected and replaced """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], pg_options={'wal_log_hints': 'on'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1136,20 +1063,16 @@ def test_incr_lsn_corruption_detection(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_incr_restore_multiple_external(self): """check that cmdline has priority over config""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1207,20 +1130,16 @@ def test_incr_restore_multiple_external(self): node.base_dir, exclude_dirs=['logs']) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def 
test_incr_lsn_restore_multiple_external(self): """check that cmdline has priority over config""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1278,22 +1197,18 @@ def test_incr_lsn_restore_multiple_external(self): node.base_dir, exclude_dirs=['logs']) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_incr_lsn_restore_backward(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={'wal_log_hints': 'on', 'hot_standby': 'on'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1386,23 +1301,19 @@ def test_incr_lsn_restore_backward(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(delta_pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_incr_checksum_restore_backward(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'hot_standby': 'on'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1479,21 +1390,20 @@ def test_incr_checksum_restore_backward(self): self.compare_pgdata(delta_pgdata, pgdata_restored) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) # @unittest.skip("skip") def test_make_replica_via_incr_checksum_restore(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums']) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -1503,7 +1413,7 @@ def test_make_replica_via_incr_checksum_restore(self): master.slow_start() replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + 
base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() master.pgbench_init(scale=20) @@ -1551,22 +1461,18 @@ def test_make_replica_via_incr_checksum_restore(self): pgbench = new_master.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_make_replica_via_incr_lsn_restore(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums']) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -1576,7 +1482,7 @@ def test_make_replica_via_incr_lsn_restore(self): master.slow_start() replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() master.pgbench_init(scale=20) @@ -1624,20 +1530,16 @@ def test_make_replica_via_incr_lsn_restore(self): pgbench = new_master.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_incr_checksum_long_xact(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1694,9 +1596,6 @@ def test_incr_checksum_long_xact(self): 'select count(*) from t1').decode('utf-8').rstrip(), '1') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure # This test will pass with Enterprise @@ -1705,12 +1604,11 @@ def test_incr_checksum_long_xact(self): def test_incr_lsn_long_xact_1(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1774,24 +1672,20 @@ def test_incr_lsn_long_xact_1(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_incr_lsn_long_xact_2(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], 
pg_options={ 'full_page_writes': 'off', 'wal_log_hints': 'off'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1861,21 +1755,17 @@ def test_incr_lsn_long_xact_2(self): 'select count(*) from t1').decode('utf-8').rstrip(), '1') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_incr_restore_zero_size_file_checksum(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -1934,21 +1824,17 @@ def test_incr_restore_zero_size_file_checksum(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata3, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_incr_restore_zero_size_file_lsn(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2013,15 +1899,11 @@ def test_incr_restore_zero_size_file_lsn(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata3, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_incremental_partial_restore_exclude_checksum(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -2060,11 +1942,11 @@ def test_incremental_partial_restore_exclude_checksum(self): # restore FULL backup into second node2 node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1')) + base_dir=os.path.join(self.module_name, self.fname, 'node1')) node1.cleanup() node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2')) + base_dir=os.path.join(self.module_name, self.fname, 'node2')) node2.cleanup() # restore some data into node2 @@ -2118,15 +2000,11 @@ def test_incremental_partial_restore_exclude_checksum(self): self.assertNotIn('PANIC', output) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_incremental_partial_restore_exclude_lsn(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, 
self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -2167,11 +2045,11 @@ def test_incremental_partial_restore_exclude_lsn(self): # restore FULL backup into second node2 node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1')) + base_dir=os.path.join(self.module_name, self.fname, 'node1')) node1.cleanup() node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2')) + base_dir=os.path.join(self.module_name, self.fname, 'node2')) node2.cleanup() # restore some data into node2 @@ -2228,15 +2106,11 @@ def test_incremental_partial_restore_exclude_lsn(self): self.assertNotIn('PANIC', output) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_incremental_partial_restore_exclude_tablespace_checksum(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -2282,13 +2156,13 @@ def test_incremental_partial_restore_exclude_tablespace_checksum(self): # node1 node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1')) + base_dir=os.path.join(self.module_name, self.fname, 'node1')) node1.cleanup() node1_tablespace = self.get_tblspace_path(node1, 'somedata') # node2 node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2')) + base_dir=os.path.join(self.module_name, self.fname, 'node2')) node2.cleanup() node2_tablespace = self.get_tblspace_path(node2, 'somedata') @@ -2372,17 +2246,13 @@ def test_incremental_partial_restore_exclude_tablespace_checksum(self): self.assertNotIn('PANIC', output) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_incremental_pg_filenode_map(self): """ https://github.com/postgrespro/pg_probackup/issues/320 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -2391,7 +2261,7 @@ def test_incremental_pg_filenode_map(self): node.slow_start() node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), + base_dir=os.path.join(self.module_name, self.fname, 'node1'), initdb_params=['--data-checksums']) node1.cleanup() @@ -2432,7 +2302,4 @@ def test_incremental_pg_filenode_map(self): 'postgres', 'select 1') - # Clean after yourself - self.del_test_dir(module_name, fname) - # check that MinRecPoint and BackupStartLsn are correctly used in case of --incrementa-lsn diff --git a/tests/init.py b/tests/init.py index f5715d249..94b076fef 100644 --- a/tests/init.py +++ b/tests/init.py @@ -4,18 +4,14 @@ import shutil -module_name = 'init' - - class InitTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") # @unittest.expectedFailure def test_success(self): """Success normal init""" - fname = self.id().split(".")[3] - 
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node')) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node')) self.init_pb(backup_dir) self.assertEqual( dir_files(backup_dir), @@ -64,15 +60,11 @@ def test_success(self): e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(e.message, self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_already_exist(self): """Failure with backup catalog already existed""" - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node')) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node')) self.init_pb(backup_dir) try: self.show_pb(backup_dir, 'node') @@ -84,15 +76,11 @@ def test_already_exist(self): e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_abs_path(self): """failure with backup catalog should be given as absolute path""" - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node')) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node')) try: self.run_pb(["init", "-B", os.path.relpath("%s/backup" % node.base_dir, self.dir_path)]) self.assertEqual(1, 0, 'Expecting Error due to initialization with non-absolute path in --backup-path. 
Output: {0} \n CMD: {1}'.format( @@ -103,18 +91,14 @@ def test_abs_path(self): e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_add_instance_idempotence(self): """ https://github.com/postgrespro/pg_probackup/issues/219 """ - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node')) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node')) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -152,6 +136,3 @@ def test_add_instance_idempotence(self): e.message, "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/locking.py b/tests/locking.py index 4042a1462..8531d7de5 100644 --- a/tests/locking.py +++ b/tests/locking.py @@ -4,9 +4,6 @@ from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -module_name = 'locking' - - class LockingTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -19,12 +16,11 @@ def test_locking_running_validate_1(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -64,7 +60,6 @@ def test_locking_running_validate_1(self): # Clean after yourself gdb.kill() - self.del_test_dir(module_name, fname) def test_locking_running_validate_2(self): """ @@ -76,12 +71,11 @@ def test_locking_running_validate_2(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -135,7 +129,6 @@ def test_locking_running_validate_2(self): # Clean after yourself gdb.kill() - self.del_test_dir(module_name, fname) def test_locking_running_validate_2_specific_id(self): """ @@ -148,12 +141,11 @@ def test_locking_running_validate_2_specific_id(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ 
-236,7 +228,6 @@ def test_locking_running_validate_2_specific_id(self): # Clean after yourself gdb.kill() - self.del_test_dir(module_name, fname) def test_locking_running_3(self): """ @@ -248,12 +239,11 @@ def test_locking_running_3(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -308,7 +298,6 @@ def test_locking_running_3(self): # Clean after yourself gdb.kill() - self.del_test_dir(module_name, fname) def test_locking_restore_locked(self): """ @@ -320,12 +309,11 @@ def test_locking_restore_locked(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -352,7 +340,6 @@ def test_locking_restore_locked(self): # Clean after yourself gdb.kill() - self.del_test_dir(module_name, fname) def test_concurrent_delete_and_restore(self): """ @@ -364,12 +351,11 @@ def test_concurrent_delete_and_restore(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -411,7 +397,6 @@ def test_concurrent_delete_and_restore(self): # Clean after yourself gdb.kill() - self.del_test_dir(module_name, fname) def test_locking_concurrent_validate_and_backup(self): """ @@ -421,12 +406,11 @@ def test_locking_concurrent_validate_and_backup(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -449,7 +433,6 @@ def test_locking_concurrent_validate_and_backup(self): # Clean after yourself gdb.kill() - self.del_test_dir(module_name, fname) def test_locking_concurren_restore_and_delete(self): """ @@ -459,12 +442,11 @@ def test_locking_concurren_restore_and_delete(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + 
base_dir=os.path.join(self.module_name, self.fname, 'node'),
             initdb_params=['--data-checksums'])
 
-        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
         self.init_pb(backup_dir)
         self.add_instance(backup_dir, 'node', node)
         self.set_archiving(backup_dir, 'node', node)
@@ -495,17 +477,15 @@ def test_locking_concurren_restore_and_delete(self):
 
         # Clean after yourself
         gdb.kill()
-        self.del_test_dir(module_name, fname)
 
     def test_backup_directory_name(self):
         """
         """
-        fname = self.id().split('.')[3]
         node = self.make_simple_node(
-            base_dir=os.path.join(module_name, fname, 'node'),
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
             initdb_params=['--data-checksums'])
 
-        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
         self.init_pb(backup_dir)
         self.add_instance(backup_dir, 'node', node)
         self.set_archiving(backup_dir, 'node', node)
@@ -549,18 +529,16 @@ def test_backup_directory_name(self):
             self.show_pb(backup_dir, 'node', page_id_2))
 
         # Clean after yourself
-        self.del_test_dir(module_name, fname)
 
     def test_empty_lock_file(self):
         """
        https://github.com/postgrespro/pg_probackup/issues/308
        """
-        fname = self.id().split('.')[3]
         node = self.make_simple_node(
-            base_dir=os.path.join(module_name, fname, 'node'),
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
             initdb_params=['--data-checksums'])
 
-        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
         self.init_pb(backup_dir)
         self.add_instance(backup_dir, 'node', node)
         self.set_archiving(backup_dir, 'node', node)
@@ -594,21 +572,17 @@ def test_empty_lock_file(self):
 #        p1.wait()
 #        p2.wait()
 
-        # Clean after yourself
-        self.del_test_dir(module_name, fname)
-
     def test_shared_lock(self):
         """
        Make sure that shared lock leaves no files with pids
        """
        self._check_gdb_flag_or_skip_test()

-        fname = self.id().split('.')[3]
         node = self.make_simple_node(
-            base_dir=os.path.join(module_name, fname, 'node'),
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
             initdb_params=['--data-checksums'])
 
-        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
         self.init_pb(backup_dir)
         self.add_instance(backup_dir, 'node', node)
         self.set_archiving(backup_dir, 'node', node)
@@ -653,5 +627,3 @@ def test_shared_lock(self):
             os.path.exists(lockfile_shr),
             "File should not exist: {0}".format(lockfile_shr))
 
-        # Clean after yourself
-        self.del_test_dir(module_name, fname)
diff --git a/tests/logging.py b/tests/logging.py
index 70ebcf6d1..c5cdfa344 100644
--- a/tests/logging.py
+++ b/tests/logging.py
@@ -3,9 +3,6 @@
 from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
 import datetime
 
-module_name = 'logging'
-
-
 class LogTest(ProbackupTest, unittest.TestCase):
 
     # @unittest.skip("skip")
@@ -16,13 +13,12 @@ def test_log_rotation(self):
         """
         self._check_gdb_flag_or_skip_test()
 
-        fname = self.id().split('.')[3]
         node = self.make_simple_node(
-            base_dir=os.path.join(module_name, fname, 'node'),
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
             set_replication=True,
             initdb_params=['--data-checksums'])
 
-        backup_dir = os.path.join(self.tmp_path, 
self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -43,17 +39,13 @@ def test_log_rotation(self): gdb.run_until_break() gdb.continue_execution_until_exit() - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_log_filename_strftime(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -76,17 +68,13 @@ def test_log_filename_strftime(self): self.assertTrue(os.path.isfile(path)) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_truncate_rotation_file(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -151,17 +139,13 @@ def test_truncate_rotation_file(self): self.assertTrue(os.path.isfile(rotation_file_path)) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_unlink_rotation_file(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -223,17 +207,13 @@ def test_unlink_rotation_file(self): os.stat(log_file_path).st_size, log_file_size) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_garbage_in_rotation_file(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -261,9 +241,6 @@ def test_garbage_in_rotation_file(self): # mangle .rotation file with open(rotation_file_path, "w+b", 0) as f: f.write(b"blah") - f.flush() - f.close - output = self.backup_node( backup_dir, 'node', node, options=[ @@ -302,24 +279,20 @@ def test_garbage_in_rotation_file(self): os.stat(log_file_path).st_size, log_file_size) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_issue_274(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, 
initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) node.slow_start() replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.backup_node(backup_dir, 'node', node, options=['--stream']) @@ -370,6 +343,3 @@ def test_issue_274(self): log_content = f.read() self.assertIn('INFO: command:', log_content) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/merge.py b/tests/merge.py index 4c374bdfb..fa0da7b2b 100644 --- a/tests/merge.py +++ b/tests/merge.py @@ -9,21 +9,17 @@ import time import subprocess -module_name = "merge" - - class MergeTest(ProbackupTest, unittest.TestCase): def test_basic_merge_full_page(self): """ Test MERGE command, it merges FULL backup with target PAGE backups """ - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") # Initialize instance and backup directory node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=["--data-checksums"]) self.init_pb(backup_dir) @@ -100,19 +96,15 @@ def test_basic_merge_full_page(self): count2 = node.execute("postgres", "select count(*) from test") self.assertEqual(count1, count2) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_merge_compressed_backups(self): """ Test MERGE command with compressed backups """ - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") # Initialize instance and backup directory node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=["--data-checksums"]) self.init_pb(backup_dir) @@ -163,18 +155,16 @@ def test_merge_compressed_backups(self): # Clean after yourself node.cleanup() - self.del_test_dir(module_name, fname) def test_merge_compressed_backups_1(self): """ Test MERGE command with compressed backups """ - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") # Initialize instance and backup directory node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=["--data-checksums"]) self.init_pb(backup_dir) @@ -234,18 +224,16 @@ def test_merge_compressed_backups_1(self): # Clean after yourself node.cleanup() - self.del_test_dir(module_name, fname) def test_merge_compressed_and_uncompressed_backups(self): """ Test MERGE command with compressed and uncompressed backups """ - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") # Initialize instance and backup directory node = self.make_simple_node( - 
base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=["--data-checksums"], ) @@ -306,18 +294,16 @@ def test_merge_compressed_and_uncompressed_backups(self): # Clean after yourself node.cleanup() - self.del_test_dir(module_name, fname) def test_merge_compressed_and_uncompressed_backups_1(self): """ Test MERGE command with compressed and uncompressed backups """ - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") # Initialize instance and backup directory node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=["--data-checksums"], ) @@ -380,18 +366,16 @@ def test_merge_compressed_and_uncompressed_backups_1(self): # Clean after yourself node.cleanup() - self.del_test_dir(module_name, fname) def test_merge_compressed_and_uncompressed_backups_2(self): """ Test MERGE command with compressed and uncompressed backups """ - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") # Initialize instance and backup directory node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=["--data-checksums"], ) @@ -450,11 +434,6 @@ def test_merge_compressed_and_uncompressed_backups_2(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - node.cleanup() - self.del_test_dir(module_name, fname) - - # @unittest.skip("skip") def test_merge_tablespaces(self): """ @@ -463,10 +442,9 @@ def test_merge_tablespaces(self): tablespace, take page backup, merge it and restore """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], ) @@ -538,10 +516,9 @@ def test_merge_tablespaces_1(self): drop first tablespace and take delta backup, merge it and restore """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], ) @@ -607,9 +584,6 @@ def test_merge_tablespaces_1(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_merge_page_truncate(self): """ make node, create table, take full backup, @@ -617,17 +591,16 @@ def test_merge_page_truncate(self): take page backup, merge full and page, restore last page backup and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + 
backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '300s'}) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -696,9 +669,6 @@ def test_merge_page_truncate(self): self.assertEqual(result1, result2) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_merge_delta_truncate(self): """ make node, create table, take full backup, @@ -706,17 +676,16 @@ def test_merge_delta_truncate(self): take page backup, merge full and page, restore last page backup and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '300s'}) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -786,7 +755,7 @@ def test_merge_delta_truncate(self): self.assertEqual(result1, result2) # Clean after yourself - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) def test_merge_ptrack_truncate(self): """ @@ -798,10 +767,9 @@ def test_merge_ptrack_truncate(self): if not self.ptrack: return unittest.skip('Skipped because ptrack support is disabled') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], ptrack_enable=True) @@ -850,7 +818,7 @@ def test_merge_ptrack_truncate(self): self.validate_pb(backup_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() old_tablespace = self.get_tblspace_path(node, 'somedata') @@ -881,9 +849,6 @@ def test_merge_ptrack_truncate(self): self.assertEqual(result1, result2) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_merge_delta_delete(self): """ @@ -891,10 +856,9 @@ def test_merge_delta_delete(self): alter tablespace location, take delta backup, merge full and delta, restore database. 
""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', @@ -943,7 +907,7 @@ def test_merge_delta_delete(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored') + base_dir=os.path.join(self.module_name, self.fname, 'node_restored') ) node_restored.cleanup() @@ -967,9 +931,6 @@ def test_merge_delta_delete(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_continue_failed_merge(self): """ @@ -977,11 +938,10 @@ def test_continue_failed_merge(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( base_dir=os.path.join( - module_name, fname, 'node'), + self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1045,9 +1005,6 @@ def test_continue_failed_merge(self): node.cleanup() self.restore_node(backup_dir, 'node', node) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_continue_failed_merge_with_corrupted_delta_backup(self): """ @@ -1055,10 +1012,9 @@ def test_continue_failed_merge_with_corrupted_delta_backup(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -1145,19 +1101,15 @@ def test_continue_failed_merge_with_corrupted_delta_backup(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_continue_failed_merge_2(self): """ Check that failed MERGE on delete can be continued """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -1217,8 +1169,6 @@ def test_continue_failed_merge_2(self): # Try to continue failed MERGE self.merge_backup(backup_dir, "node", backup_id) - # Clean after yourself - self.del_test_dir(module_name, fname) def test_continue_failed_merge_3(self): """ @@ -1227,10 +1177,9 @@ def test_continue_failed_merge_3(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = 
os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -1315,17 +1264,13 @@ def test_continue_failed_merge_3(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_merge_different_compression_algo(self): """ Check that backups with different compression algorithms can be merged """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1368,17 +1313,14 @@ def test_merge_different_compression_algo(self): self.merge_backup(backup_dir, "node", backup_id) - self.del_test_dir(module_name, fname) - def test_merge_different_wal_modes(self): """ Check that backups with different wal modes can be merged correctly """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1410,8 +1352,6 @@ def test_merge_different_wal_modes(self): self.assertEqual( 'STREAM', self.show_pb(backup_dir, 'node', backup_id)['wal']) - self.del_test_dir(module_name, fname) - def test_crash_after_opening_backup_control_1(self): """ check that crashing after opening backup.control @@ -1419,10 +1359,9 @@ def test_crash_after_opening_backup_control_1(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1462,7 +1401,7 @@ def test_crash_after_opening_backup_control_1(self): self.assertEqual( 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) # @unittest.skip("skip") def test_crash_after_opening_backup_control_2(self): @@ -1473,10 +1412,9 @@ def test_crash_after_opening_backup_control_2(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1555,8 +1493,6 @@ def test_crash_after_opening_backup_control_2(self): self.compare_pgdata(pgdata, pgdata_restored) - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def 
test_losing_file_after_failed_merge(self): """ @@ -1566,10 +1502,9 @@ def test_losing_file_after_failed_merge(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1648,17 +1583,14 @@ def test_losing_file_after_failed_merge(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - self.del_test_dir(module_name, fname) - def test_failed_merge_after_delete(self): """ """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1731,17 +1663,14 @@ def test_failed_merge_after_delete(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - self.del_test_dir(module_name, fname) - def test_failed_merge_after_delete_1(self): """ """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1809,17 +1738,14 @@ def test_failed_merge_after_delete_1(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - self.del_test_dir(module_name, fname) - def test_failed_merge_after_delete_2(self): """ """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1873,17 +1799,14 @@ def test_failed_merge_after_delete_2(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - self.del_test_dir(module_name, fname) - def test_failed_merge_after_delete_3(self): """ """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1963,8 +1886,6 @@ def test_failed_merge_after_delete_3(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - self.del_test_dir(module_name, fname) - # Skipped, because backups from the future are invalid. 
# This cause a "ERROR: Can't assign backup_id, there is already a backup in future" # now (PBCKP-259). We can conduct such a test again when we @@ -1975,13 +1896,12 @@ def test_merge_backup_from_future(self): take FULL backup, table PAGE backup from future, try to merge page with FULL """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2021,7 +1941,7 @@ def test_merge_backup_from_future(self): 'SELECT * from pgbench_accounts') node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -2052,9 +1972,6 @@ def test_merge_backup_from_future(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_merge_multiple_descendants(self): """ @@ -2067,12 +1984,11 @@ def test_merge_multiple_descendants(self): FULLb | FULLa """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2241,9 +2157,6 @@ def test_merge_multiple_descendants(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_smart_merge(self): """ @@ -2253,13 +2166,12 @@ def test_smart_merge(self): copied during restore https://github.com/postgrespro/pg_probackup/issues/63 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2301,18 +2213,14 @@ def test_smart_merge(self): with open(logfile, 'r') as f: logfile_content = f.read() - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_idempotent_merge(self): """ """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2378,18 +2286,15 @@ def 
test_idempotent_merge(self): self.assertEqual( page_id_2, self.show_pb(backup_dir, 'node')[0]['id']) - self.del_test_dir(module_name, fname) - def test_merge_correct_inheritance(self): """ Make sure that backup metainformation fields 'note' and 'expire-time' are correctly inherited during merge """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2432,18 +2337,15 @@ def test_merge_correct_inheritance(self): page_meta['expire-time'], self.show_pb(backup_dir, 'node', page_id)['expire-time']) - self.del_test_dir(module_name, fname) - def test_merge_correct_inheritance_1(self): """ Make sure that backup metainformation fields 'note' and 'expire-time' are correctly inherited during merge """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2481,8 +2383,6 @@ def test_merge_correct_inheritance_1(self): 'expire-time', self.show_pb(backup_dir, 'node', page_id)) - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_multi_timeline_merge(self): @@ -2497,10 +2397,9 @@ def test_multi_timeline_merge(self): P must have F as parent """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2568,7 +2467,7 @@ def test_multi_timeline_merge(self): "postgres", "select * from pgbench_accounts") node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -2598,9 +2497,6 @@ def test_multi_timeline_merge(self): '--amcheck', '-d', 'postgres', '-p', str(node_restored.port)]) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_merge_page_header_map_retry(self): @@ -2610,10 +2506,9 @@ def test_merge_page_header_map_retry(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2649,19 +2544,15 @@ def test_merge_page_header_map_retry(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # 
@unittest.skip("skip") def test_missing_data_file(self): """ """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2710,18 +2601,15 @@ def test_missing_data_file(self): 'ERROR: Cannot open backup file "{0}": No such file or directory'.format(file_to_remove), logfile_content) - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_missing_non_data_file(self): """ """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2769,7 +2657,7 @@ def test_missing_non_data_file(self): self.assertEqual( 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) # @unittest.skip("skip") def test_merge_remote_mode(self): @@ -2777,10 +2665,9 @@ def test_merge_remote_mode(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2827,16 +2714,13 @@ def test_merge_remote_mode(self): self.assertEqual( 'OK', self.show_pb(backup_dir, 'node')[0]['status']) - self.del_test_dir(module_name, fname) - def test_merge_pg_filenode_map(self): """ https://github.com/postgrespro/pg_probackup/issues/320 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -2845,7 +2729,7 @@ def test_merge_pg_filenode_map(self): node.slow_start() node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), + base_dir=os.path.join(self.module_name, self.fname, 'node1'), initdb_params=['--data-checksums']) node1.cleanup() @@ -2878,8 +2762,5 @@ def test_merge_pg_filenode_map(self): 'postgres', 'select 1') - # Clean after yourself - self.del_test_dir(module_name, fname) - # 1. Need new test with corrupted FULL backup # 2. 
different compression levels diff --git a/tests/option.py b/tests/option.py index 88e72ffd7..a8b964cb0 100644 --- a/tests/option.py +++ b/tests/option.py @@ -4,9 +4,6 @@ import locale -module_name = 'option' - - class OptionTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -41,14 +38,12 @@ def test_without_backup_path_3(self): e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - # @unittest.skip("skip") def test_options_4(self): """check options test""" - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node')) + base_dir=os.path.join(self.module_name, self.fname, 'node')) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -112,16 +107,12 @@ def test_options_4(self): e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_options_5(self): """check options test""" - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node')) + base_dir=os.path.join(self.module_name, self.fname, 'node')) output = self.init_pb(backup_dir) self.assertIn( @@ -225,9 +216,6 @@ def test_options_5(self): e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_help_6(self): """help options""" diff --git a/tests/page.py b/tests/page.py index c1cba6b40..b9398ec7a 100644 --- a/tests/page.py +++ b/tests/page.py @@ -7,9 +7,6 @@ import gzip import shutil -module_name = 'page' - - class PageTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -20,17 +17,16 @@ def test_basic_page_vacuum_truncate(self): take page backup, take second page backup, restore last page backup and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '300s'}) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -98,9 +94,6 @@ def test_basic_page_vacuum_truncate(self): self.assertEqual(result1, result2) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_vacuum_truncate_1(self): """ @@ -109,10 +102,9 @@ def test_page_vacuum_truncate_1(self): take page backup, insert some data, take second page backup and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = 
self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -159,7 +151,7 @@ def test_page_vacuum_truncate_1(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -171,9 +163,6 @@ def test_page_vacuum_truncate_1(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_stream(self): """ @@ -181,10 +170,9 @@ def test_page_stream(self): restore them and check data correctness """ self.maxDiff = None - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -258,9 +246,6 @@ def test_page_stream(self): self.assertEqual(page_result, page_result_new) node.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_archive(self): """ @@ -268,10 +253,9 @@ def test_page_archive(self): restore them and check data correctness """ self.maxDiff = None - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -352,19 +336,15 @@ def test_page_archive(self): self.assertEqual(page_result, page_result_new) node.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_multiple_segments(self): """ Make node, create table with multiple segments, write some data to it, check page and data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -399,7 +379,7 @@ def test_page_multiple_segments(self): # RESTORE NODE restored_node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'restored_node')) + base_dir=os.path.join(self.module_name, self.fname, 'restored_node')) restored_node.cleanup() tblspc_path = self.get_tblspace_path(node, 'somedata') tblspc_path_new = self.get_tblspace_path( @@ -427,9 +407,6 @@ def test_page_multiple_segments(self): if self.paranoia: self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_delete(self): """ @@ -437,10 +414,9 @@ def test_page_delete(self): delete everything from table, 
vacuum table, take page backup, restore page backup, compare . """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', @@ -477,7 +453,7 @@ def test_page_delete(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -499,9 +475,6 @@ def test_page_delete(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_delete_1(self): """ @@ -509,10 +482,9 @@ def test_page_delete_1(self): delete everything from table, vacuum table, take page backup, restore page backup, compare . """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -554,7 +526,7 @@ def test_page_delete_1(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored') + base_dir=os.path.join(self.module_name, self.fname, 'node_restored') ) node_restored.cleanup() @@ -577,26 +549,22 @@ def test_page_delete_1(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_parallel_pagemap(self): """ Test for parallel WAL segments reading, during which pagemap is built """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') # Initialize instance and backup directory node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], pg_options={ "hot_standby": "on" } ) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored'), + base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), ) self.init_pb(backup_dir) @@ -652,18 +620,16 @@ def test_parallel_pagemap(self): # Clean after yourself node.cleanup() node_restored.cleanup() - self.del_test_dir(module_name, fname) def test_parallel_pagemap_1(self): """ Test for parallel WAL segments reading, during which pagemap is built """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') # Initialize instance and backup directory node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], pg_options={} ) @@ -704,7 +670,6 @@ def 
test_parallel_pagemap_1(self): # Clean after yourself node.cleanup() - self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_page_backup_with_lost_wal_segment(self): @@ -715,12 +680,11 @@ def test_page_backup_with_lost_wal_segment(self): run page backup, expecting error because of missing wal segment make sure that backup status is 'ERROR' """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -785,9 +749,6 @@ def test_page_backup_with_lost_wal_segment(self): self.show_pb(backup_dir, 'node')[2]['status'], 'Backup {0} should have STATUS "ERROR"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_backup_with_corrupted_wal_segment(self): """ @@ -797,12 +758,11 @@ def test_page_backup_with_corrupted_wal_segment(self): run page backup, expecting error because of missing wal segment make sure that backup status is 'ERROR' """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -896,9 +856,6 @@ def test_page_backup_with_corrupted_wal_segment(self): self.show_pb(backup_dir, 'node')[2]['status'], 'Backup {0} should have STATUS "ERROR"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_backup_with_alien_wal_segment(self): """ @@ -910,18 +867,17 @@ def test_page_backup_with_alien_wal_segment(self): expecting error because of alien wal segment make sure that backup status is 'ERROR' """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) alien_node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'alien_node'), + base_dir=os.path.join(self.module_name, self.fname, 'alien_node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1017,20 +973,16 @@ def test_page_backup_with_alien_wal_segment(self): self.show_pb(backup_dir, 'node')[2]['status'], 'Backup {0} should have STATUS "ERROR"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_multithread_page_backup_with_toast(self): """ make node, create toast, do multithread PAGE backup """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1050,9 +1002,6 @@ def test_multithread_page_backup_with_toast(self): backup_dir, 'node', node, backup_type='page', options=["-j", "4"]) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_create_db(self): """ @@ -1060,10 +1009,9 @@ def test_page_create_db(self): restore database and check it presense """ self.maxDiff = None - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1101,7 +1049,7 @@ def test_page_create_db(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -1162,9 +1110,6 @@ def test_page_create_db(self): repr(e.message), self.cmd) ) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_multi_timeline_page(self): @@ -1179,10 +1124,9 @@ def test_multi_timeline_page(self): P must have F as parent """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1251,7 +1195,7 @@ def test_multi_timeline_page(self): "postgres", "select * from pgbench_accounts") node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -1303,9 +1247,6 @@ def test_multi_timeline_page(self): backup_list[4]['id']) self.assertEqual(backup_list[5]['current-tli'], 7) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_multitimeline_page_1(self): @@ -1317,10 +1258,9 @@ def test_multitimeline_page_1(self): P must have F as parent """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={'wal_log_hints': 'on'}) @@ -1373,7 +1313,7 @@ def test_multitimeline_page_1(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + 
base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -1384,22 +1324,18 @@ def test_multitimeline_page_1(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") # @unittest.expectedFailure def test_page_pg_resetxlog(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'shared_buffers': '512MB', 'max_wal_size': '3GB'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1486,6 +1422,3 @@ def test_page_pg_resetxlog(self): # # pgdata_restored = self.pgdata_content(node_restored.data_dir) # self.compare_pgdata(pgdata, pgdata_restored) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/pgpro2068.py b/tests/pgpro2068.py index 3baa0ba0b..434ce2800 100644 --- a/tests/pgpro2068.py +++ b/tests/pgpro2068.py @@ -9,9 +9,6 @@ from testgres import ProcessType -module_name = '2068' - - class BugTest(ProbackupTest, unittest.TestCase): def test_minrecpoint_on_replica(self): @@ -20,9 +17,8 @@ def test_minrecpoint_on_replica(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -33,7 +29,7 @@ def test_minrecpoint_on_replica(self): 'bgwriter_lru_multiplier': '4.0', 'max_wal_size': '256MB'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -45,7 +41,7 @@ def test_minrecpoint_on_replica(self): # start replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'node', replica, options=['-R']) @@ -190,6 +186,3 @@ def test_minrecpoint_on_replica(self): # do basebackup # do pg_probackup, expect error - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/pgpro560.py b/tests/pgpro560.py index 7e10fef6a..eeab59960 100644 --- a/tests/pgpro560.py +++ b/tests/pgpro560.py @@ -6,9 +6,6 @@ from time import sleep -module_name = 'pgpro560' - - class CheckSystemID(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -20,13 +17,12 @@ def test_pgpro560_control_file_loss(self): make backup check that backup failed """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) 
self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -52,7 +48,7 @@ def test_pgpro560_control_file_loss(self): # Clean after yourself # Return this file to avoid Postger fail os.rename(os.path.join(node.base_dir, 'data', 'global', 'pg_control_copy'), file) - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) def test_pgpro560_systemid_mismatch(self): """ @@ -61,21 +57,20 @@ def test_pgpro560_systemid_mismatch(self): feed to backup PGDATA from node1 and PGPORT from node2 check that backup failed """ - fname = self.id().split('.')[3] node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), + base_dir=os.path.join(self.module_name, self.fname, 'node1'), set_replication=True, initdb_params=['--data-checksums']) node1.slow_start() node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2'), + base_dir=os.path.join(self.module_name, self.fname, 'node2'), set_replication=True, initdb_params=['--data-checksums']) node2.slow_start() - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node1', node1) @@ -128,6 +123,3 @@ def test_pgpro560_systemid_mismatch(self): e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/pgpro589.py b/tests/pgpro589.py index d6381a8b5..8ce8e1f56 100644 --- a/tests/pgpro589.py +++ b/tests/pgpro589.py @@ -5,9 +5,6 @@ import subprocess -module_name = 'pgpro589' - - class ArchiveCheck(ProbackupTest, unittest.TestCase): def test_pgpro589(self): @@ -17,12 +14,11 @@ def test_pgpro589(self): check that backup status equal to ERROR check that no files where copied to backup catalogue """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -74,6 +70,3 @@ def test_pgpro589(self): "\n Start LSN was not found in archive but datafiles where " "copied to backup catalogue.\n For example: {0}\n " "It is not optimal".format(file)) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/ptrack.py b/tests/ptrack.py index a01405d6a..2b8d2d49e 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -10,14 +10,10 @@ from threading import Thread -module_name = 'ptrack' - - class PtrackTest(ProbackupTest, unittest.TestCase): def setUp(self): if self.pg_config_version < self.version_to_num('11.0'): return unittest.skip('You need PostgreSQL >= 11 for this test') - self.fname = self.id().split('.')[3] # @unittest.skip("skip") def test_drop_rel_during_backup_ptrack(self): @@ -26,9 +22,9 @@ def test_drop_rel_during_backup_ptrack(self): """ self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, 
self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -85,18 +81,15 @@ def test_drop_rel_during_backup_ptrack(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_without_full(self): """ptrack backup without validated full backup""" node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -125,18 +118,15 @@ def test_ptrack_without_full(self): self.show_pb(backup_dir, 'node')[0]['status'], "ERROR") - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_threads(self): """ptrack multi thread backup mode""" node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -156,9 +146,6 @@ def test_ptrack_threads(self): backup_type="ptrack", options=["-j", "4"]) self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_stop_pg(self): """ @@ -166,9 +153,9 @@ def test_ptrack_stop_pg(self): restart node, check that ptrack backup can be taken """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -193,18 +180,15 @@ def test_ptrack_stop_pg(self): backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_multi_timeline_backup(self): """ t2 /------P2 t1 ------F---*-----P1 """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -269,9 +253,6 @@ def test_ptrack_multi_timeline_backup(self): self.assertEqual('0', balance) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_multi_timeline_backup_1(self): """ @@ -282,9 +263,9 @@ def test_ptrack_multi_timeline_backup_1(self): t2 /------P2 t1 ---F--------* """ - backup_dir = 
os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -343,17 +324,14 @@ def test_ptrack_multi_timeline_backup_1(self): self.assertEqual('0', balance) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_eat_my_data(self): """ PGPRO-4051 """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -372,7 +350,7 @@ def test_ptrack_eat_my_data(self): self.backup_node(backup_dir, 'node', node) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) pgbench = node.pgbench(options=['-T', '300', '-c', '1', '--no-vacuum']) @@ -422,16 +400,13 @@ def test_ptrack_eat_my_data(self): 'SELECT * FROM pgbench_accounts'), 'Data loss') - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_simple(self): """make node, make full and ptrack stream backups," " restore them and check data correctness""" - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -469,7 +444,7 @@ def test_ptrack_simple(self): result = node.safe_psql("postgres", "SELECT * FROM t_heap") node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -491,15 +466,12 @@ def test_ptrack_simple(self): result, node_restored.safe_psql("postgres", "SELECT * FROM t_heap")) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_unprivileged(self): """""" - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -690,9 +662,9 @@ def test_ptrack_unprivileged(self): # @unittest.expectedFailure def test_ptrack_enable(self): """make ptrack without full backup, should result in error""" - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), 
set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', @@ -728,9 +700,6 @@ def test_ptrack_enable(self): ' CMD: {1}'.format(repr(e.message), self.cmd) ) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_disable(self): @@ -739,9 +708,9 @@ def test_ptrack_disable(self): enable ptrack, restart postgresql, take ptrack backup which should fail """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -793,15 +762,12 @@ def test_ptrack_disable(self): ) ) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_uncommitted_xact(self): """make ptrack backup while there is uncommitted open transaction""" - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -831,7 +797,7 @@ def test_ptrack_uncommitted_xact(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -851,18 +817,15 @@ def test_ptrack_uncommitted_xact(self): if self.paranoia: self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_vacuum_full(self): """make node, make full and ptrack stream backups, restore them and check data correctness""" self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -915,7 +878,7 @@ def test_ptrack_vacuum_full(self): process.join() node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() old_tablespace = self.get_tblspace_path(node, 'somedata') @@ -938,18 +901,15 @@ def test_ptrack_vacuum_full(self): node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_vacuum_truncate(self): """make node, create table, take full backup, delete last 3 pages, vacuum relation, take ptrack backup, take second ptrack backup, restore last ptrack backup and check data correctness""" - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( 
- base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -996,7 +956,7 @@ def test_ptrack_vacuum_truncate(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() old_tablespace = self.get_tblspace_path(node, 'somedata') @@ -1021,9 +981,6 @@ def test_ptrack_vacuum_truncate(self): node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_get_block(self): """ @@ -1032,9 +989,9 @@ def test_ptrack_get_block(self): """ self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -1090,16 +1047,13 @@ def test_ptrack_get_block(self): result, node.safe_psql("postgres", "SELECT * FROM t_heap")) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_stream(self): """make node, make full and ptrack stream backups, restore them and check data correctness""" - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1178,16 +1132,13 @@ def test_ptrack_stream(self): ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") self.assertEqual(ptrack_result, ptrack_result_new) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_archive(self): """make archive node, make full and ptrack backups, check data correctness in restored instance""" - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1288,9 +1239,6 @@ def test_ptrack_archive(self): node.cleanup() - # Clean after yourself - self.del_test_dir(module_name, self.fname) - @unittest.skip("skip") def test_ptrack_pgpro417(self): """ @@ -1298,9 +1246,9 @@ def test_ptrack_pgpro417(self): delete ptrack backup. Try to take ptrack backup, which should fail. 
Actual only for PTRACK 1.x """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1366,9 +1314,6 @@ def test_ptrack_pgpro417(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - @unittest.skip("skip") def test_page_pgpro417(self): """ @@ -1376,9 +1321,9 @@ def test_page_pgpro417(self): delete page backup. Try to take ptrack backup, which should fail. Actual only for PTRACK 1.x """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1432,9 +1377,6 @@ def test_page_pgpro417(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - @unittest.skip("skip") def test_full_pgpro417(self): """ @@ -1442,9 +1384,9 @@ def test_full_pgpro417(self): Try to take ptrack backup, which should fail. Relevant only for PTRACK 1.x """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1504,18 +1446,15 @@ def test_full_pgpro417(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_create_db(self): """ Make node, take full backup, create database db1, take ptrack backup, restore database and check it presense """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1558,7 +1497,7 @@ def test_create_db(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -1619,9 +1558,6 @@ def test_create_db(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_create_db_on_replica(self): """ @@ -1630,9 +1566,9 @@ def test_create_db_on_replica(self): create database db1, take ptrack backup from replica, restore database and check it presense """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 
'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1654,7 +1590,7 @@ def test_create_db_on_replica(self): "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.backup_node( @@ -1707,7 +1643,7 @@ def test_create_db_on_replica(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -1720,16 +1656,13 @@ def test_create_db_on_replica(self): node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_alter_table_set_tablespace_ptrack(self): """Make node, create tablespace with table, take full backup, alter tablespace location, take ptrack backup, restore database.""" - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1776,7 +1709,7 @@ def test_alter_table_set_tablespace_ptrack(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -1810,17 +1743,14 @@ def test_alter_table_set_tablespace_ptrack(self): # # self.assertEqual(result, result_new, 'lost some data after restore') - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_alter_database_set_tablespace_ptrack(self): """Make node, create tablespace with database," " take full backup, alter tablespace location," " take ptrack backup, restore database.""" - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1857,7 +1787,7 @@ def test_alter_database_set_tablespace_ptrack(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( backup_dir, 'node', @@ -1878,18 +1808,15 @@ def test_alter_database_set_tablespace_ptrack(self): node_restored.port = node.port node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_drop_tablespace(self): """ Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, 
take ptrack backup """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1972,18 +1899,15 @@ def test_drop_tablespace(self): if self.paranoia: self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_alter_tablespace(self): """ Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -2029,7 +1953,7 @@ def test_ptrack_alter_tablespace(self): # Restore ptrack backup restored_node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'restored_node')) + base_dir=os.path.join(self.module_name, self.fname, 'restored_node')) restored_node.cleanup() tblspc_path_new = self.get_tblspace_path( restored_node, 'somedata_restored') @@ -2087,18 +2011,15 @@ def test_ptrack_alter_tablespace(self): "postgres", "select * from t_heap") self.assertEqual(result, result_new) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_multiple_segments(self): """ Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -2163,7 +2084,7 @@ def test_ptrack_multiple_segments(self): # RESTORE NODE restored_node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'restored_node')) + base_dir=os.path.join(self.module_name, self.fname, 'restored_node')) restored_node.cleanup() tblspc_path = self.get_tblspace_path(node, 'somedata') tblspc_path_new = self.get_tblspace_path( @@ -2196,9 +2117,6 @@ def test_ptrack_multiple_segments(self): if self.paranoia: self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - @unittest.skip("skip") def test_atexit_fail(self): """ @@ -2206,14 +2124,14 @@ def test_atexit_fail(self): Relevant only for PTRACK 1.x """ node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ 'max_connections': '15'}) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) 
node.slow_start() @@ -2248,9 +2166,6 @@ def test_atexit_fail(self): "select * from pg_is_in_backup()").rstrip(), "f") - # Clean after yourself - self.del_test_dir(module_name, self.fname) - @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_clean(self): @@ -2259,12 +2174,12 @@ def test_ptrack_clean(self): Relevant only for PTRACK 1.x """ node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2356,9 +2271,6 @@ def test_ptrack_clean(self): # check that ptrack bits are cleaned self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - @unittest.skip("skip") def test_ptrack_clean_replica(self): """ @@ -2367,14 +2279,14 @@ def test_ptrack_clean_replica(self): Relevant only for PTRACK 1.x """ master = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ 'archive_timeout': '30s'}) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -2382,7 +2294,7 @@ def test_ptrack_clean_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -2494,18 +2406,16 @@ def test_ptrack_clean_replica(self): # check that ptrack bits are cleaned self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) - # Clean after yourself - self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_cluster_on_btree(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2557,18 +2467,15 @@ def test_ptrack_cluster_on_btree(self): if node.major_version < 11: self.check_ptrack_map_sanity(node, idx_ptrack) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_cluster_on_gist(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 
'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2628,18 +2535,15 @@ def test_ptrack_cluster_on_gist(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_cluster_on_btree_replica(self): master = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -2652,7 +2556,7 @@ def test_ptrack_cluster_on_btree_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -2716,7 +2620,7 @@ def test_ptrack_cluster_on_btree_replica(self): pgdata = self.pgdata_content(replica.data_dir) node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node')) + base_dir=os.path.join(self.module_name, self.fname, 'node')) node.cleanup() self.restore_node(backup_dir, 'replica', node) @@ -2724,17 +2628,14 @@ def test_ptrack_cluster_on_btree_replica(self): pgdata_restored = self.pgdata_content(replica.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_cluster_on_gist_replica(self): master = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -2747,7 +2648,7 @@ def test_ptrack_cluster_on_gist_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -2817,7 +2718,7 @@ def test_ptrack_cluster_on_gist_replica(self): pgdata = self.pgdata_content(replica.data_dir) node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node')) + base_dir=os.path.join(self.module_name, self.fname, 'node')) node.cleanup() self.restore_node(backup_dir, 'replica', node) @@ -2826,20 +2727,17 @@ def test_ptrack_cluster_on_gist_replica(self): pgdata_restored = self.pgdata_content(replica.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_empty(self): """Take backups of every available types and check that PTRACK is clean""" node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2877,7 +2775,7 @@ def test_ptrack_empty(self): node.safe_psql('postgres', 'checkpoint') node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() tblspace1 = self.get_tblspace_path(node, 'somedata') @@ -2902,9 +2800,6 @@ def test_ptrack_empty(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_empty_replica(self): @@ -2913,12 +2808,12 @@ def test_ptrack_empty_replica(self): and check that PTRACK on replica is clean """ master = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -2931,7 +2826,7 @@ def test_ptrack_empty_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -2987,7 +2882,7 @@ def test_ptrack_empty_replica(self): pgdata = self.pgdata_content(replica.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -2998,19 +2893,16 @@ def test_ptrack_empty_replica(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_truncate(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3079,13 +2971,10 @@ def test_ptrack_truncate(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_basic_ptrack_truncate_replica(self): master = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'master'), + 
base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -3094,7 +2983,7 @@ def test_basic_ptrack_truncate_replica(self): 'archive_timeout': '10s', 'checkpoint_timeout': '5min'}) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -3107,7 +2996,7 @@ def test_basic_ptrack_truncate_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -3189,7 +3078,7 @@ def test_basic_ptrack_truncate_replica(self): pgdata = self.pgdata_content(replica.data_dir) node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node')) + base_dir=os.path.join(self.module_name, self.fname, 'node')) node.cleanup() self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir) @@ -3208,18 +3097,18 @@ def test_basic_ptrack_truncate_replica(self): 'select 1') # Clean after yourself - self.del_test_dir(module_name, self.fname) + self.del_test_dir(self.module_name, self.fname) # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3295,20 +3184,17 @@ def test_ptrack_vacuum(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_vacuum_replica(self): master = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30'}) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -3321,7 +3207,7 @@ def test_ptrack_vacuum_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -3394,7 +3280,7 @@ def test_ptrack_vacuum_replica(self): pgdata = self.pgdata_content(replica.data_dir) node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node')) + base_dir=os.path.join(self.module_name, self.fname, 'node')) node.cleanup() self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir) @@ -3402,19 
+3288,16 @@ def test_ptrack_vacuum_replica(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_bits_frozen(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3481,18 +3364,15 @@ def test_ptrack_vacuum_bits_frozen(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_vacuum_bits_frozen_replica(self): master = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -3505,7 +3385,7 @@ def test_ptrack_vacuum_bits_frozen_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -3579,19 +3459,16 @@ def test_ptrack_vacuum_bits_frozen_replica(self): pgdata_restored = self.pgdata_content(replica.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_bits_visibility(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3658,19 +3535,16 @@ def test_ptrack_vacuum_bits_visibility(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_full_2(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, pg_options={ 'wal_log_hints': 'on' }) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') 
self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3736,19 +3610,16 @@ def test_ptrack_vacuum_full_2(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_full_replica(self): master = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -3760,7 +3631,7 @@ def test_ptrack_vacuum_full_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -3837,19 +3708,16 @@ def test_ptrack_vacuum_full_replica(self): pgdata_restored = self.pgdata_content(replica.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_truncate_2(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3905,7 +3773,7 @@ def test_ptrack_vacuum_truncate_2(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -3913,19 +3781,16 @@ def test_ptrack_vacuum_truncate_2(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_truncate_replica(self): master = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -3938,7 +3803,7 @@ def test_ptrack_vacuum_truncate_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, 
self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -4010,7 +3875,7 @@ def test_ptrack_vacuum_truncate_replica(self): pgdata = self.pgdata_content(replica.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'replica', node_restored) @@ -4018,9 +3883,6 @@ def test_ptrack_vacuum_truncate_replica(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - @unittest.skip("skip") def test_ptrack_recovery(self): """ @@ -4028,12 +3890,12 @@ def test_ptrack_recovery(self): Actual only for PTRACK 1.x """ node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -4079,14 +3941,11 @@ def test_ptrack_recovery(self): # check that ptrack has correct bits after recovery self.check_ptrack_recovery(idx_ptrack[i]) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_recovery_1(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -4094,7 +3953,7 @@ def test_ptrack_recovery_1(self): 'shared_buffers': '512MB', 'max_wal_size': '3GB'}) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -4155,7 +4014,7 @@ def test_ptrack_recovery_1(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -4164,19 +4023,16 @@ def test_ptrack_recovery_1(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_zero_changes(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -4208,14 +4064,11 @@ def test_ptrack_zero_changes(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - 
self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_pg_resetxlog(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -4223,7 +4076,7 @@ def test_ptrack_pg_resetxlog(self): 'shared_buffers': '512MB', 'max_wal_size': '3GB'}) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -4331,19 +4184,16 @@ def test_ptrack_pg_resetxlog(self): # pgdata_restored = self.pgdata_content(node_restored.data_dir) # self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_corrupt_ptrack_map(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -4495,9 +4345,6 @@ def test_corrupt_ptrack_map(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_horizon_lsn_ptrack(self): """ @@ -4511,9 +4358,9 @@ def test_horizon_lsn_ptrack(self): self.version_to_num('2.4.15'), 'You need pg_probackup old_binary =< 2.4.15 for this test') - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -4560,6 +4407,3 @@ def test_horizon_lsn_ptrack(self): # make sure that backup size is exactly the same self.assertEqual(delta_bytes, ptrack_bytes) - - # Clean after yourself - self.del_test_dir(module_name, self.fname) diff --git a/tests/remote.py b/tests/remote.py index 4d46447f0..2d36d7346 100644 --- a/tests/remote.py +++ b/tests/remote.py @@ -5,21 +5,17 @@ from .helpers.cfs_helpers import find_by_name -module_name = 'remote' - - class RemoteTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") # @unittest.expectedFailure def test_remote_sanity(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -45,6 +41,3 @@ def test_remote_sanity(self): # e.message, # "\n Unexpected Error Message: {0}\n CMD: {1}".format( # repr(e.message), 
self.cmd)) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/replica.py b/tests/replica.py index ea69e2d01..3fb68633f 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -9,8 +9,6 @@ from time import sleep -module_name = 'replica' - class ReplicaTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -21,15 +19,14 @@ def test_replica_switchover(self): over the course of several switchovers https://www.postgresql.org/message-id/54b059d4-2b48-13a4-6f43-95a087c92367%40postgrespro.ru """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), + base_dir=os.path.join(self.module_name, self.fname, 'node1'), set_replication=True, initdb_params=['--data-checksums']) if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -41,7 +38,7 @@ def test_replica_switchover(self): # take full backup and restore it self.backup_node(backup_dir, 'node1', node1, options=['--stream']) node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2')) + base_dir=os.path.join(self.module_name, self.fname, 'node2')) node2.cleanup() # create replica @@ -92,9 +89,6 @@ def test_replica_switchover(self): # https://github.com/postgrespro/pg_probackup/issues/251 self.validate_pb(backup_dir) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_replica_stream_ptrack_backup(self): @@ -109,10 +103,9 @@ def test_replica_stream_ptrack_backup(self): return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -138,7 +131,7 @@ def test_replica_stream_ptrack_backup(self): # take full backup and restore it self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) self.set_replica(master, replica) @@ -172,7 +165,7 @@ def test_replica_stream_ptrack_backup(self): # RESTORE FULL BACKUP TAKEN FROM PREVIOUS STEP node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node')) + base_dir=os.path.join(self.module_name, self.fname, 'node')) node.cleanup() self.restore_node(backup_dir, 'replica', data_dir=node.data_dir) @@ -219,19 +212,15 @@ def test_replica_stream_ptrack_backup(self): after = node.safe_psql("postgres", "SELECT * FROM t_heap") self.assertEqual(before, after) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_replica_archive_page_backup(self): """ make archive master, take full and page archive backups from 
master, set replica, make archive backup from replica """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -240,7 +229,7 @@ def test_replica_archive_page_backup(self): 'max_wal_size': '32MB'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -250,7 +239,7 @@ def test_replica_archive_page_backup(self): master.slow_start() replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.backup_node(backup_dir, 'master', master) @@ -305,7 +294,7 @@ def test_replica_archive_page_backup(self): # RESTORE FULL BACKUP TAKEN FROM replica node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node')) + base_dir=os.path.join(self.module_name, self.fname, 'node')) node.cleanup() self.restore_node(backup_dir, 'replica', data_dir=node.data_dir) @@ -363,26 +352,22 @@ def test_replica_archive_page_backup(self): self.backup_node( backup_dir, 'node', node, options=['--stream']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_make_replica_via_restore(self): """ make archive master, take full and page archive backups from master, set replica, make archive backup from replica """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'archive_timeout': '10s'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -392,7 +377,7 @@ def test_basic_make_replica_via_restore(self): master.slow_start() replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.backup_node(backup_dir, 'master', master) @@ -421,9 +406,6 @@ def test_basic_make_replica_via_restore(self): backup_dir, 'replica', replica, options=['--archive-timeout=30s', '--stream']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_take_backup_from_delayed_replica(self): """ @@ -431,16 +413,15 @@ def test_take_backup_from_delayed_replica(self): restore full backup as delayed replica, launch pgbench, take FULL, PAGE and DELTA backups from replica """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 
'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={'archive_timeout': '10s'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -450,7 +431,7 @@ def test_take_backup_from_delayed_replica(self): master.slow_start() replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.backup_node(backup_dir, 'master', master) @@ -530,9 +511,6 @@ def test_take_backup_from_delayed_replica(self): pgbench.wait() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_replica_promote(self): """ @@ -541,10 +519,9 @@ def test_replica_promote(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -553,7 +530,7 @@ def test_replica_promote(self): 'max_wal_size': '32MB'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -563,7 +540,7 @@ def test_replica_promote(self): master.slow_start() replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.backup_node(backup_dir, 'master', master) @@ -624,19 +601,15 @@ def test_replica_promote(self): 'setting its status to ERROR'.format(backup_id), log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_replica_stop_lsn_null_offset(self): """ """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -644,7 +617,7 @@ def test_replica_stop_lsn_null_offset(self): 'wal_level': 'replica'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -661,7 +634,7 @@ def test_replica_stop_lsn_null_offset(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'node', replica) @@ -710,7 +683,6 @@ def test_replica_stop_lsn_null_offset(self): # Clean after yourself gdb_checkpointer.kill() - self.del_test_dir(module_name, fname) # 
@unittest.skip("skip") def test_replica_stop_lsn_null_offset_next_record(self): @@ -718,10 +690,9 @@ def test_replica_stop_lsn_null_offset_next_record(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -729,7 +700,7 @@ def test_replica_stop_lsn_null_offset_next_record(self): 'wal_level': 'replica'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -745,7 +716,7 @@ def test_replica_stop_lsn_null_offset_next_record(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -812,19 +783,15 @@ def test_replica_stop_lsn_null_offset_next_record(self): self.assertTrue(self.show_pb(backup_dir, 'replica')[0]['status'] == 'DONE') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_archive_replica_null_offset(self): """ """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -832,7 +799,7 @@ def test_archive_replica_null_offset(self): 'wal_level': 'replica'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -845,7 +812,7 @@ def test_archive_replica_null_offset(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'node', replica) @@ -898,17 +865,13 @@ def test_archive_replica_null_offset(self): print(output) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_archive_replica_not_null_offset(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -916,7 +879,7 @@ def test_archive_replica_not_null_offset(self): 'wal_level': 'replica'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 
'Skipped because backup from replica is not supported in PG 9.5') @@ -929,7 +892,7 @@ def test_archive_replica_not_null_offset(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'node', replica) @@ -982,9 +945,6 @@ def test_archive_replica_not_null_offset(self): "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_replica_toast(self): """ @@ -993,10 +953,9 @@ def test_replica_toast(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1005,7 +964,7 @@ def test_replica_toast(self): 'shared_buffers': '128MB'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -1022,7 +981,7 @@ def test_replica_toast(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -1088,17 +1047,15 @@ def test_replica_toast(self): # Clean after yourself gdb_checkpointer.kill() - self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_start_stop_lsn_in_the_same_segno(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1107,7 +1064,7 @@ def test_start_stop_lsn_in_the_same_segno(self): 'shared_buffers': '128MB'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -1122,7 +1079,7 @@ def test_start_stop_lsn_in_the_same_segno(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -1167,17 +1124,13 @@ def test_start_stop_lsn_in_the_same_segno(self): '--stream'], return_id=False) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_replica_promote_1(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + 
base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1185,7 +1138,7 @@ def test_replica_promote_1(self): 'wal_level': 'replica'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -1199,7 +1152,7 @@ def test_replica_promote_1(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -1242,17 +1195,13 @@ def test_replica_promote_1(self): os.path.exists(wal_file_partial), "File {0} disappeared".format(wal_file_partial)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_replica_promote_2(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums']) @@ -1267,7 +1216,7 @@ def test_replica_promote_2(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -1291,9 +1240,6 @@ def test_replica_promote_2(self): backup_dir, 'master', replica, data_dir=replica.data_dir, backup_type='page') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_replica_promote_archive_delta(self): """ @@ -1301,10 +1247,9 @@ def test_replica_promote_archive_delta(self): t2 /-------> t1 --F---D1--D2-- """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), + base_dir=os.path.join(self.module_name, self.fname, 'node1'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1312,7 +1257,7 @@ def test_replica_promote_archive_delta(self): 'archive_timeout': '30s'}) if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -1328,7 +1273,7 @@ def test_replica_promote_archive_delta(self): # Create replica node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2')) + base_dir=os.path.join(self.module_name, self.fname, 'node2')) node2.cleanup() self.restore_node(backup_dir, 'node', node2, node2.data_dir) @@ -1416,9 +1361,6 @@ def test_replica_promote_archive_delta(self): pgdata_restored = self.pgdata_content(node1.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_replica_promote_archive_page(self): """ @@ -1426,10 +1368,9 @@ def test_replica_promote_archive_page(self): t2 /-------> t1 
--F---P1--P2-- """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), + base_dir=os.path.join(self.module_name, self.fname, 'node1'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1437,7 +1378,7 @@ def test_replica_promote_archive_page(self): 'archive_timeout': '30s'}) if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -1453,7 +1394,7 @@ def test_replica_promote_archive_page(self): # Create replica node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2')) + base_dir=os.path.join(self.module_name, self.fname, 'node2')) node2.cleanup() self.restore_node(backup_dir, 'node', node2, node2.data_dir) @@ -1544,22 +1485,18 @@ def test_replica_promote_archive_page(self): pgdata_restored = self.pgdata_content(node1.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_parent_choosing(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums']) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -1572,7 +1509,7 @@ def test_parent_choosing(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -1614,17 +1551,13 @@ def test_parent_choosing(self): backup_dir, 'replica', replica, backup_type='delta', options=['--stream']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_instance_from_the_past(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1661,17 +1594,13 @@ def test_instance_from_the_past(self): "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_replica_via_basebackup(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={'hot_standby': 'on'}) @@ -1721,7 +1650,7 @@ def test_replica_via_basebackup(self): node.slow_start() node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() pg_basebackup_path = self.get_bin_path('pg_basebackup') @@ -1735,9 +1664,6 @@ def test_replica_via_basebackup(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start(replica=True) - # Clean after yourself - self.del_test_dir(module_name, fname) - # TODO: # null offset STOP LSN and latest record in previous segment is conrecord (manual only) # archiving from promoted delayed replica diff --git a/tests/restore.py b/tests/restore.py index 49538bd1f..52db63c1c 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -13,21 +13,17 @@ from stat import S_ISDIR -module_name = 'restore' - - class RestoreTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") # @unittest.expectedFailure def test_restore_full_to_latest(self): """recovery to latest from full backup""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -67,18 +63,14 @@ def test_restore_full_to_latest(self): after = node.execute("postgres", "SELECT * FROM pgbench_branches") self.assertEqual(before, after) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_full_page_to_latest(self): """recovery to latest from full + page backups""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -113,18 +105,14 @@ def test_restore_full_page_to_latest(self): after = node.execute("postgres", "SELECT * FROM pgbench_branches") self.assertEqual(before, after) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_to_specific_timeline(self): """recovery to target timeline""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -179,19 +167,15 @@ def test_restore_to_specific_timeline(self): after = node.execute("postgres", "SELECT * FROM pgbench_branches") self.assertEqual(before, after) - # 
Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_to_time(self): """recovery to target time""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], pg_options={'TimeZone': 'GMT'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -229,18 +213,14 @@ def test_restore_to_time(self): after = node.execute("postgres", "SELECT * FROM pgbench_branches") self.assertEqual(before, after) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_to_xid_inclusive(self): """recovery to target xid""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -289,18 +269,14 @@ def test_restore_to_xid_inclusive(self): self.assertEqual( len(node.execute("postgres", "SELECT * FROM tbl0005")), 1) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_to_xid_not_inclusive(self): """recovery with target inclusive false""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -350,22 +326,18 @@ def test_restore_to_xid_not_inclusive(self): self.assertEqual( len(node.execute("postgres", "SELECT * FROM tbl0005")), 0) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_to_lsn_inclusive(self): """recovery to target lsn""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) if self.get_version(node) < self.version_to_num('10.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -421,22 +393,18 @@ def test_restore_to_lsn_inclusive(self): self.assertEqual( len(node.execute("postgres", "SELECT * FROM tbl0005")), 2) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_to_lsn_not_inclusive(self): """recovery to target lsn""" - fname = 
self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) if self.get_version(node) < self.version_to_num('10.0'): - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -493,22 +461,18 @@ def test_restore_to_lsn_not_inclusive(self): self.assertEqual( len(node.execute("postgres", "SELECT * FROM tbl0005")), 1) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_full_ptrack_archive(self): """recovery to latest from archive full+ptrack backups""" if not self.ptrack: return unittest.skip('Skipped because ptrack support is disabled') - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -547,22 +511,18 @@ def test_restore_full_ptrack_archive(self): after = node.execute("postgres", "SELECT * FROM pgbench_branches") self.assertEqual(before, after) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_ptrack(self): """recovery to latest from archive full+ptrack+ptrack backups""" if not self.ptrack: return unittest.skip('Skipped because ptrack support is disabled') - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -608,23 +568,19 @@ def test_restore_ptrack(self): after = node.execute("postgres", "SELECT * FROM pgbench_branches") self.assertEqual(before, after) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_full_ptrack_stream(self): """recovery in stream mode to latest from full + ptrack backups""" if not self.ptrack: return unittest.skip('Skipped because ptrack support is disabled') - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -663,9 +619,6 @@ def test_restore_full_ptrack_stream(self): after = 
node.execute("postgres", "SELECT * FROM pgbench_branches") self.assertEqual(before, after) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_full_ptrack_under_load(self): """ @@ -675,14 +628,13 @@ def test_restore_full_ptrack_under_load(self): if not self.ptrack: return unittest.skip('Skipped because ptrack support is disabled') - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -732,9 +684,6 @@ def test_restore_full_ptrack_under_load(self): "postgres", "SELECT sum(delta) FROM pgbench_history") self.assertEqual(bbalance, delta) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_full_under_load_ptrack(self): """ @@ -744,14 +693,13 @@ def test_restore_full_under_load_ptrack(self): if not self.ptrack: return unittest.skip('Skipped because ptrack support is disabled') - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -803,18 +751,14 @@ def test_restore_full_under_load_ptrack(self): "postgres", "SELECT sum(delta) FROM pgbench_history") self.assertEqual(bbalance, delta) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_with_tablespace_mapping_1(self): """recovery using tablespace-mapping option""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -940,18 +884,14 @@ def test_restore_with_tablespace_mapping_1(self): result = node.execute("postgres", "SELECT id FROM test OFFSET 1") self.assertEqual(result[0][0], 2) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_with_tablespace_mapping_2(self): """recovery using tablespace-mapping option and page backup""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) 
self.set_archiving(backup_dir, 'node', node) @@ -1017,18 +957,14 @@ def test_restore_with_tablespace_mapping_2(self): count = node.execute("postgres", "SELECT count(*) FROM tbl1") self.assertEqual(count[0][0], 4) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_with_missing_or_corrupted_tablespace_map(self): """restore backup with missing or corrupted tablespace_map""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1051,7 +987,7 @@ def test_restore_with_missing_or_corrupted_tablespace_map(self): pgdata = self.pgdata_content(node.data_dir) node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2')) + base_dir=os.path.join(self.module_name, self.fname, 'node2')) node2.cleanup() olddir = self.get_tblspace_path(node, 'tblspace') @@ -1147,22 +1083,18 @@ def test_restore_with_missing_or_corrupted_tablespace_map(self): pgdata_restored = self.pgdata_content(node2.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_archive_node_backup_stream_restore_to_recovery_time(self): """ make node with archiving, make stream backup, make PITR to Recovery Time """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1195,9 +1127,6 @@ def test_archive_node_backup_stream_restore_to_recovery_time(self): result = node.psql("postgres", 'select * from t_heap') self.assertTrue('does not exist' in result[2].decode("utf-8")) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_archive_node_backup_stream_restore_to_recovery_time(self): @@ -1205,13 +1134,12 @@ def test_archive_node_backup_stream_restore_to_recovery_time(self): make node with archiving, make stream backup, make PITR to Recovery Time """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1242,9 +1170,6 @@ def test_archive_node_backup_stream_restore_to_recovery_time(self): result = node.psql("postgres", 'select * from t_heap') self.assertTrue('does not exist' in result[2].decode("utf-8")) - # Clean after yourself - self.del_test_dir(module_name, fname) - # 
@unittest.skip("skip") # @unittest.expectedFailure def test_archive_node_backup_stream_pitr(self): @@ -1253,13 +1178,12 @@ def test_archive_node_backup_stream_pitr(self): create table t_heap, make pitr to Recovery Time, check that t_heap do not exists """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1290,9 +1214,6 @@ def test_archive_node_backup_stream_pitr(self): result = node.psql("postgres", 'select * from t_heap') self.assertEqual(True, 'does not exist' in result[2].decode("utf-8")) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_archive_node_backup_archive_pitr_2(self): @@ -1301,12 +1222,11 @@ def test_archive_node_backup_archive_pitr_2(self): create table t_heap, make pitr to Recovery Time, check that t_heap do not exists """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1320,7 +1240,7 @@ def test_archive_node_backup_archive_pitr_2(self): node.stop() node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() recovery_time = self.show_pb( @@ -1348,9 +1268,6 @@ def test_archive_node_backup_archive_pitr_2(self): result = node_restored.psql("postgres", 'select * from t_heap') self.assertTrue('does not exist' in result[2].decode("utf-8")) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_archive_restore_to_restore_point(self): @@ -1359,12 +1276,11 @@ def test_archive_restore_to_restore_point(self): create table t_heap, make pitr to Recovery Time, check that t_heap do not exists """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1401,18 +1317,14 @@ def test_archive_restore_to_restore_point(self): self.assertEqual(result, result_new) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") # @unittest.expectedFailure def test_zags_block_corrupt(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), 
initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1463,7 +1375,7 @@ def test_zags_block_corrupt(self): "insert into tbl select i from generate_series(0,100) as i") node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored'), + base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), initdb_params=['--data-checksums']) node_restored.cleanup() @@ -1480,14 +1392,13 @@ def test_zags_block_corrupt(self): @unittest.skip("skip") # @unittest.expectedFailure def test_zags_block_corrupt_1(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], pg_options={ 'full_page_writes': 'on'} ) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1538,7 +1449,7 @@ def test_zags_block_corrupt_1(self): self.switch_wal_segment(node) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored'), + base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), initdb_params=['--data-checksums']) pgdata = self.pgdata_content(node.data_dir) @@ -1588,13 +1499,12 @@ def test_restore_chain(self): ERROR delta backups, take valid delta backup, restore must be successfull """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1670,19 +1580,15 @@ def test_restore_chain(self): self.restore_node(backup_dir, 'node', node) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_chain_with_corrupted_backup(self): """more complex test_restore_chain()""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1850,9 +1756,6 @@ def test_restore_chain_with_corrupted_backup(self): node.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - # Skipped, because backups from the future are invalid. # This cause a "ERROR: Can't assign backup_id, there is already a backup in future" # now (PBCKP-259). 
We can conduct such a test again when we @@ -1860,13 +1763,12 @@ def test_restore_chain_with_corrupted_backup(self): @unittest.skip("skip") def test_restore_backup_from_future(self): """more complex test_restore_chain()""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1909,22 +1811,18 @@ def test_restore_backup_from_future(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_target_immediate_stream(self): """ correct handling of immediate recovery target for STREAM backups """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -1966,22 +1864,18 @@ def test_restore_target_immediate_stream(self): os.path.isfile(recovery_conf), "File {0} do not exists".format(recovery_conf)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_target_immediate_archive(self): """ correct handling of immediate recovery target for ARCHIVE backups """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2026,22 +1920,18 @@ def test_restore_target_immediate_archive(self): with open(recovery_conf, 'r') as f: self.assertIn("recovery_target = 'immediate'", f.read()) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_target_latest_archive(self): """ make sure that recovery_target 'latest' is default recovery target """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2092,22 +1982,18 @@ def test_restore_target_latest_archive(self): self.assertEqual(content_1, content_2) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") 
def test_restore_target_new_options(self): """ check that new --recovery-target-* options are working correctly """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2272,9 +2158,6 @@ def test_restore_target_new_options(self): node.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_smart_restore(self): """ @@ -2284,13 +2167,12 @@ def test_smart_restore(self): copied during restore https://github.com/postgrespro/pg_probackup/issues/63 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2336,9 +2218,6 @@ def test_smart_restore(self): for file in filelist_diff: self.assertNotIn(file, logfile_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_pg_11_group_access(self): """ @@ -2347,15 +2226,14 @@ def test_pg_11_group_access(self): if self.pg_config_version < self.version_to_num('11.0'): return unittest.skip('You need PostgreSQL >= 11 for this test') - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=[ '--data-checksums', '--allow-group-access']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2367,7 +2245,7 @@ def test_pg_11_group_access(self): # restore backup node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -2377,18 +2255,14 @@ def test_pg_11_group_access(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_concurrent_drop_table(self): """""" self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2434,16 +2308,12 @@ def test_restore_concurrent_drop_table(self): pgdata_restored = 
self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_lost_non_data_file(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2481,15 +2351,11 @@ def test_lost_non_data_file(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_partial_restore_exclude(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -2520,7 +2386,7 @@ def test_partial_restore_exclude(self): # restore FULL backup node_restored_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) node_restored_1.cleanup() try: @@ -2557,7 +2423,7 @@ def test_partial_restore_exclude(self): pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) node_restored_2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored_2')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_2')) node_restored_2.cleanup() self.restore_node( @@ -2596,15 +2462,11 @@ def test_partial_restore_exclude(self): self.assertNotIn('PANIC', output) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_partial_restore_exclude_tablespace(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -2651,7 +2513,7 @@ def test_partial_restore_exclude_tablespace(self): # restore FULL backup node_restored_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) node_restored_1.cleanup() node1_tablespace = self.get_tblspace_path(node_restored_1, 'somedata') @@ -2677,7 +2539,7 @@ def test_partial_restore_exclude_tablespace(self): pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) node_restored_2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored_2')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_2')) node_restored_2.cleanup() node2_tablespace = self.get_tblspace_path(node_restored_2, 'somedata') @@ -2719,16 +2581,12 @@ def test_partial_restore_exclude_tablespace(self): self.assertNotIn('PANIC', output) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_partial_restore_include(self): """ """ - fname = 
self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -2759,7 +2617,7 @@ def test_partial_restore_include(self): # restore FULL backup node_restored_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) node_restored_1.cleanup() try: @@ -2798,7 +2656,7 @@ def test_partial_restore_include(self): pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) node_restored_2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored_2')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_2')) node_restored_2.cleanup() self.restore_node( @@ -2845,9 +2703,6 @@ def test_partial_restore_include(self): self.assertNotIn('PANIC', output) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_partial_restore_backward_compatibility_1(self): """ old binary should be of version < 2.2.0 @@ -2855,10 +2710,9 @@ def test_partial_restore_backward_compatibility_1(self): if not self.probackup_old_path: self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2881,7 +2735,7 @@ def test_partial_restore_backward_compatibility_1(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() try: @@ -2941,7 +2795,7 @@ def test_partial_restore_backward_compatibility_1(self): # get new node node_restored_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) node_restored_1.cleanup() self.restore_node( @@ -2961,10 +2815,9 @@ def test_partial_restore_backward_compatibility_merge(self): if not self.probackup_old_path: self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2987,7 +2840,7 @@ def test_partial_restore_backward_compatibility_merge(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() try: @@ -3047,7 +2900,7 @@ def test_partial_restore_backward_compatibility_merge(self): # get new node node_restored_1 = 
self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) node_restored_1.cleanup() # merge @@ -3065,10 +2918,9 @@ def test_partial_restore_backward_compatibility_merge(self): def test_empty_and_mangled_database_map(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -3096,7 +2948,7 @@ def test_empty_and_mangled_database_map(self): f.close() node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() try: @@ -3178,10 +3030,9 @@ def test_empty_and_mangled_database_map(self): def test_missing_database_map(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -3375,7 +3226,7 @@ def test_missing_database_map(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() # backup has missing database_map and that is legal @@ -3419,9 +3270,6 @@ def test_missing_database_map(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_stream_restore_command_option(self): """ @@ -3436,14 +3284,13 @@ def test_stream_restore_command_option(self): as replica, check that PostgreSQL recovery uses restore_command to obtain WAL from archive. 
""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={'max_wal_size': '32MB'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -3500,20 +3347,16 @@ def test_stream_restore_command_option(self): self.assertEqual('2', timeline_id) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_primary_conninfo(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3526,7 +3369,7 @@ def test_restore_primary_conninfo(self): #primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass' replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() str_conninfo='host=192.168.1.50 port=5432 user=foo password=foopass' @@ -3553,20 +3396,16 @@ def test_restore_primary_conninfo(self): self.assertIn(str_conninfo, recovery_conf_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_primary_slot_info(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3577,7 +3416,7 @@ def test_restore_primary_slot_info(self): node.pgbench_init(scale=1) replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() node.safe_psql( @@ -3598,17 +3437,13 @@ def test_restore_primary_slot_info(self): replica.slow_start(replica=True) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_issue_249(self): """ https://github.com/postgrespro/pg_probackup/issues/249 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -3643,7 +3478,7 @@ def test_issue_249(self): # restore FULL backup node_restored_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored_1')) + 
base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) node_restored_1.cleanup() self.restore_node( @@ -3667,9 +3502,6 @@ def test_issue_249(self): except QueryException as e: self.assertIn('FATAL', e.message) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_pg_12_probackup_recovery_conf_compatibility(self): """ https://github.com/postgrespro/pg_probackup/issues/249 @@ -3685,10 +3517,9 @@ def test_pg_12_probackup_recovery_conf_compatibility(self): if self.version_to_num(self.old_probackup_version) >= self.version_to_num('2.4.5'): self.assertTrue(False, 'You need pg_probackup < 2.4.5 for this test') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -3741,9 +3572,6 @@ def test_pg_12_probackup_recovery_conf_compatibility(self): node.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_drop_postgresql_auto_conf(self): """ https://github.com/postgrespro/pg_probackup/issues/249 @@ -3754,10 +3582,9 @@ def test_drop_postgresql_auto_conf(self): if self.pg_config_version < self.version_to_num('12.0'): return unittest.skip('You need PostgreSQL >= 12 for this test') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -3786,9 +3613,6 @@ def test_drop_postgresql_auto_conf(self): self.assertTrue(os.path.exists(auto_path)) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_truncate_postgresql_auto_conf(self): """ https://github.com/postgrespro/pg_probackup/issues/249 @@ -3799,10 +3623,9 @@ def test_truncate_postgresql_auto_conf(self): if self.pg_config_version < self.version_to_num('12.0'): return unittest.skip('You need PostgreSQL >= 12 for this test') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -3831,18 +3654,14 @@ def test_truncate_postgresql_auto_conf(self): self.assertTrue(os.path.exists(auto_path)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_concurrent_restore(self): """""" self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -3869,7 +3688,7 @@ def 
test_concurrent_restore(self): pgdata1 = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node.cleanup() node_restored.cleanup() @@ -3892,19 +3711,15 @@ def test_concurrent_restore(self): self.compare_pgdata(pgdata1, pgdata2) self.compare_pgdata(pgdata2, pgdata3) - # Clean after yourself - self.del_test_dir(module_name, fname) - # skip this test until https://github.com/postgrespro/pg_probackup/pull/399 @unittest.skip("skip") def test_restore_issue_313(self): """ Check that partially restored PostgreSQL instance cannot be started """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -3925,7 +3740,7 @@ def test_restore_issue_313(self): count += 1 node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -3956,18 +3771,14 @@ def test_restore_issue_313(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_with_waldir(self): """recovery using tablespace-mapping option and page backup""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -4011,6 +3822,3 @@ def test_restore_with_waldir(self): wal_path=os.path.join(node.data_dir, "pg_xlog") self.assertEqual(os.path.islink(wal_path), True) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/retention.py b/tests/retention.py index 122ab28ad..5043366f4 100644 --- a/tests/retention.py +++ b/tests/retention.py @@ -6,21 +6,17 @@ from distutils.dir_util import copy_tree -module_name = 'retention' - - class RetentionTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") # @unittest.expectedFailure def test_retention_redundancy_1(self): """purge backups using redundancy-based retention policy""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -71,18 +67,14 @@ def test_retention_redundancy_1(self): self.assertTrue(wal_name >= min_wal) self.assertTrue(wal_name <= max_wal) - # Clean after yourself - 
self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_retention_window_2(self): """purge backups using window-based retention policy""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -124,18 +116,14 @@ def test_retention_window_2(self): self.delete_expired(backup_dir, 'node', options=['--expired']) self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_retention_window_3(self): """purge all backups using window-based retention policy""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -171,18 +159,14 @@ def test_retention_window_3(self): # count wal files in ARCHIVE - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_retention_window_4(self): """purge all backups using window-based retention policy""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -232,18 +216,14 @@ def test_retention_window_4(self): n_wals = len(os.listdir(wals_dir)) self.assertTrue(n_wals == 0) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_window_expire_interleaved_incremental_chains(self): """complicated case of interleaved backup chains""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -356,18 +336,14 @@ def test_window_expire_interleaved_incremental_chains(self): print(self.show_pb( backup_dir, 'node', as_json=False, as_text=True)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_redundancy_expire_interleaved_incremental_chains(self): """complicated case of interleaved backup chains""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -466,18 +442,14 @@ def test_redundancy_expire_interleaved_incremental_chains(self): print(self.show_pb( backup_dir, 'node', as_json=False, as_text=True)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_window_merge_interleaved_incremental_chains(self): """complicated case of interleaved backup chains""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -603,9 +575,6 @@ def test_window_merge_interleaved_incremental_chains(self): self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_window_merge_interleaved_incremental_chains_1(self): """ @@ -616,12 +585,11 @@ def test_window_merge_interleaved_incremental_chains_1(self): FULLb FULLa """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -745,9 +713,6 @@ def test_window_merge_interleaved_incremental_chains_1(self): pgdata_restored_b3 = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata_b3, pgdata_restored_b3) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_window_merge_multiple_descendants(self): """ @@ -761,12 +726,11 @@ def test_basic_window_merge_multiple_descendants(self): FULLb | FULLa """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1005,9 +969,6 @@ def test_basic_window_merge_multiple_descendants(self): self.show_pb(backup_dir, 'node')[0]['backup-mode'], 'FULL') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_window_merge_multiple_descendants_1(self): """ @@ -1021,12 +982,11 @@ def test_basic_window_merge_multiple_descendants_1(self): FULLb | FULLa """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1271,9 +1231,6 @@ def test_basic_window_merge_multiple_descendants_1(self): '--retention-window=1', '--delete-expired', '--log-level-console=log']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_window_chains(self): """ @@ -1286,12 +1243,11 @@ def test_window_chains(self): PAGE FULL """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1357,9 +1313,6 @@ def test_window_chains(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_window_chains_1(self): """ @@ -1372,12 +1325,11 @@ def test_window_chains_1(self): PAGE FULL """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1451,9 +1403,6 @@ def test_window_chains_1(self): "Purging finished", output) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_window_error_backups(self): """ @@ -1466,12 +1415,11 @@ def test_window_error_backups(self): FULL -------redundancy """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1488,9 +1436,6 @@ def test_window_error_backups(self): # Change FULLb backup status to ERROR # self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_window_error_backups_1(self): """ @@ -1501,12 +1446,11 @@ def test_window_error_backups_1(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = 
os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1538,9 +1482,6 @@ def test_window_error_backups_1(self): self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_window_error_backups_2(self): """ @@ -1551,12 +1492,11 @@ def test_window_error_backups_2(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1589,23 +1529,19 @@ def test_window_error_backups_2(self): self.assertEqual(len(self.show_pb(backup_dir, 'node')), 3) - # Clean after yourself - # self.del_test_dir(module_name, fname) - def test_retention_redundancy_overlapping_chains(self): """""" self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) if self.get_version(node) < 90600: - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip('Skipped because ptrack support is disabled') - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1639,23 +1575,19 @@ def test_retention_redundancy_overlapping_chains(self): self.validate_pb(backup_dir, 'node') - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_retention_redundancy_overlapping_chains_1(self): """""" self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) if self.get_version(node) < 90600: - self.del_test_dir(module_name, fname) + self.del_test_dir(self.module_name, self.fname) return unittest.skip('Skipped because ptrack support is disabled') - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1689,19 +1621,15 @@ def test_retention_redundancy_overlapping_chains_1(self): self.validate_pb(backup_dir, 'node') - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_wal_purge_victim(self): """ https://github.com/postgrespro/pg_probackup/issues/103 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + 
backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1746,9 +1674,6 @@ def test_wal_purge_victim(self): "WARNING: Backup {0} has missing parent 0".format(page_id), e.message) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_failed_merge_redundancy_retention(self): """ @@ -1756,11 +1681,10 @@ def test_failed_merge_redundancy_retention(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( base_dir=os.path.join( - module_name, fname, 'node'), + self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1844,9 +1768,6 @@ def test_failed_merge_redundancy_retention(self): self.assertEqual(len(self.show_pb(backup_dir, 'node')), 10) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_wal_depth_1(self): """ |-------------B5----------> WAL timeline3 @@ -1855,10 +1776,9 @@ def test_wal_depth_1(self): wal-depth=2 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1901,7 +1821,7 @@ def test_wal_depth_1(self): # Timeline 2 node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -1960,8 +1880,6 @@ def test_wal_depth_1(self): self.validate_pb(backup_dir, 'node') - self.del_test_dir(module_name, fname) - def test_wal_purge(self): """ -------------------------------------> tli5 @@ -1982,10 +1900,9 @@ def test_wal_purge(self): wal-depth=2 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2022,7 +1939,7 @@ def test_wal_purge(self): # TLI 2 node_tli2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_tli2')) + base_dir=os.path.join(self.module_name, self.fname, 'node_tli2')) node_tli2.cleanup() output = self.restore_node( @@ -2056,7 +1973,7 @@ def test_wal_purge(self): # TLI3 node_tli3 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_tli3')) + base_dir=os.path.join(self.module_name, self.fname, 'node_tli3')) node_tli3.cleanup() # Note, that successful validation here is a happy coincidence @@ -2077,7 +1994,7 @@ def test_wal_purge(self): # TLI4 node_tli4 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_tli4')) + base_dir=os.path.join(self.module_name, self.fname, 'node_tli4')) node_tli4.cleanup() self.restore_node( @@ -2099,7 +2016,7 @@ def test_wal_purge(self): # TLI5 node_tli5 = self.make_simple_node( - 
base_dir=os.path.join(module_name, fname, 'node_tli5')) + base_dir=os.path.join(self.module_name, self.fname, 'node_tli5')) node_tli5.cleanup() self.restore_node( @@ -2182,8 +2099,6 @@ def test_wal_purge(self): self.validate_pb(backup_dir, 'node') - self.del_test_dir(module_name, fname) - def test_wal_depth_2(self): """ -------------------------------------> tli5 @@ -2205,10 +2120,9 @@ def test_wal_depth_2(self): wal-depth=2 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2243,7 +2157,7 @@ def test_wal_depth_2(self): # TLI 2 node_tli2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_tli2')) + base_dir=os.path.join(self.module_name, self.fname, 'node_tli2')) node_tli2.cleanup() output = self.restore_node( @@ -2277,7 +2191,7 @@ def test_wal_depth_2(self): # TLI3 node_tli3 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_tli3')) + base_dir=os.path.join(self.module_name, self.fname, 'node_tli3')) node_tli3.cleanup() # Note, that successful validation here is a happy coincidence @@ -2298,7 +2212,7 @@ def test_wal_depth_2(self): # TLI4 node_tli4 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_tli4')) + base_dir=os.path.join(self.module_name, self.fname, 'node_tli4')) node_tli4.cleanup() self.restore_node( @@ -2320,7 +2234,7 @@ def test_wal_depth_2(self): # TLI5 node_tli5 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_tli5')) + base_dir=os.path.join(self.module_name, self.fname, 'node_tli5')) node_tli5.cleanup() self.restore_node( @@ -2439,8 +2353,6 @@ def test_wal_depth_2(self): self.validate_pb(backup_dir, 'node') - self.del_test_dir(module_name, fname) - def test_basic_wal_depth(self): """ B1---B1----B3-----B4----B5------> tli1 @@ -2450,10 +2362,9 @@ def test_basic_wal_depth(self): wal-depth=1 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2542,18 +2453,15 @@ def test_basic_wal_depth(self): self.validate_pb(backup_dir, 'node') - self.del_test_dir(module_name, fname) - def test_concurrent_running_full_backup(self): """ https://github.com/postgrespro/pg_probackup/issues/328 """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2621,5 +2529,3 @@ def test_concurrent_running_full_backup(self): self.assertEqual( len(self.show_pb(backup_dir, 'node')), 6) - - self.del_test_dir(module_name, fname) diff --git a/tests/set_backup.py b/tests/set_backup.py index 02ce007bf..e789d174a 100644 --- 
a/tests/set_backup.py +++ b/tests/set_backup.py @@ -5,8 +5,6 @@ from sys import exit from datetime import datetime, timedelta -module_name = 'set_backup' - class SetBackupTest(ProbackupTest, unittest.TestCase): @@ -14,10 +12,9 @@ class SetBackupTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_set_backup_sanity(self): """general sanity for set-backup command""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -120,19 +117,15 @@ def test_set_backup_sanity(self): # parse string to datetime object #new_expire_time = datetime.strptime(new_expire_time, '%Y-%m-%d %H:%M:%S%z') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_retention_redundancy_pinning(self): """""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -174,18 +167,14 @@ def test_retention_redundancy_pinning(self): '{1} is guarded by retention'.format(full_id, page_id), log) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_retention_window_pinning(self): """purge all backups using window-based retention policy""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -237,9 +226,6 @@ def test_retention_window_pinning(self): '{1} is guarded by retention'.format(backup_id_1, page1), out) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_wal_retention_and_pinning(self): """ @@ -251,13 +237,12 @@ def test_wal_retention_and_pinning(self): B1 B2---P---B3---> """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -317,9 +302,6 @@ def test_wal_retention_and_pinning(self): '000000010000000000000004') self.assertEqual(timeline['status'], 'OK') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_wal_retention_and_pinning_1(self): """ @@ -331,12 
+313,11 @@ def test_wal_retention_and_pinning_1(self): P---B1---> """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -383,19 +364,15 @@ def test_wal_retention_and_pinning_1(self): self.validate_pb(backup_dir) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_add_note_newlines(self): """""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -418,19 +395,15 @@ def test_add_note_newlines(self): backup_meta = self.show_pb(backup_dir, 'node', backup_id) self.assertNotIn('note', backup_meta) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_add_big_note(self): """""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -472,19 +445,16 @@ def test_add_big_note(self): backup_meta = self.show_pb(backup_dir, 'node', backup_id) self.assertEqual(backup_meta['note'], note) - # Clean after yourself - self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_add_big_note_1(self): """""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -504,6 +474,3 @@ def test_add_big_note_1(self): print(backup_meta) self.assertEqual(backup_meta['note'], note) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/show.py b/tests/show.py index 5a46e5ef7..c4b96499d 100644 --- a/tests/show.py +++ b/tests/show.py @@ -3,19 +3,15 @@ from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -module_name = 'show' - - class ShowTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") # @unittest.expectedFailure def test_show_1(self): """Status DONE and OK""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = 
self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -31,17 +27,13 @@ def test_show_1(self): ) self.assertIn("OK", self.show_pb(backup_dir, 'node', as_text=True)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_show_json(self): """Status DONE and OK""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -58,16 +50,12 @@ def test_show_json(self): self.backup_node(backup_dir, 'node', node) self.assertIn("OK", self.show_pb(backup_dir, 'node', as_text=True)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_corrupt_2(self): """Status CORRUPT""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -102,16 +90,12 @@ def test_corrupt_2(self): ) self.assertIn("CORRUPT", self.show_pb(backup_dir, as_text=True)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_no_control_file(self): """backup.control doesn't exist""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -137,16 +121,12 @@ def test_no_control_file(self): 'doesn\'t exist', output) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_empty_control_file(self): """backup.control is empty""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -173,17 +153,13 @@ def test_empty_control_file(self): 'is empty', output) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_corrupt_control_file(self): """backup.control contains invalid option""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) 
@@ -205,9 +181,6 @@ def test_corrupt_control_file(self): 'WARNING: Invalid option "statuss" in file', self.show_pb(backup_dir, 'node', as_json=False, as_text=True)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_corrupt_correctness(self): @@ -215,10 +188,9 @@ def test_corrupt_correctness(self): if not self.remote: self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -299,9 +271,6 @@ def test_corrupt_correctness(self): output_local['uncompressed-bytes'], output_remote['uncompressed-bytes']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_corrupt_correctness_1(self): @@ -309,10 +278,9 @@ def test_corrupt_correctness_1(self): if not self.remote: self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -397,9 +365,6 @@ def test_corrupt_correctness_1(self): output_local['uncompressed-bytes'], output_remote['uncompressed-bytes']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_corrupt_correctness_2(self): @@ -407,10 +372,9 @@ def test_corrupt_correctness_2(self): if not self.remote: self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -512,17 +476,13 @@ def test_corrupt_correctness_2(self): output_local['uncompressed-bytes'], output_remote['uncompressed-bytes']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_color_with_no_terminal(self): """backup.control contains invalid option""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], pg_options={'autovacuum': 'off'}) @@ -547,6 +507,3 @@ def test_color_with_no_terminal(self): '[0m', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git 
a/tests/time_consuming.py b/tests/time_consuming.py index c778b9bc3..8270298cc 100644 --- a/tests/time_consuming.py +++ b/tests/time_consuming.py @@ -4,7 +4,6 @@ import subprocess from time import sleep -module_name = 'time_consuming' class TimeConsumingTests(ProbackupTest, unittest.TestCase): def test_pbckp150(self): @@ -20,9 +19,8 @@ def test_pbckp150(self): if not self.ptrack: return unittest.skip('Skipped because ptrack support is disabled') - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums'], @@ -39,7 +37,7 @@ def test_pbckp150(self): self.set_auto_conf(node, {'wal_keep_segments': '1000'}) # init probackup and add an instance - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -77,6 +75,3 @@ def test_pbckp150(self): backups = self.show_pb(backup_dir, 'node') for b in backups: self.assertEqual("OK", b['status']) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/time_stamp.py b/tests/time_stamp.py index c49d183da..170c62cd4 100644 --- a/tests/time_stamp.py +++ b/tests/time_stamp.py @@ -5,22 +5,19 @@ from time import sleep -module_name = 'time_stamp' - class TimeStamp(ProbackupTest, unittest.TestCase): def test_start_time_format(self): """Test backup ID changing after start-time editing in backup.control. We should convert local time in UTC format""" # Create simple node - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir="{0}/{1}/node".format(module_name, fname), + base_dir="{0}/{1}/node".format(self.module_name, self.fname), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -58,19 +55,16 @@ def test_start_time_format(self): self.assertNotIn("backup ID in control file", output) node.stop() - # Clean after yourself - self.del_test_dir(module_name, fname) def test_server_date_style(self): """Issue #112""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir="{0}/{1}/node".format(module_name, fname), + base_dir="{0}/{1}/node".format(self.module_name, self.fname), set_replication=True, initdb_params=['--data-checksums'], pg_options={"datestyle": "GERMAN, DMY"}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.start() @@ -78,18 +72,14 @@ def test_server_date_style(self): self.backup_node( backup_dir, 'node', node, options=['--stream', '-j 2']) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_handling_of_TZ_env_variable(self): """Issue #284""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir="{0}/{1}/node".format(module_name, fname), + base_dir="{0}/{1}/node".format(self.module_name, self.fname), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + 
backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.start() @@ -104,17 +94,13 @@ def test_handling_of_TZ_env_variable(self): self.assertNotIn("backup ID in control file", output) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") # @unittest.expectedFailure def test_dst_timezone_handling(self): """for manual testing""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -180,16 +166,12 @@ def test_dst_timezone_handling(self): stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_dst_timezone_handling_backward_compatibilty(self): """for manual testing""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -252,6 +234,3 @@ def test_dst_timezone_handling_backward_compatibilty(self): ['sudo', 'timedatectl', 'set-timezone', 'US/Moscow'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/validate.py b/tests/validate.py index 966ad81a8..5c3e31fe3 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -9,9 +9,6 @@ import hashlib -module_name = 'validate' - - class ValidateTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -20,12 +17,11 @@ def test_basic_validate_nullified_heap_page_backup(self): """ make node with nullified heap block """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -71,9 +67,6 @@ def test_basic_validate_nullified_heap_page_backup(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_validate_wal_unreal_values(self): @@ -81,12 +74,11 @@ def test_validate_wal_unreal_values(self): make node with archiving, make archive backup validate to both real and unreal values """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 
'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -213,9 +205,6 @@ def test_validate_wal_unreal_values(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(self.output), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_validate_corrupted_intermediate_backup(self): """ @@ -224,12 +213,11 @@ def test_basic_validate_corrupted_intermediate_backup(self): run validate on PAGE1, expect PAGE1 to gain status CORRUPT and PAGE2 gain status ORPHAN """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -298,9 +286,6 @@ def test_basic_validate_corrupted_intermediate_backup(self): self.show_pb(backup_dir, 'node', backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_corrupted_intermediate_backups(self): """ @@ -309,12 +294,11 @@ def test_validate_corrupted_intermediate_backups(self): expect FULL and PAGE1 to gain status CORRUPT and PAGE2 gain status ORPHAN """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -419,9 +403,6 @@ def test_validate_corrupted_intermediate_backups(self): self.show_pb(backup_dir, 'node', backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_specific_error_intermediate_backups(self): """ @@ -431,12 +412,11 @@ def test_validate_specific_error_intermediate_backups(self): purpose of this test is to be sure that not only CORRUPT backup descendants can be orphanized """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -507,9 +487,6 @@ def test_validate_specific_error_intermediate_backups(self): self.show_pb(backup_dir, 'node', backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_error_intermediate_backups(self): """ @@ -519,12 +496,11 @@ def test_validate_error_intermediate_backups(self): purpose of this test is to be sure that not only CORRUPT backup descendants can be orphanized """ - fname = 
self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -591,9 +567,6 @@ def test_validate_error_intermediate_backups(self): self.show_pb(backup_dir, 'node', backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_corrupted_intermediate_backups_1(self): """ @@ -602,12 +575,11 @@ def test_validate_corrupted_intermediate_backups_1(self): expect PAGE1 to gain status CORRUPT, PAGE2, PAGE3, PAGE4 and PAGE5 to gain status ORPHAN """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -788,9 +760,6 @@ def test_validate_corrupted_intermediate_backups_1(self): 'OK', self.show_pb(backup_dir, 'node', backup_id_8)['status'], 'Backup STATUS should be "OK"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_specific_target_corrupted_intermediate_backups(self): """ @@ -799,12 +768,11 @@ def test_validate_specific_target_corrupted_intermediate_backups(self): expect PAGE1 to gain status CORRUPT, PAGE2, PAGE3, PAGE4 and PAGE5 to gain status ORPHAN """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -980,9 +948,6 @@ def test_validate_specific_target_corrupted_intermediate_backups(self): self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_7)['status'], 'Backup STATUS should be "ORPHAN"') self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_8)['status'], 'Backup STATUS should be "OK"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_instance_with_several_corrupt_backups(self): """ @@ -991,12 +956,11 @@ def test_validate_instance_with_several_corrupt_backups(self): expect FULL1 to gain status CORRUPT, PAGE1_1 to gain status ORPHAN FULL2 to gain status CORRUPT, PAGE2_1 to gain status ORPHAN """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') 
self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1081,9 +1045,6 @@ def test_validate_instance_with_several_corrupt_backups(self): 'OK', self.show_pb(backup_dir, 'node', backup_id_6)['status'], 'Backup STATUS should be "OK"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_instance_with_several_corrupt_backups_interrupt(self): """ @@ -1091,12 +1052,11 @@ def test_validate_instance_with_several_corrupt_backups_interrupt(self): """ self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1180,9 +1140,6 @@ def test_validate_instance_with_several_corrupt_backups_interrupt(self): self.assertNotIn( 'Interrupted while locking backup', log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_instance_with_corrupted_page(self): """ @@ -1190,12 +1147,11 @@ def test_validate_instance_with_corrupted_page(self): corrupt file in PAGE1 backup and run validate on instance, expect PAGE1 to gain status CORRUPT, PAGE2 to gain status ORPHAN """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1327,20 +1283,16 @@ def test_validate_instance_with_corrupted_page(self): 'OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'], 'Backup STATUS should be "OK"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_instance_with_corrupted_full_and_try_restore(self): """make archive node, take FULL, PAGE1, PAGE2, FULL2, PAGE3 backups, corrupt file in FULL backup and run validate on instance, expect FULL to gain status CORRUPT, PAGE1 and PAGE2 to gain status ORPHAN, try to restore backup with --no-validation option""" - fname = self.id().split('.')[3] - node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node'), + node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1423,19 +1375,15 @@ def test_validate_instance_with_corrupted_full_and_try_restore(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(self.output), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_instance_with_corrupted_full(self): """make archive node, take FULL, 
PAGE1, PAGE2, FULL2, PAGE3 backups, corrupt file in FULL backup and run validate on instance, expect FULL to gain status CORRUPT, PAGE1 and PAGE2 to gain status ORPHAN""" - fname = self.id().split('.')[3] - node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node'), + node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1517,18 +1465,14 @@ def test_validate_instance_with_corrupted_full(self): self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'], 'Backup STATUS should be "OK"') self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'], 'Backup STATUS should be "OK"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_corrupt_wal_1(self): """make archive node, take FULL1, PAGE1,PAGE2,FULL2,PAGE3,PAGE4 backups, corrupt all wal files, run validate, expect errors""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1579,17 +1523,13 @@ def test_validate_corrupt_wal_1(self): self.show_pb(backup_dir, 'node', backup_id_2)['status'], 'Backup STATUS should be "CORRUPT"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_corrupt_wal_2(self): """make archive node, make full backup, corrupt all wal files, run validate to real xid, expect errors""" - fname = self.id().split('.')[3] - node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node'), + node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1645,9 +1585,6 @@ def test_validate_corrupt_wal_2(self): self.show_pb(backup_dir, 'node', backup_id)['status'], 'Backup STATUS should be "CORRUPT"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_wal_lost_segment_1(self): """make archive node, make archive full backup, @@ -1655,12 +1592,11 @@ def test_validate_wal_lost_segment_1(self): run validate, expecting error because of missing wal segment make sure that backup status is 'CORRUPT' """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) 
self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1722,21 +1658,17 @@ def test_validate_wal_lost_segment_1(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_corrupt_wal_between_backups(self): """ make archive node, make full backup, corrupt all wal files, run validate to real xid, expect errors """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1815,22 +1747,18 @@ def test_validate_corrupt_wal_between_backups(self): self.show_pb(backup_dir, 'node')[1]['status'], 'Backup STATUS should be "OK"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_pgpro702_688(self): """ make node without archiving, make stream backup, get Recovery Time, validate to Recovery Time """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -1856,22 +1784,18 @@ def test_pgpro702_688(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_pgpro688(self): """ make node with archiving, make backup, get Recovery Time, validate to Recovery Time. Waiting PGPRO-688. 
RESOLVED """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1885,9 +1809,6 @@ def test_pgpro688(self): backup_dir, 'node', options=["--time={0}".format(recovery_time), "-j", "4"]) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_pgpro561(self): @@ -1895,13 +1816,12 @@ def test_pgpro561(self): make node with archiving, make stream backup, restore it to node1, check that archiving is not successful on node1 """ - fname = self.id().split('.')[3] node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), + base_dir=os.path.join(self.module_name, self.fname, 'node1'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node1', node1) self.set_archiving(backup_dir, 'node1', node1) @@ -1911,7 +1831,7 @@ def test_pgpro561(self): backup_dir, 'node1', node1, options=["--stream"]) node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2')) + base_dir=os.path.join(self.module_name, self.fname, 'node2')) node2.cleanup() node1.psql( @@ -1993,9 +1913,6 @@ def test_pgpro561(self): self.assertFalse( 'pg_probackup archive-push completed successfully' in log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_corrupted_full(self): """ @@ -2006,15 +1923,14 @@ def test_validate_corrupted_full(self): remove corruption and run valudate again, check that second full backup and his page backups are OK """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2114,9 +2030,6 @@ def test_validate_corrupted_full(self): self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR') self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_corrupted_full_1(self): """ @@ -2130,13 +2043,12 @@ def test_validate_corrupted_full_1(self): second page should be CORRUPT third page should be ORPHAN """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, 
self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2216,9 +2128,6 @@ def test_validate_corrupted_full_1(self): self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'CORRUPT') self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_corrupted_full_2(self): """ @@ -2241,13 +2150,12 @@ def test_validate_corrupted_full_2(self): remove corruption from PAGE2_2 and run validate on PAGE2_4 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2578,9 +2486,6 @@ def test_validate_corrupted_full_2(self): self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_corrupted_full_missing(self): """ @@ -2593,13 +2498,12 @@ def test_validate_corrupted_full_missing(self): second full backup and his firts page backups are OK, third page should be ORPHAN """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2811,18 +2715,14 @@ def test_validate_corrupted_full_missing(self): self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_file_size_corruption_no_validate(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), # initdb_params=['--data-checksums'], ) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2873,9 +2773,6 @@ def test_file_size_corruption_no_validate(self): "ERROR: Backup files restoring failed" in e.message, repr(e.message)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_specific_backup_with_missing_backup(self): """ @@ -2894,11 +2791,11 @@ def test_validate_specific_backup_with_missing_backup(self): """ fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, 
initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -3016,9 +2913,6 @@ def test_validate_specific_backup_with_missing_backup(self): self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_specific_backup_with_missing_backup_1(self): """ @@ -3035,13 +2929,12 @@ def test_validate_specific_backup_with_missing_backup_1(self): PAGE1_1 FULL1 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -3137,9 +3030,6 @@ def test_validate_specific_backup_with_missing_backup_1(self): self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_with_missing_backup_1(self): """ @@ -3156,13 +3046,12 @@ def test_validate_with_missing_backup_1(self): PAGE1_1 FULL1 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -3326,9 +3215,6 @@ def test_validate_with_missing_backup_1(self): self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_with_missing_backup_2(self): """ @@ -3345,13 +3231,12 @@ def test_validate_with_missing_backup_2(self): PAGE1_1 FULL1 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -3486,19 +3371,15 @@ def test_validate_with_missing_backup_2(self): self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def 
test_corrupt_pg_control_via_resetxlog(self): """ PGPRO-2096 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -3556,18 +3437,14 @@ def test_corrupt_pg_control_via_resetxlog(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validation_after_backup(self): """""" self._check_gdb_flag_or_skip_test() - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -3596,19 +3473,15 @@ def test_validation_after_backup(self): self.show_pb(backup_dir, 'node', backup_id)['status'], 'Backup STATUS should be "ERROR"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_validate_corrupt_tablespace_map(self): """ Check that corruption in tablespace_map is detected """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -3651,9 +3524,6 @@ def test_validate_corrupt_tablespace_map(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - #TODO fix the test @unittest.expectedFailure # @unittest.skip("skip") @@ -3661,10 +3531,9 @@ def test_validate_target_lsn(self): """ Check validation to specific LSN """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -3683,7 +3552,7 @@ def test_validate_target_lsn(self): "from generate_series(0,10000) i") node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -3709,17 +3578,13 @@ def test_validate_target_lsn(self): '--recovery-target-timeline=2', '--recovery-target-lsn={0}'.format(target_lsn)]) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_partial_validate_empty_and_mangled_database_map(self): """ """ - fname = self.id().split('.')[3] - backup_dir = 
os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -3783,16 +3648,12 @@ def test_partial_validate_empty_and_mangled_database_map(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_partial_validate_exclude(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -3855,17 +3716,13 @@ def test_partial_validate_exclude(self): self.assertIn( "VERBOSE: Skip file validation due to partial restore", output) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_partial_validate_include(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -3917,18 +3774,14 @@ def test_partial_validate_include(self): self.assertNotIn( "VERBOSE: Skip file validation due to partial restore", output) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_not_validate_diffenent_pg_version(self): """Do not validate backup, if binary is compiled with different PG version""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -3971,19 +3824,15 @@ def test_not_validate_diffenent_pg_version(self): "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_validate_corrupt_page_header_map(self): """ Check that corruption in page_header_map is detected """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -4045,19 +3894,15 @@ def test_validate_corrupt_page_header_map(self): self.assertIn("WARNING: Some backups are not valid", e.message) - # Clean after yourself - 
self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_validate_truncated_page_header_map(self): """ Check that corruption in page_header_map is detected """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -4108,19 +3953,15 @@ def test_validate_truncated_page_header_map(self): self.assertIn("INFO: Backup {0} data files are valid".format(ok_2), e.message) self.assertIn("WARNING: Some backups are not valid", e.message) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_validate_missing_page_header_map(self): """ Check that corruption in page_header_map is detected """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -4168,19 +4009,15 @@ def test_validate_missing_page_header_map(self): self.assertIn("INFO: Backup {0} data files are valid".format(ok_2), e.message) self.assertIn("WARNING: Some backups are not valid", e.message) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_no_validate_tablespace_map(self): """ Check that --no-validate is propagated to tablespace_map """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -4233,9 +4070,6 @@ def test_no_validate_tablespace_map(self): tblspace_new, "Symlink '{0}' do not points to '{1}'".format(tablespace_link, tblspace_new)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # validate empty backup list # page from future during validate # page from future during backup From 39e06f576bc1b3f73e5243afeb668befb412ef54 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Wed, 9 Nov 2022 01:56:11 +0300 Subject: [PATCH 371/525] [PBCKP-304] test dirs cleanup fixed - added logic for nodes GC and dirs cleanup --- tests/helpers/ptrack_helpers.py | 34 +++++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index bd5ea01fd..11966dd22 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -205,6 +205,8 @@ class ProbackupTest(object): def __init__(self, *args, **kwargs): super(ProbackupTest, self).__init__(*args, **kwargs) + self.nodes_to_cleanup = [] + if isinstance(self, unittest.TestCase): self.module_name = self.id().split('.')[1] self.fname = self.id().split('.')[3] @@ -373,11 +375,23 @@ def __init__(self, *args, **kwargs): os.environ["PGAPPNAME"] = "pg_probackup" def tearDown(self): 
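# (editorial note, not part of the patch) Intent of the new cleanup flow:
# every node created via make_simple_node() is registered in
# self.nodes_to_cleanup, node cleanup happens here instead of at the end of
# each test, and the per-test directory is removed only when the test passed
# (how much to keep for a failed test is revisited two patches later).
# From a test's point of view the contract becomes (hedged sketch,
# hypothetical test name):
#
#   def test_something(self):
#       node = self.make_simple_node(...)   # auto-registered for cleanup
#       ...                                 # no explicit del_test_dir() needed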
- if isinstance(self, unittest.TestCase): - module_name = self.id().split('.')[1] - fname = self.id().split('.')[3] - if is_test_result_ok(self): + if is_test_result_ok(self): + for node in self.nodes_to_cleanup: + node.cleanup() + # we do clear refs to nodes to gather them by gc inside self.del_test_dir() + self.nodes_to_cleanup.clear() + + if isinstance(self, unittest.TestCase): + module_name = self.id().split('.')[1] + fname = self.id().split('.')[3] self.del_test_dir(module_name, fname) + else: + for node in self.nodes_to_cleanup: + # TODO VERIFY do we want to remain failed test's db data for further investigations? + # TODO VERIFY or just to leave logs only without node/data? + # node._try_shutdown(max_attempts=1) + node.cleanup() + self.nodes_to_cleanup.clear() @property def pg_config_version(self): @@ -475,6 +489,9 @@ def make_simple_node( if node.major_version >= 13: self.set_auto_conf( node, {}, 'postgresql.conf', ['wal_keep_segments']) + + self.nodes_to_cleanup.append(node) + return node def simple_bootstrap(self, node, role) -> None: @@ -1689,6 +1706,15 @@ def get_bin_path(self, binary): return testgres.get_bin_path(binary) def clean_all(self): + # pre gc.collect() all dropped nodes + for o in gc.get_referrers(testgres.PostgresNode): + if o.__class__ is testgres.PostgresNode: + # removing node from slow_start enclosure + # after this the node is collectable by gc + o.slow_start = None + gc.collect() + + # only when there are unhandled nodes left we do the cleanup for them for o in gc.get_referrers(testgres.PostgresNode): if o.__class__ is testgres.PostgresNode: o.cleanup() From a2c1e6d6b4adade6d386d603f2652b60f996fb93 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Wed, 9 Nov 2022 02:50:36 +0300 Subject: [PATCH 372/525] [PBCKP-304] removed direct calls to ProbackupTest.del_test_dir() from tests --- tests/archive.py | 7 ------- tests/backup.py | 1 - tests/catchup.py | 2 -- tests/compatibility.py | 3 --- tests/incr_restore.py | 5 ----- tests/merge.py | 7 ------- tests/pgpro560.py | 2 -- tests/ptrack.py | 3 --- tests/replica.py | 15 --------------- tests/restore.py | 2 -- tests/retention.py | 2 -- 11 files changed, 49 deletions(-) diff --git a/tests/archive.py b/tests/archive.py index f40cf3c5d..acdf39eca 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -78,7 +78,6 @@ def test_pgpro434_2(self): ) if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because pg_control_checkpoint() is not supported in PG 9.5') @@ -681,7 +680,6 @@ def test_replica_archive(self): 'max_wal_size': '32MB'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -808,7 +806,6 @@ def test_master_and_replica_parallel_archiving(self): ) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -899,7 +896,6 @@ def test_basic_master_and_replica_concurrent_archiving(self): 'archive_timeout': '10s'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -1221,7 +1217,6 @@ def test_archive_catalog(self): 'checkpoint_timeout': '30s'}) if self.get_version(master) < self.version_to_num('9.6.0'): 
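# (editorial note, not part of the patch) This patch (372/525) applies one
# mechanical change across all test modules: with cleanup centralized in
# tearDown(), an explicit del_test_dir() before an early return or skip is
# redundant, so
#
#   if self.get_version(master) < self.version_to_num('9.6.0'):
#       self.del_test_dir(self.module_name, self.fname)   # dropped
#       return unittest.skip('...')
#
# becomes just the version check plus the skip; tearDown() still runs after
# the early exit and performs the same cleanup. (The skip call itself is
# tightened to self.skipTest() later in the series.)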
- self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -2083,7 +2078,6 @@ def test_archive_pg_receivexlog_partial_handling(self): initdb_params=['--data-checksums']) if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -2284,7 +2278,6 @@ def test_archive_get_batching_sanity(self): initdb_params=['--data-checksums']) if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') diff --git a/tests/backup.py b/tests/backup.py index b7bb1b8b4..5f8a9038c 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -2253,7 +2253,6 @@ def test_backup_with_less_privileges_role(self): datname='backupdb', options=['--stream', '-U', 'backup']) if self.get_version(node) < 90600: - self.del_test_dir(self.module_name, self.fname) return # Restore as replica diff --git a/tests/catchup.py b/tests/catchup.py index 12622207a..d0dd11e81 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -53,7 +53,6 @@ def test_basic_full_catchup(self): # Cleanup dst_pg.stop() #self.assertEqual(1, 0, 'Stop test') - self.del_test_dir(self.module_name, self.fname) def test_full_catchup_with_tablespace(self): """ @@ -1533,7 +1532,6 @@ def test_dry_run_catchup_ptrack(self): # Cleanup src_pg.stop() - self.del_test_dir(self.module_name, self.fname) def test_dry_run_catchup_delta(self): """ diff --git a/tests/compatibility.py b/tests/compatibility.py index 8a7812c57..a244ce687 100644 --- a/tests/compatibility.py +++ b/tests/compatibility.py @@ -86,9 +86,6 @@ def test_catchup_with_different_remote_major_pg(self): options=['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--remote-path=' + pgprobackup_ssh_agent_path] ) - # Clean after yourself - self.del_test_dir(self.module_name, self.fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_page(self): diff --git a/tests/incr_restore.py b/tests/incr_restore.py index 55d59fa99..08a92b5b0 100644 --- a/tests/incr_restore.py +++ b/tests/incr_restore.py @@ -1389,9 +1389,6 @@ def test_incr_checksum_restore_backward(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(delta_pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(self.module_name, self.fname) - # @unittest.skip("skip") def test_make_replica_via_incr_checksum_restore(self): """ @@ -1403,7 +1400,6 @@ def test_make_replica_via_incr_checksum_restore(self): initdb_params=['--data-checksums']) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -1472,7 +1468,6 @@ def test_make_replica_via_incr_lsn_restore(self): initdb_params=['--data-checksums']) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') diff --git a/tests/merge.py b/tests/merge.py index fa0da7b2b..a323bbba2 100644 --- a/tests/merge.py +++ b/tests/merge.py @@ -754,9 +754,6 @@ def test_merge_delta_truncate(self): self.assertEqual(result1, result2) - # Clean after yourself - self.del_test_dir(self.module_name, self.fname) - def 
test_merge_ptrack_truncate(self): """ make node, create table, take full backup, @@ -1401,8 +1398,6 @@ def test_crash_after_opening_backup_control_1(self): self.assertEqual( 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) - self.del_test_dir(self.module_name, self.fname) - # @unittest.skip("skip") def test_crash_after_opening_backup_control_2(self): """ @@ -2657,8 +2652,6 @@ def test_missing_non_data_file(self): self.assertEqual( 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) - self.del_test_dir(self.module_name, self.fname) - # @unittest.skip("skip") def test_merge_remote_mode(self): """ diff --git a/tests/pgpro560.py b/tests/pgpro560.py index eeab59960..b665fd200 100644 --- a/tests/pgpro560.py +++ b/tests/pgpro560.py @@ -45,10 +45,8 @@ def test_pgpro560_control_file_loss(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself # Return this file to avoid Postger fail os.rename(os.path.join(node.base_dir, 'data', 'global', 'pg_control_copy'), file) - self.del_test_dir(self.module_name, self.fname) def test_pgpro560_systemid_mismatch(self): """ diff --git a/tests/ptrack.py b/tests/ptrack.py index 2b8d2d49e..74db2e554 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -3096,9 +3096,6 @@ def test_basic_ptrack_truncate_replica(self): 'postgres', 'select 1') - # Clean after yourself - self.del_test_dir(self.module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum(self): diff --git a/tests/replica.py b/tests/replica.py index 3fb68633f..c74385c5c 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -26,7 +26,6 @@ def test_replica_switchover(self): initdb_params=['--data-checksums']) if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -229,7 +228,6 @@ def test_replica_archive_page_backup(self): 'max_wal_size': '32MB'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -367,7 +365,6 @@ def test_basic_make_replica_via_restore(self): 'archive_timeout': '10s'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -421,7 +418,6 @@ def test_take_backup_from_delayed_replica(self): pg_options={'archive_timeout': '10s'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -530,7 +526,6 @@ def test_replica_promote(self): 'max_wal_size': '32MB'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -617,7 +612,6 @@ def test_replica_stop_lsn_null_offset(self): 'wal_level': 'replica'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -700,7 +694,6 @@ def test_replica_stop_lsn_null_offset_next_record(self): 'wal_level': 'replica'}) if self.get_version(master) < self.version_to_num('9.6.0'): - 
self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -799,7 +792,6 @@ def test_archive_replica_null_offset(self): 'wal_level': 'replica'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -879,7 +871,6 @@ def test_archive_replica_not_null_offset(self): 'wal_level': 'replica'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -964,7 +955,6 @@ def test_replica_toast(self): 'shared_buffers': '128MB'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -1064,7 +1054,6 @@ def test_start_stop_lsn_in_the_same_segno(self): 'shared_buffers': '128MB'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -1138,7 +1127,6 @@ def test_replica_promote_1(self): 'wal_level': 'replica'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -1257,7 +1245,6 @@ def test_replica_promote_archive_delta(self): 'archive_timeout': '30s'}) if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -1378,7 +1365,6 @@ def test_replica_promote_archive_page(self): 'archive_timeout': '30s'}) if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') @@ -1496,7 +1482,6 @@ def test_parent_choosing(self): initdb_params=['--data-checksums']) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(self.module_name, self.fname) return unittest.skip( 'Skipped because backup from replica is not supported in PG 9.5') diff --git a/tests/restore.py b/tests/restore.py index 52db63c1c..5cd389e8b 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -334,7 +334,6 @@ def test_restore_to_lsn_inclusive(self): initdb_params=['--data-checksums']) if self.get_version(node) < self.version_to_num('10.0'): - self.del_test_dir(self.module_name, self.fname) return backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') @@ -401,7 +400,6 @@ def test_restore_to_lsn_not_inclusive(self): initdb_params=['--data-checksums']) if self.get_version(node) < self.version_to_num('10.0'): - self.del_test_dir(self.module_name, self.fname) return backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') diff --git a/tests/retention.py b/tests/retention.py index 5043366f4..f9969c31a 100644 --- a/tests/retention.py +++ b/tests/retention.py @@ -1538,7 +1538,6 @@ def test_retention_redundancy_overlapping_chains(self): initdb_params=['--data-checksums']) if self.get_version(node) < 90600: - self.del_test_dir(self.module_name, self.fname) return unittest.skip('Skipped because ptrack support is 
disabled') backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') @@ -1584,7 +1583,6 @@ def test_retention_redundancy_overlapping_chains_1(self): initdb_params=['--data-checksums']) if self.get_version(node) < 90600: - self.del_test_dir(self.module_name, self.fname) return unittest.skip('Skipped because ptrack support is disabled') backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') From cda016c9552d48fd809c99fff4221d0779e87f2b Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Wed, 9 Nov 2022 03:44:47 +0300 Subject: [PATCH 373/525] [PBCKP-304] removed ProbackupTest.clean_all() GC magic, simplified ProbackupTest.tearDown() --- tests/helpers/ptrack_helpers.py | 33 ++++++--------------------------- 1 file changed, 6 insertions(+), 27 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 11966dd22..2729b7a4f 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -378,20 +378,15 @@ def tearDown(self): if is_test_result_ok(self): for node in self.nodes_to_cleanup: node.cleanup() - # we do clear refs to nodes to gather them by gc inside self.del_test_dir() - self.nodes_to_cleanup.clear() + self.del_test_dir(self.module_name, self.fname) - if isinstance(self, unittest.TestCase): - module_name = self.id().split('.')[1] - fname = self.id().split('.')[3] - self.del_test_dir(module_name, fname) else: for node in self.nodes_to_cleanup: - # TODO VERIFY do we want to remain failed test's db data for further investigations? - # TODO VERIFY or just to leave logs only without node/data? - # node._try_shutdown(max_attempts=1) - node.cleanup() - self.nodes_to_cleanup.clear() + # TODO make decorator with proper stop() vs cleanup() + node._try_shutdown(max_attempts=1) + # node.cleanup() + + self.nodes_to_cleanup.clear() @property def pg_config_version(self): @@ -1705,25 +1700,9 @@ def get_ptrack_version(self, node): def get_bin_path(self, binary): return testgres.get_bin_path(binary) - def clean_all(self): - # pre gc.collect() all dropped nodes - for o in gc.get_referrers(testgres.PostgresNode): - if o.__class__ is testgres.PostgresNode: - # removing node from slow_start enclosure - # after this the node is collectable by gc - o.slow_start = None - gc.collect() - - # only when there are unhandled nodes left we do the cleanup for them - for o in gc.get_referrers(testgres.PostgresNode): - if o.__class__ is testgres.PostgresNode: - o.cleanup() - def del_test_dir(self, module_name, fname): """ Del testdir and optimistically try to del module dir""" - self.clean_all() - shutil.rmtree( os.path.join( self.tmp_path, From 04e05b151dd26878ba64b9650be6db3b7ebf5a7e Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 9 Nov 2022 12:17:10 +0300 Subject: [PATCH 374/525] Fix auth_test.py --- tests/__init__.py | 2 +- tests/auth_test.py | 189 ++++++++++++++++++++------------------------- 2 files changed, 86 insertions(+), 105 deletions(-) diff --git a/tests/__init__.py b/tests/__init__.py index c02788e29..40d5faf65 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -27,7 +27,7 @@ def load_tests(loader, tests, pattern): if os.environ['PG_PROBACKUP_LONG'] == 'ON': suite.addTests(loader.loadTestsFromModule(time_consuming)) -# suite.addTests(loader.loadTestsFromModule(auth_test)) + suite.addTests(loader.loadTestsFromModule(auth_test)) suite.addTests(loader.loadTestsFromModule(archive)) suite.addTests(loader.loadTestsFromModule(backup)) suite.addTests(loader.loadTestsFromModule(catchup)) diff 
--git a/tests/auth_test.py b/tests/auth_test.py index 4b0c4a5b2..7e0b6fcfb 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -126,24 +126,16 @@ def test_backup_via_unprivileged_user(self): node.safe_psql( "postgres", "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup") - elif self.get_vestion(node) < self.version_to_num('15.0'): + elif self.get_version(node) < self.version_to_num('15.0'): node.safe_psql( "postgres", - "GRANT EXECUTE ON FUNCTION " - "pg_stop_backup(boolean, boolean) TO backup") - # Do this for ptrack backups - node.safe_psql( - "postgres", - "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup") + "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean, boolean) TO backup;") else: node.safe_psql( "postgres", - "GRANT EXECUTE ON FUNCTION " - "pg_backup_stop(boolean) TO backup") - # Do this for ptrack backups - node.safe_psql( - "postgres", - "GRANT EXECUTE ON FUNCTION pg_backup_stop() TO backup") + "GRANT EXECUTE ON FUNCTION pg_backup_stop() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_backup_stop(boolean) TO backup;") self.backup_node( backup_dir, 'node', node, options=['-U', 'backup']) @@ -193,7 +185,10 @@ def setUpClass(cls): set_replication=True, initdb_params=['--data-checksums', '--auth-host=md5'] ) - modify_pg_hba(cls.node) + + cls.username = cls.pb.get_username() + + cls.modify_pg_hba(cls.node) cls.pb.init_pb(cls.backup_dir) cls.pb.add_instance(cls.backup_dir, cls.node.name, cls.node) @@ -203,7 +198,7 @@ def setUpClass(cls): except StartNodeException: raise unittest.skip("Node hasn't started") - if cls.pb.get_version(cls.node) < 150000: + if cls.pb.get_version(cls.node) < 100000: cls.node.safe_psql( "postgres", "CREATE ROLE backup WITH LOGIN PASSWORD 'password'; " @@ -218,6 +213,21 @@ def setUpClass(cls): "GRANT EXECUTE ON FUNCTION txid_current() TO backup; " "GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;") + elif cls.pb.get_version(cls.node) < 150000: + cls.node.safe_psql( + "postgres", + "CREATE ROLE backup WITH LOGIN PASSWORD 'password'; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION txid_current() TO backup; " + "GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;") else: cls.node.safe_psql( "postgres", @@ -229,7 +239,7 @@ def setUpClass(cls): "GRANT EXECUTE ON FUNCTION pg_backup_stop() TO backup; " "GRANT EXECUTE ON FUNCTION pg_backup_stop(boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_switch_xlog() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_switch_wal() TO backup; " "GRANT EXECUTE ON FUNCTION txid_current() TO backup; " "GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;") @@ -244,12 +254,13 @@ def tearDownClass(cls): @unittest.skipIf(skip_test, "Module pexpect 
isn't installed. You need to install it.") def setUp(self): - self.cmd = ['backup', + self.pb_cmd = ['backup', '-B', self.backup_dir, '--instance', self.node.name, '-h', '127.0.0.1', '-p', str(self.node.port), '-U', 'backup', + '-d', 'postgres', '-b', 'FULL' ] @@ -269,44 +280,31 @@ def test_empty_password(self): """ Test case: PGPB_AUTH03 - zero password length """ try: self.assertIn("ERROR: no password supplied", - str(run_pb_with_auth([self.pb.probackup_path] + self.cmd, '\0\r\n')) - ) + self.run_pb_with_auth('\0\r\n')) except (TIMEOUT, ExceptionPexpect) as e: self.fail(e.value) def test_wrong_password(self): """ Test case: PGPB_AUTH04 - incorrect password """ - try: - self.assertIn("password authentication failed", - str(run_pb_with_auth([self.pb.probackup_path] + self.cmd, 'wrong_password\r\n')) - ) - except (TIMEOUT, ExceptionPexpect) as e: - self.fail(e.value) + self.assertIn("password authentication failed", + self.run_pb_with_auth('wrong_password\r\n')) def test_right_password(self): """ Test case: PGPB_AUTH01 - correct password """ - try: - self.assertIn("completed", - str(run_pb_with_auth([self.pb.probackup_path] + self.cmd, 'password\r\n')) - ) - except (TIMEOUT, ExceptionPexpect) as e: - self.fail(e.value) + self.assertIn("completed", + self.run_pb_with_auth('password\r\n')) def test_right_password_and_wrong_pgpass(self): """ Test case: PGPB_AUTH05 - correct password and incorrect .pgpass (-W)""" line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'wrong_password']) - create_pgpass(self.pgpass_file, line) - try: - self.assertIn("completed", - str(run_pb_with_auth([self.pb.probackup_path] + self.cmd + ['-W'], 'password\r\n')) - ) - except (TIMEOUT, ExceptionPexpect) as e: - self.fail(e.value) + self.create_pgpass(self.pgpass_file, line) + self.assertIn("completed", + self.run_pb_with_auth('password\r\n', add_args=["-W"])) def test_ctrl_c_event(self): """ Test case: PGPB_AUTH02 - send interrupt signal """ try: - run_pb_with_auth([self.pb.probackup_path] + self.cmd, kill=True) + self.run_pb_with_auth(kill=True) except TIMEOUT: self.fail("Error: CTRL+C event ignored") @@ -314,91 +312,74 @@ def test_pgpassfile_env(self): """ Test case: PGPB_AUTH06 - set environment var PGPASSFILE """ path = os.path.join(self.pb.tmp_path, module_name, 'pgpass.conf') line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'password']) - create_pgpass(path, line) + self.create_pgpass(path, line) self.pb.test_env["PGPASSFILE"] = path - try: - self.assertEqual( - "OK", - self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.cmd + ['-w']))["status"], - "ERROR: Full backup status is not valid." - ) - except ProbackupException as e: - self.fail(e) + self.assertEqual( + "OK", + self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.pb_cmd + ['-w']))["status"], + "ERROR: Full backup status is not valid." + ) def test_pgpass(self): """ Test case: PGPB_AUTH07 - Create file .pgpass in home dir. """ line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'password']) - create_pgpass(self.pgpass_file, line) - try: - self.assertEqual( - "OK", - self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.cmd + ['-w']))["status"], - "ERROR: Full backup status is not valid." 
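# (editorial note, not part of the patch) The interactive-password tests above
# all funnel into run_pb_with_auth() below; the pexpect interaction it relies
# on reduces to this hedged sketch (drive_password_prompt and probackup_path
# are illustrative names; spawn/expect/sendline/EOF are real pexpect API and
# mirror the helper rewritten in this patch, minus its kill/error handling):
#
#   from pexpect import spawn, EOF
#
#   def drive_password_prompt(probackup_path, cmd_args, password):
#       with spawn(probackup_path, cmd_args, encoding='utf-8', timeout=10) as p:
#           p.expect(u"Password for user .*:", 5)   # wait for libpq's prompt
#           p.sendline(password)                    # answer it
#           p.expect(EOF)                           # let the command run to completion
#           return str(p.before)                    # output captured before EOF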
- ) - except ProbackupException as e: - self.fail(e) + self.create_pgpass(self.pgpass_file, line) + self.assertEqual( + "OK", + self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.pb_cmd + ['-w']))["status"], + "ERROR: Full backup status is not valid." + ) def test_pgpassword(self): """ Test case: PGPB_AUTH08 - set environment var PGPASSWORD """ self.pb.test_env["PGPASSWORD"] = "password" - try: - self.assertEqual( - "OK", - self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.cmd + ['-w']))["status"], - "ERROR: Full backup status is not valid." - ) - except ProbackupException as e: - self.fail(e) + self.assertEqual( + "OK", + self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.pb_cmd + ['-w']))["status"], + "ERROR: Full backup status is not valid." + ) def test_pgpassword_and_wrong_pgpass(self): """ Test case: PGPB_AUTH09 - Check priority between PGPASSWORD and .pgpass file""" line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'wrong_password']) - create_pgpass(self.pgpass_file, line) + self.create_pgpass(self.pgpass_file, line) self.pb.test_env["PGPASSWORD"] = "password" - try: - self.assertEqual( - "OK", - self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.cmd + ['-w']))["status"], - "ERROR: Full backup status is not valid." - ) - except ProbackupException as e: - self.fail(e) - + self.assertEqual( + "OK", + self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.pb_cmd + ['-w']))["status"], + "ERROR: Full backup status is not valid." + ) -def run_pb_with_auth(cmd, password=None, kill=False): - try: - with spawn(" ".join(cmd), encoding='utf-8', timeout=10) as probackup: + def run_pb_with_auth(self, password=None, add_args = [], kill=False): + with spawn(self.pb.probackup_path, self.pb_cmd + add_args, encoding='utf-8', timeout=10) as probackup: result = probackup.expect(u"Password for user .*:", 5) if kill: probackup.kill(signal.SIGINT) elif result == 0: probackup.sendline(password) probackup.expect(EOF) - return probackup.before + return str(probackup.before) else: raise ExceptionPexpect("Other pexpect errors.") - except TIMEOUT: - raise TIMEOUT("Timeout error.") - except ExceptionPexpect: - raise ExceptionPexpect("Pexpect error.") - - -def modify_pg_hba(node): - """ - Description: - Add trust authentication for user postgres. Need for add new role and set grant. - :param node: - :return None: - """ - hba_conf = os.path.join(node.data_dir, "pg_hba.conf") - with open(hba_conf, 'r+') as fio: - data = fio.read() - fio.seek(0) - fio.write('host\tall\tpostgres\t127.0.0.1/0\ttrust\n' + data) - - -def create_pgpass(path, line): - with open(path, 'w') as passfile: - # host:port:db:username:password - passfile.write(line) - os.chmod(path, 0o600) + + + @classmethod + def modify_pg_hba(cls, node): + """ + Description: + Add trust authentication for user postgres. Need for add new role and set grant. 
+ :param node: + :return None: + """ + hba_conf = os.path.join(node.data_dir, "pg_hba.conf") + with open(hba_conf, 'r+') as fio: + data = fio.read() + fio.seek(0) + fio.write('host\tall\t%s\t127.0.0.1/0\ttrust\n%s' % (cls.username, data)) + + + def create_pgpass(self, path, line): + with open(path, 'w') as passfile: + # host:port:db:username:password + passfile.write(line) + os.chmod(path, 0o600) From d7cc00b3585515c975d05a471e49d733924da588 Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Wed, 9 Nov 2022 23:02:29 +0700 Subject: [PATCH 375/525] Check pg_probackup binary in PATH --- tests/helpers/ptrack_helpers.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index abb715b7e..1b90ac3ff 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -242,10 +242,7 @@ def __init__(self, *args, **kwargs): self.user = self.get_username() self.probackup_path = None if 'PGPROBACKUPBIN' in self.test_env: - if ( - os.path.isfile(self.test_env["PGPROBACKUPBIN"]) and - os.access(self.test_env["PGPROBACKUPBIN"], os.X_OK) - ): + if shutil.which(self.test_env["PGPROBACKUPBIN"]): self.probackup_path = self.test_env["PGPROBACKUPBIN"] else: if self.verbose: From 4869a564d0017a6ba58c293e9273f1413f68469c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 11 Nov 2022 22:34:46 +0300 Subject: [PATCH 376/525] [PBCKP-336] Fix segno calculation. It were lost during fork-name detection fix at eaf3b14c22 . And since there were no basic test for this, it were not detected. --- src/dir.c | 13 ++++++++++++- tests/backup.py | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/src/dir.c b/src/dir.c index b55f25e18..6609b9f19 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1786,6 +1786,8 @@ is_forkname(char *name, size_t *pos, const char *forkname) } #define OIDCHARS 10 +#define MAXSEGNO (((uint64_t)1<<32)/RELSEG_SIZE-1) +#define SEGNOCHARS 5 /* when BLCKSZ == (1<<15) */ /* Set forkName if possible */ bool @@ -1793,6 +1795,7 @@ set_forkname(pgFile *file) { size_t i = 0; uint64_t oid = 0; /* use 64bit to not check for overflow in a loop */ + uint64_t segno = 0; /* pretend it is not relation file */ file->relOid = 0; @@ -1823,8 +1826,15 @@ set_forkname(pgFile *file) /* /^\d+(_(vm|fsm|init|ptrack))?\.\d+$/ */ if (file->name[i] == '.' && isdigit(file->name[i+1])) { + size_t start = i+1; for (i++; isdigit(file->name[i]); i++) - ; + { + if (i == start && file->name[i] == '0') + return false; + segno = segno * 10 + file->name[i] - '0'; + } + if (i - start > SEGNOCHARS || segno > MAXSEGNO) + return false; } /* CFS "fork name" */ @@ -1843,6 +1853,7 @@ set_forkname(pgFile *file) } file->relOid = oid; + file->segno = segno; file->is_datafile = file->forkName == none; return true; } diff --git a/tests/backup.py b/tests/backup.py index 6028a3ff6..ae7852da9 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -13,6 +13,38 @@ class BackupTest(ProbackupTest, unittest.TestCase): + def test_basic_full_backup(self): + """ + Just test full backup with at least two segments + """ + fname = self.id().split('.')[3] + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums'], + # we need to write a lot. Lets speedup a bit. 
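# (editorial note, not part of the patch) Rough numbers behind this test,
# assuming default build options (BLCKSZ = 8192, RELSEG_SIZE = 131072 blocks):
#   segment size = 8192 * 131072      = 1 GiB per relation segment file
#   MAXSEGNO     = 2**32 / 131072 - 1 = 32767 (the new define in dir.c; a
#                  relation holds at most 2**32 blocks, hence at most
#                  2**32 / RELSEG_SIZE segment files)
# pgbench_init(scale=100) puts roughly 1.3 GB into pgbench_accounts, so the
# relation spills into a second file "<relfilenode>.1" and set_forkname()
# must recover segno = 1 from that suffix -- exactly the case PBCKP-336 fixes.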
+ pg_options={"fsync": "off", "synchronous_commit": "off"}) + + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Fill with data + # Have to use scale=100 to create second segment. + node.pgbench_init(scale=100, no_vacuum=True) + + # FULL + backup_id = self.backup_node(backup_dir, 'node', node) + + out = self.validate_pb(backup_dir, 'node', backup_id) + self.assertIn( + "INFO: Backup {0} is valid".format(backup_id), + out) + + # Clean after yourself + self.del_test_dir(module_name, fname) + # @unittest.skip("skip") # @unittest.expectedFailure # PGPRO-707 From 4505def249841374797fc85679c633f7b8d1893f Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 15 Nov 2022 11:13:06 +0300 Subject: [PATCH 377/525] properly skip ptrack tests. --- tests/ptrack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ptrack.py b/tests/ptrack.py index a01405d6a..c02aba17c 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -16,7 +16,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase): def setUp(self): if self.pg_config_version < self.version_to_num('11.0'): - return unittest.skip('You need PostgreSQL >= 11 for this test') + self.skipTest('You need PostgreSQL >= 11 for this test') self.fname = self.id().split('.')[3] # @unittest.skip("skip") From b7bd831b8d9c45b7b555bd2d2033125d033987f0 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 15 Nov 2022 11:23:49 +0300 Subject: [PATCH 378/525] correctly test skips --- tests/archive.py | 22 +++++++++++----------- tests/backup.py | 8 ++++---- tests/catchup.py | 14 +++++++------- tests/compatibility.py | 2 +- tests/delete.py | 2 +- tests/external.py | 6 +++--- tests/false_positive.py | 2 +- tests/helpers/__init__.py | 9 ++++++++- tests/incr_restore.py | 4 ++-- tests/merge.py | 2 +- tests/option.py | 2 +- tests/ptrack.py | 2 +- tests/replica.py | 34 +++++++++++++++++----------------- tests/restore.py | 18 +++++++++--------- tests/retention.py | 4 ++-- tests/time_consuming.py | 4 ++-- 16 files changed, 71 insertions(+), 64 deletions(-) diff --git a/tests/archive.py b/tests/archive.py index 81d013f6b..a65b85dba 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -86,7 +86,7 @@ def test_pgpro434_2(self): if self.get_version(node) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because pg_control_checkpoint() is not supported in PG 9.5') self.init_pb(backup_dir) @@ -717,7 +717,7 @@ def test_replica_archive(self): if self.get_version(master) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -848,7 +848,7 @@ def test_master_and_replica_parallel_archiving(self): if self.get_version(master) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') replica = self.make_simple_node( @@ -929,7 +929,7 @@ def test_basic_master_and_replica_concurrent_archiving(self): make sure that archiving on both node is working. 
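# (editorial note, not part of the patch) Why patches 377/525 and 378/525 swap
# "return unittest.skip(...)" for "self.skipTest(...)": unittest.skip(reason)
# is a decorator factory, so calling it inside a running test and returning
# the result skips nothing -- the test just finishes and is reported as
# passed. Raising unittest.SkipTest is what actually marks a test as skipped,
# which is what TestCase.skipTest() does (and what the python 2.7 shim added
# to tests/helpers/__init__.py below emulates). A minimal illustration with a
# hypothetical test class, not from the repo:
#
#   class Demo(unittest.TestCase):
#       def test_wrong(self):
#           return unittest.skip("never reported")   # runs to the end, shows as ok
#       def test_right(self):
#           self.skipTest("reported as skipped")      # raises unittest.SkipTest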
""" if self.pg_config_version < self.version_to_num('9.6.0'): - return unittest.skip('You need PostgreSQL >= 9.6 for this test') + self.skipTest('You need PostgreSQL >= 9.6 for this test') fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -943,7 +943,7 @@ def test_basic_master_and_replica_concurrent_archiving(self): if self.get_version(master) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') replica = self.make_simple_node( @@ -1035,7 +1035,7 @@ def test_concurrent_archiving(self): """ if self.pg_config_version < self.version_to_num('11.0'): - return unittest.skip('You need PostgreSQL >= 11 for this test') + self.skipTest('You need PostgreSQL >= 11 for this test') fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -1196,7 +1196,7 @@ def test_archive_pg_receivexlog_compression_pg10(self): self.add_instance(backup_dir, 'node', node) node.slow_start() if self.get_version(node) < self.version_to_num('10.0'): - return unittest.skip('You need PostgreSQL >= 10 for this test') + self.skipTest('You need PostgreSQL >= 10 for this test') else: pg_receivexlog_path = self.get_bin_path('pg_receivewal') @@ -1278,7 +1278,7 @@ def test_archive_catalog(self): if self.get_version(master) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -1938,7 +1938,7 @@ def test_waldir_outside_pgdata_archiving(self): check that archive-push works correct with symlinked waldir """ if self.pg_config_version < self.version_to_num('10.0'): - return unittest.skip( + self.skipTest( 'Skipped because waldir outside pgdata is supported since PG 10') fname = self.id().split('.')[3] @@ -2176,7 +2176,7 @@ def test_archive_pg_receivexlog_partial_handling(self): if self.get_version(node) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -2385,7 +2385,7 @@ def test_archive_get_batching_sanity(self): if self.get_version(node) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) diff --git a/tests/backup.py b/tests/backup.py index 6028a3ff6..2b099613a 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -1428,7 +1428,7 @@ def test_basic_temp_slot_for_stream_backup(self): pg_options={'max_wal_size': '40MB'}) if self.get_version(node) < self.version_to_num('10.0'): - return unittest.skip('You need PostgreSQL >= 10 for this test') + self.skipTest('You need PostgreSQL >= 10 for this test') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1500,7 +1500,7 @@ def test_backup_concurrent_drop_table(self): def test_pg_11_adjusted_wal_segment_size(self): """""" if self.pg_config_version < self.version_to_num('11.0'): - return unittest.skip('You need PostgreSQL >= 11 for this test') + self.skipTest('You need PostgreSQL >= 11 for this test') fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -1743,7 +1743,7 @@ def test_drop_table(self): def test_basic_missing_file_permissions(self): """""" 
if os.name == 'nt': - return unittest.skip('Skipped because it is POSIX only test') + self.skipTest('Skipped because it is POSIX only test') fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -1790,7 +1790,7 @@ def test_basic_missing_file_permissions(self): def test_basic_missing_dir_permissions(self): """""" if os.name == 'nt': - return unittest.skip('Skipped because it is POSIX only test') + self.skipTest('Skipped because it is POSIX only test') fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') diff --git a/tests/catchup.py b/tests/catchup.py index 7ecd84697..baef9d29f 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -190,7 +190,7 @@ def test_basic_ptrack_catchup(self): Test ptrack catchup """ if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') # preparation 1: source src_pg = self.make_simple_node( @@ -336,7 +336,7 @@ def test_tli_ptrack_catchup(self): Test that we correctly follow timeline change with ptrack catchup """ if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') # preparation 1: source src_pg = self.make_simple_node( @@ -475,7 +475,7 @@ def test_table_drop_with_ptrack(self): Test that dropped table in source will be dropped in ptrack catchup'ed instance too """ if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') # preparation 1: source src_pg = self.make_simple_node( @@ -590,7 +590,7 @@ def test_tablefile_truncation_with_ptrack(self): Test that truncated table in source will be truncated in ptrack catchup'ed instance too """ if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') # preparation 1: source src_pg = self.make_simple_node( @@ -655,7 +655,7 @@ def test_local_tablespace_without_mapping(self): Test that we detect absence of needed --tablespace-mapping option """ if self.remote: - return unittest.skip('Skipped because this test tests local catchup error handling') + self.skipTest('Skipped because this test tests local catchup error handling') src_pg = self.make_simple_node(base_dir = os.path.join(module_name, self.fname, 'src')) src_pg.slow_start() @@ -1035,7 +1035,7 @@ def test_unclean_ptrack_catchup(self): Test that we correctly recover uncleanly shutdowned destination """ if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') # preparation 1: source src_pg = self.make_simple_node( @@ -1507,7 +1507,7 @@ def test_dry_run_catchup_ptrack(self): Test dry-run option for catchup in incremental ptrack mode """ if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') # preparation 1: source src_pg = self.make_simple_node( diff --git a/tests/compatibility.py b/tests/compatibility.py index 6c2bc9204..3b913aba5 100644 --- a/tests/compatibility.py +++ b/tests/compatibility.py @@ -366,7 +366,7 @@ def test_backward_compatibility_ptrack(self): """Description in jira issue PGPRO-434""" if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped 
because ptrack support is disabled') fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') diff --git a/tests/delete.py b/tests/delete.py index 345a70284..55d08f23b 100644 --- a/tests/delete.py +++ b/tests/delete.py @@ -189,7 +189,7 @@ def test_delete_increment_page(self): def test_delete_increment_ptrack(self): """delete increment and all after him""" if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') fname = self.id().split('.')[3] node = self.make_simple_node( diff --git a/tests/external.py b/tests/external.py index 530e7fb26..a4e3d58f4 100644 --- a/tests/external.py +++ b/tests/external.py @@ -1535,7 +1535,7 @@ def test_external_dir_is_symlink(self): but restored as directory """ if os.name == 'nt': - return unittest.skip('Skipped for Windows') + self.skipTest('Skipped for Windows') fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -1618,7 +1618,7 @@ def test_external_dir_contain_symlink_on_dir(self): but restored as directory """ if os.name == 'nt': - return unittest.skip('Skipped for Windows') + self.skipTest('Skipped for Windows') fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -1703,7 +1703,7 @@ def test_external_dir_contain_symlink_on_file(self): but restored as directory """ if os.name == 'nt': - return unittest.skip('Skipped for Windows') + self.skipTest('Skipped for Windows') fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') diff --git a/tests/false_positive.py b/tests/false_positive.py index 2ededdf12..6ffc4db10 100644 --- a/tests/false_positive.py +++ b/tests/false_positive.py @@ -114,7 +114,7 @@ def test_pg_10_waldir(self): test group access for PG >= 11 """ if self.pg_config_version < self.version_to_num('10.0'): - return unittest.skip('You need PostgreSQL >= 10 for this test') + self.skipTest('You need PostgreSQL >= 10 for this test') fname = self.id().split('.')[3] wal_dir = os.path.join( diff --git a/tests/helpers/__init__.py b/tests/helpers/__init__.py index ac64c4230..4ae3ef8c4 100644 --- a/tests/helpers/__init__.py +++ b/tests/helpers/__init__.py @@ -1,2 +1,9 @@ __all__ = ['ptrack_helpers', 'cfs_helpers', 'expected_errors'] -#from . 
import * \ No newline at end of file + +import unittest + +# python 2.7 compatibility +if not hasattr(unittest.TestCase, "skipTest"): + def skipTest(self, reason): + raise unittest.SkipTest(reason) + unittest.TestCase.skipTest = skipTest \ No newline at end of file diff --git a/tests/incr_restore.py b/tests/incr_restore.py index cb684a23a..5bd3711a1 100644 --- a/tests/incr_restore.py +++ b/tests/incr_restore.py @@ -1494,7 +1494,7 @@ def test_make_replica_via_incr_checksum_restore(self): if self.get_version(master) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -1567,7 +1567,7 @@ def test_make_replica_via_incr_lsn_restore(self): if self.get_version(master) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) diff --git a/tests/merge.py b/tests/merge.py index 4c374bdfb..566de549b 100644 --- a/tests/merge.py +++ b/tests/merge.py @@ -796,7 +796,7 @@ def test_merge_ptrack_truncate(self): restore last page backup and check data correctness """ if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') diff --git a/tests/option.py b/tests/option.py index 88e72ffd7..61c60a1dd 100644 --- a/tests/option.py +++ b/tests/option.py @@ -239,5 +239,5 @@ def test_help_6(self): help_out.read().decode("utf-8") ) else: - return unittest.skip( + self.skipTest( 'You need configure PostgreSQL with --enabled-nls option for this test') diff --git a/tests/ptrack.py b/tests/ptrack.py index a01405d6a..c02aba17c 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -16,7 +16,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase): def setUp(self): if self.pg_config_version < self.version_to_num('11.0'): - return unittest.skip('You need PostgreSQL >= 11 for this test') + self.skipTest('You need PostgreSQL >= 11 for this test') self.fname = self.id().split('.')[3] # @unittest.skip("skip") diff --git a/tests/replica.py b/tests/replica.py index ea69e2d01..d5c24fbc1 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -30,7 +30,7 @@ def test_replica_switchover(self): if self.get_version(node1) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -103,10 +103,10 @@ def test_replica_stream_ptrack_backup(self): take full stream backup from replica """ if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') if self.pg_config_version > self.version_to_num('9.6.0'): - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') fname = self.id().split('.')[3] @@ -241,7 +241,7 @@ def test_replica_archive_page_backup(self): if self.get_version(master) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -383,7 +383,7 @@ def test_basic_make_replica_via_restore(self): if self.get_version(master) < 
self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -441,7 +441,7 @@ def test_take_backup_from_delayed_replica(self): if self.get_version(master) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -554,7 +554,7 @@ def test_replica_promote(self): if self.get_version(master) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -645,7 +645,7 @@ def test_replica_stop_lsn_null_offset(self): if self.get_version(master) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -730,7 +730,7 @@ def test_replica_stop_lsn_null_offset_next_record(self): if self.get_version(master) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -833,7 +833,7 @@ def test_archive_replica_null_offset(self): if self.get_version(master) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -917,7 +917,7 @@ def test_archive_replica_not_null_offset(self): if self.get_version(master) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -1006,7 +1006,7 @@ def test_replica_toast(self): if self.get_version(master) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -1108,7 +1108,7 @@ def test_start_stop_lsn_in_the_same_segno(self): if self.get_version(master) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -1186,7 +1186,7 @@ def test_replica_promote_1(self): if self.get_version(master) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -1313,7 +1313,7 @@ def test_replica_promote_archive_delta(self): if self.get_version(node1) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -1438,7 +1438,7 @@ def test_replica_promote_archive_page(self): if self.get_version(node1) < self.version_to_num('9.6.0'): self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -1560,7 +1560,7 @@ def test_parent_choosing(self): if self.get_version(master) < self.version_to_num('9.6.0'): 
self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) diff --git a/tests/restore.py b/tests/restore.py index 49538bd1f..e99f1158e 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -500,7 +500,7 @@ def test_restore_to_lsn_not_inclusive(self): def test_restore_full_ptrack_archive(self): """recovery to latest from archive full+ptrack backups""" if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') fname = self.id().split('.')[3] node = self.make_simple_node( @@ -554,7 +554,7 @@ def test_restore_full_ptrack_archive(self): def test_restore_ptrack(self): """recovery to latest from archive full+ptrack+ptrack backups""" if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') fname = self.id().split('.')[3] node = self.make_simple_node( @@ -615,7 +615,7 @@ def test_restore_ptrack(self): def test_restore_full_ptrack_stream(self): """recovery in stream mode to latest from full + ptrack backups""" if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') fname = self.id().split('.')[3] node = self.make_simple_node( @@ -673,7 +673,7 @@ def test_restore_full_ptrack_under_load(self): with loads when ptrack backup do """ if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') fname = self.id().split('.')[3] node = self.make_simple_node( @@ -742,7 +742,7 @@ def test_restore_full_under_load_ptrack(self): with loads when full backup do """ if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') fname = self.id().split('.')[3] node = self.make_simple_node( @@ -2345,7 +2345,7 @@ def test_pg_11_group_access(self): test group access for PG >= 11 """ if self.pg_config_version < self.version_to_num('11.0'): - return unittest.skip('You need PostgreSQL >= 11 for this test') + self.skipTest('You need PostgreSQL >= 11 for this test') fname = self.id().split('.')[3] node = self.make_simple_node( @@ -3680,7 +3680,7 @@ def test_pg_12_probackup_recovery_conf_compatibility(self): self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") if self.pg_config_version < self.version_to_num('12.0'): - return unittest.skip('You need PostgreSQL >= 12 for this test') + self.skipTest('You need PostgreSQL >= 12 for this test') if self.version_to_num(self.old_probackup_version) >= self.version_to_num('2.4.5'): self.assertTrue(False, 'You need pg_probackup < 2.4.5 for this test') @@ -3752,7 +3752,7 @@ def test_drop_postgresql_auto_conf(self): """ if self.pg_config_version < self.version_to_num('12.0'): - return unittest.skip('You need PostgreSQL >= 12 for this test') + self.skipTest('You need PostgreSQL >= 12 for this test') fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -3797,7 +3797,7 @@ def test_truncate_postgresql_auto_conf(self): """ if self.pg_config_version < self.version_to_num('12.0'): - return unittest.skip('You need PostgreSQL >= 12 for this test') + self.skipTest('You need PostgreSQL >= 12 for this test') fname = self.id().split('.')[3] backup_dir = 
os.path.join(self.tmp_path, module_name, fname, 'backup') diff --git a/tests/retention.py b/tests/retention.py index 122ab28ad..4f95ad5e2 100644 --- a/tests/retention.py +++ b/tests/retention.py @@ -1603,7 +1603,7 @@ def test_retention_redundancy_overlapping_chains(self): if self.get_version(node) < 90600: self.del_test_dir(module_name, fname) - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) @@ -1653,7 +1653,7 @@ def test_retention_redundancy_overlapping_chains_1(self): if self.get_version(node) < 90600: self.del_test_dir(module_name, fname) - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) diff --git a/tests/time_consuming.py b/tests/time_consuming.py index c778b9bc3..8908dfd34 100644 --- a/tests/time_consuming.py +++ b/tests/time_consuming.py @@ -16,9 +16,9 @@ def test_pbckp150(self): """ # init node if self.pg_config_version < self.version_to_num('11.0'): - return unittest.skip('You need PostgreSQL >= 11 for this test') + self.skipTest('You need PostgreSQL >= 11 for this test') if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') fname = self.id().split('.')[3] node = self.make_simple_node( From 348283b3074e286e779e221d2cb044e616898bcd Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 15 Nov 2022 13:43:09 +0300 Subject: [PATCH 379/525] update version to 2.5.10 --- src/pg_probackup.h | 2 +- tests/expected/option_version.out | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 6aeba189e..44b33d16f 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -345,7 +345,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.9" +#define PROGRAM_VERSION "2.5.10" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20509 diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index 7c9fcbfe0..8abfe7fdd 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.5.9 +pg_probackup 2.5.10 From 9924ab0142f9a3192104eff143fd87613e1648dd Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Tue, 15 Nov 2022 17:45:28 +0300 Subject: [PATCH 380/525] [PBCKP-304] extended testgres.PosgresNode to filter excessive close() and its log entries calls --- tests/helpers/ptrack_helpers.py | 36 +++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 8 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 9e2a10c24..bd65f7962 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -140,7 +140,7 @@ def __str__(self): return '\n ERROR: {0}\n CMD: {1}'.format(repr(self.message), self.cmd) -def slow_start(self, replica=False): +def _slow_start(self, replica=False): # wait for https://github.com/postgrespro/testgres/pull/50 # self.start() @@ -174,7 +174,7 @@ def slow_start(self, 
replica=False): sleep(0.5) -def is_test_result_ok(test_case): +def _is_test_result_ok(test_case): # sources of solution: # 1. python versions 2.7 - 3.10, verified on 3.10, 3.7, 2.7, taken from: # https://tousu.in/qa/?qa=555402/unit-testing-getting-pythons-unittest-results-in-a-teardown-method&show=555403#a555403 @@ -197,6 +197,28 @@ def is_test_result_ok(test_case): return ok +class PostgresNodeExtended(testgres.PostgresNode): + + def __init__(self, base_dir=None, *args, **kwargs): + super(PostgresNodeExtended, self).__init__(name='test', base_dir=base_dir, *args, **kwargs) + self.is_started = False + + def slow_start(self, replica=False): + _slow_start(self, replica=replica) + + def start(self, *args, **kwargs): + if not self.is_started: + super(PostgresNodeExtended, self).start(*args, **kwargs) + self.is_started = True + return self + + def stop(self, *args, **kwargs): + if self.is_started: + result = super(PostgresNodeExtended, self).stop(*args, **kwargs) + self.is_started = False + return result + + class ProbackupTest(object): # Class attributes enterprise = is_enterprise() @@ -375,7 +397,7 @@ def __init__(self, *args, **kwargs): os.environ["PGAPPNAME"] = "pg_probackup" def tearDown(self): - if is_test_result_ok(self): + if _is_test_result_ok(self): for node in self.nodes_to_cleanup: node.cleanup() self.del_test_dir(self.module_name, self.fname) @@ -418,10 +440,10 @@ def make_empty_node( shutil.rmtree(real_base_dir, ignore_errors=True) os.makedirs(real_base_dir) - node = testgres.get_new_node('test', base_dir=real_base_dir) - # bound method slow_start() to 'node' class instance - node.slow_start = slow_start.__get__(node) + node = PostgresNodeExtended(base_dir=real_base_dir) node.should_rm_dirs = True + self.nodes_to_cleanup.append(node) + return node def make_simple_node( @@ -485,8 +507,6 @@ def make_simple_node( self.set_auto_conf( node, {}, 'postgresql.conf', ['wal_keep_segments']) - self.nodes_to_cleanup.append(node) - return node def simple_bootstrap(self, node, role) -> None: From e3189e425db7917dbdde862787085b64afb30183 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Tue, 15 Nov 2022 17:56:40 +0300 Subject: [PATCH 381/525] [PBCKP-304] missed self.module_name & self.fname fixes --- tests/cfs_catchup.py | 14 ++------------ tests/cfs_validate_backup.py | 1 - tests/delta.py | 2 +- tests/locking.py | 2 +- tests/page.py | 2 +- tests/ptrack.py | 2 +- tests/validate.py | 3 +-- 7 files changed, 7 insertions(+), 19 deletions(-) diff --git a/tests/cfs_catchup.py b/tests/cfs_catchup.py index 2cbb46729..43c3f18f1 100644 --- a/tests/cfs_catchup.py +++ b/tests/cfs_catchup.py @@ -6,13 +6,8 @@ from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -module_name = 'cfs_catchup' -tblspace_name = 'cfs_tblspace' - class CfsCatchupNoEncTest(ProbackupTest, unittest.TestCase): - def setUp(self): - self.fname = self.id().split('.')[3] @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_full_catchup_with_tablespace(self): @@ -21,7 +16,7 @@ def test_full_catchup_with_tablespace(self): """ # preparation src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True ) src_pg.slow_start() @@ -36,7 +31,7 @@ def test_full_catchup_with_tablespace(self): "CHECKPOINT") # do full catchup with tablespace mapping - dst_pg = 
self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) tblspace1_new_path = self.get_tblspace_path(dst_pg, 'tblspace1_new') self.catchup_node( backup_mode = 'FULL', @@ -120,8 +115,3 @@ def test_full_catchup_with_tablespace(self): src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') - - # Cleanup - src_pg.stop() - dst_pg.stop() - self.del_test_dir(module_name, self.fname) diff --git a/tests/cfs_validate_backup.py b/tests/cfs_validate_backup.py index eea6f0e21..343020dfc 100644 --- a/tests/cfs_validate_backup.py +++ b/tests/cfs_validate_backup.py @@ -5,7 +5,6 @@ from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -module_name = 'cfs_validate_backup' tblspace_name = 'cfs_tblspace' diff --git a/tests/delta.py b/tests/delta.py index 386403151..23583fd93 100644 --- a/tests/delta.py +++ b/tests/delta.py @@ -1191,7 +1191,7 @@ def test_delta_pg_resetxlog(self): # pgdata = self.pgdata_content(node.data_dir) # # node_restored = self.make_simple_node( -# base_dir=os.path.join(module_name, fname, 'node_restored')) +# base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) # node_restored.cleanup() # # self.restore_node( diff --git a/tests/locking.py b/tests/locking.py index 8531d7de5..5367c2610 100644 --- a/tests/locking.py +++ b/tests/locking.py @@ -579,7 +579,7 @@ def test_shared_lock(self): self._check_gdb_flag_or_skip_test() node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.name, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') diff --git a/tests/page.py b/tests/page.py index b9398ec7a..e77e5c827 100644 --- a/tests/page.py +++ b/tests/page.py @@ -1414,7 +1414,7 @@ def test_page_pg_resetxlog(self): # pgdata = self.pgdata_content(node.data_dir) # # node_restored = self.make_simple_node( -# base_dir=os.path.join(module_name, fname, 'node_restored')) +# base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) # node_restored.cleanup() # # self.restore_node( diff --git a/tests/ptrack.py b/tests/ptrack.py index 74db2e554..b5eb95baf 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -4172,7 +4172,7 @@ def test_ptrack_pg_resetxlog(self): # pgdata = self.pgdata_content(node.data_dir) # # node_restored = self.make_simple_node( -# base_dir=os.path.join(module_name, self.fname, 'node_restored')) +# base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) # node_restored.cleanup() # # self.restore_node( diff --git a/tests/validate.py b/tests/validate.py index 5c3e31fe3..98a0fd13f 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -2789,13 +2789,12 @@ def test_validate_specific_backup_with_missing_backup(self): PAGE1_1 FULL1 """ - fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, self.module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) 
self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) From af4fb2e8c6bf0c5f18fa862b7eb09e2013752ae0 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Wed, 16 Nov 2022 02:00:05 +0300 Subject: [PATCH 382/525] [PBCKP-304] moved _is_result_is_ok() into ProbackupTest class scope --- tests/helpers/ptrack_helpers.py | 50 +++++++++++++++++---------------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index bd65f7962..be5d1fcb4 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -174,29 +174,6 @@ def _slow_start(self, replica=False): sleep(0.5) -def _is_test_result_ok(test_case): - # sources of solution: - # 1. python versions 2.7 - 3.10, verified on 3.10, 3.7, 2.7, taken from: - # https://tousu.in/qa/?qa=555402/unit-testing-getting-pythons-unittest-results-in-a-teardown-method&show=555403#a555403 - # - # 2. python versions 3.11+ mixin, verified on 3.11, taken from: https://stackoverflow.com/a/39606065 - - if hasattr(test_case, '_outcome'): # Python 3.4+ - if hasattr(test_case._outcome, 'errors'): - # Python 3.4 - 3.10 (These two methods have no side effects) - result = test_case.defaultTestResult() # These two methods have no side effects - test_case._feedErrorsToResult(result, test_case._outcome.errors) - else: - # Python 3.11+ - result = test_case._outcome.result - else: # Python 2.7, 3.0-3.3 - result = getattr(test_case, '_outcomeForDoCleanups', test_case._resultForDoCleanups) - - ok = all(test != test_case for test, text in result.errors + result.failures) - - return ok - - class PostgresNodeExtended(testgres.PostgresNode): def __init__(self, base_dir=None, *args, **kwargs): @@ -396,8 +373,33 @@ def __init__(self, *args, **kwargs): os.environ["PGAPPNAME"] = "pg_probackup" + def __is_test_result_ok(test_case): + # sources of solution: + # 1. python versions 2.7 - 3.10, verified on 3.10, 3.7, 2.7, taken from: + # https://tousu.in/qa/?qa=555402/unit-testing-getting-pythons-unittest-results-in-a-teardown-method&show=555403#a555403 + # + # 2. 
python versions 3.11+ mixin, verified on 3.11, taken from: https://stackoverflow.com/a/39606065 + + if not isinstance(test_case, unittest.TestCase): + raise AssertionError("test_case is not instance of unittest.TestCase") + + if hasattr(test_case, '_outcome'): # Python 3.4+ + if hasattr(test_case._outcome, 'errors'): + # Python 3.4 - 3.10 (These two methods have no side effects) + result = test_case.defaultTestResult() # These two methods have no side effects + test_case._feedErrorsToResult(result, test_case._outcome.errors) + else: + # Python 3.11+ + result = test_case._outcome.result + else: # Python 2.7, 3.0-3.3 + result = getattr(test_case, '_outcomeForDoCleanups', test_case._resultForDoCleanups) + + ok = all(test != test_case for test, text in result.errors + result.failures) + + return ok + def tearDown(self): - if _is_test_result_ok(self): + if self.__is_test_result_ok(): for node in self.nodes_to_cleanup: node.cleanup() self.del_test_dir(self.module_name, self.fname) From 8c670f0f9b3e1358976e52e67518d18f305bd973 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 16 Nov 2022 13:43:20 +0300 Subject: [PATCH 383/525] move test_basic_full_backup => test_full_backup run it in separate travis action + add same test with stream replication --- .travis.yml | 3 ++- tests/backup.py | 34 +++++++++++++++++++++++++++++++++- 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index bd3c8a09a..8315f7842 100644 --- a/.travis.yml +++ b/.travis.yml @@ -35,7 +35,8 @@ env: - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=archive + - PG_VERSION=15 PG_BRANCH=REL_15_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=backup.BackupTest.test_full_backup + - PG_VERSION=15 PG_BRANCH=REL_15_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=backup.BackupTest.test_full_backup_stream # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=checkdb diff --git a/tests/backup.py b/tests/backup.py index ae7852da9..085476f6d 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -13,7 +13,7 @@ class BackupTest(ProbackupTest, unittest.TestCase): - def test_basic_full_backup(self): + def test_full_backup(self): """ Just test full backup with at least two segments """ @@ -45,6 +45,38 @@ def test_basic_full_backup(self): # Clean after yourself self.del_test_dir(module_name, fname) + def test_full_backup_stream(self): + """ + Just test full backup with at least two segments in stream mode + """ + fname = self.id().split('.')[3] + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums'], + # we need to write a lot. Lets speedup a bit. + pg_options={"fsync": "off", "synchronous_commit": "off"}) + + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # Fill with data + # Have to use scale=100 to create second segment. 
+ node.pgbench_init(scale=100, no_vacuum=True) + + # FULL + backup_id = self.backup_node(backup_dir, 'node', node, + options=["--stream"]) + + out = self.validate_pb(backup_dir, 'node', backup_id) + self.assertIn( + "INFO: Backup {0} is valid".format(backup_id), + out) + + # Clean after yourself + self.del_test_dir(module_name, fname) + # @unittest.skip("skip") # @unittest.expectedFailure # PGPRO-707 From 4c823b39303696e32d620b5854018b1ff282d576 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 16 Nov 2022 16:03:52 +0300 Subject: [PATCH 384/525] ... --- tests/helpers/ptrack_helpers.py | 71 +++++++++++++++------------------ 1 file changed, 33 insertions(+), 38 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index bd4f20946..e35f57bce 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -139,41 +139,6 @@ def __init__(self, message, cmd): def __str__(self): return '\n ERROR: {0}\n CMD: {1}'.format(repr(self.message), self.cmd) - -def _slow_start(self, replica=False): - - # wait for https://github.com/postgrespro/testgres/pull/50 -# self.start() -# self.poll_query_until( -# "postgres", -# "SELECT not pg_is_in_recovery()", -# suppress={testgres.NodeConnection}) - if replica: - query = 'SELECT pg_is_in_recovery()' - else: - query = 'SELECT not pg_is_in_recovery()' - - self.start() - while True: - try: - output = self.safe_psql('template1', query).decode("utf-8").rstrip() - - if output == 't': - break - - except testgres.QueryException as e: - if 'database system is starting up' in e.message: - pass - elif 'FATAL: the database system is not accepting connections' in e.message: - pass - elif replica and 'Hot standby mode is disabled' in e.message: - raise e - else: - raise e - - sleep(0.5) - - class PostgresNodeExtended(testgres.PostgresNode): def __init__(self, base_dir=None, *args, **kwargs): @@ -181,7 +146,37 @@ def __init__(self, base_dir=None, *args, **kwargs): self.is_started = False def slow_start(self, replica=False): - _slow_start(self, replica=replica) + + # wait for https://github.com/postgrespro/testgres/pull/50 + # self.start() + # self.poll_query_until( + # "postgres", + # "SELECT not pg_is_in_recovery()", + # suppress={testgres.NodeConnection}) + if replica: + query = 'SELECT pg_is_in_recovery()' + else: + query = 'SELECT not pg_is_in_recovery()' + + self.start() + while True: + try: + output = self.safe_psql('template1', query).decode("utf-8").rstrip() + + if output == 't': + break + + except testgres.QueryException as e: + if 'database system is starting up' in e.message: + pass + elif 'FATAL: the database system is not accepting connections' in e.message: + pass + elif replica and 'Hot standby mode is disabled' in e.message: + raise e + else: + raise e + + sleep(0.5) def start(self, *args, **kwargs): if not self.is_started: @@ -370,7 +365,7 @@ def __init__(self, *args, **kwargs): os.environ["PGAPPNAME"] = "pg_probackup" - def __is_test_result_ok(test_case): + def is_test_result_ok(test_case): # sources of solution: # 1. 
python versions 2.7 - 3.10, verified on 3.10, 3.7, 2.7, taken from: # https://tousu.in/qa/?qa=555402/unit-testing-getting-pythons-unittest-results-in-a-teardown-method&show=555403#a555403 @@ -396,7 +391,7 @@ def __is_test_result_ok(test_case): return ok def tearDown(self): - if self.__is_test_result_ok(): + if self.is_test_result_ok(): for node in self.nodes_to_cleanup: node.cleanup() self.del_test_dir(self.module_name, self.fname) From 97f3e70edb3a8adbfce75a86593e1a453668a198 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 17 Nov 2022 12:55:15 +0300 Subject: [PATCH 385/525] asyncio.sleep -> time.sleep --- tests/false_positive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/false_positive.py b/tests/false_positive.py index 09dc323a8..fbb785c60 100644 --- a/tests/false_positive.py +++ b/tests/false_positive.py @@ -1,6 +1,6 @@ import unittest import os -from asyncio import sleep +from time import sleep from .helpers.ptrack_helpers import ProbackupTest, ProbackupException from datetime import datetime, timedelta From 8f20e7eb5899c96063b39eca6cd14e0570b8c1db Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Thu, 17 Nov 2022 18:50:17 +0300 Subject: [PATCH 386/525] Fixed link in README.md file [ci skip] --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index bae1171cb..c5b01ced2 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,7 @@ For detailed release plans check [Milestones](https://github.com/postgrespro/pg_ ## Installation and Setup ### Windows Installation -Installers are available in release **assets**. [Latests](https://github.com/postgrespro/pg_probackup/releases/2.4.15). +Installers are available in release **assets**. [Latests](https://github.com/postgrespro/pg_probackup/releases/latest). ### Linux Installation #### pg_probackup for vanilla PostgreSQL From eed28135b91e790956eab9021fe65bb48989e8e3 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Wed, 23 Nov 2022 13:47:26 +0300 Subject: [PATCH 387/525] hotfix: cfs_restore.py decorated to postgres enterprise only --- tests/cfs_restore.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/cfs_restore.py b/tests/cfs_restore.py index 660cef9c6..6b69b4ffe 100644 --- a/tests/cfs_restore.py +++ b/tests/cfs_restore.py @@ -20,6 +20,7 @@ class CfsRestoreBase(ProbackupTest, unittest.TestCase): + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def setUp(self): self.backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') @@ -60,6 +61,7 @@ def add_data_in_cluster(self): class CfsRestoreNoencEmptyTablespaceTest(CfsRestoreBase): # @unittest.expectedFailure # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_restore_empty_tablespace_from_fullbackup(self): """ Case: Restore empty tablespace from valid full backup. @@ -118,6 +120,7 @@ def add_data_in_cluster(self): # --- Restore from full backup ---# # @unittest.expectedFailure # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_restore_from_fullbackup_to_old_location(self): """ Case: Restore instance from valid full backup to old location. 
@@ -157,6 +160,7 @@ def test_restore_from_fullbackup_to_old_location(self): # @unittest.expectedFailure # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_restore_from_fullbackup_to_old_location_3_jobs(self): """ Case: Restore instance from valid full backup to old location. @@ -195,6 +199,7 @@ def test_restore_from_fullbackup_to_old_location_3_jobs(self): # @unittest.expectedFailure # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_restore_from_fullbackup_to_new_location(self): """ Case: Restore instance from valid full backup to new location. @@ -238,6 +243,7 @@ def test_restore_from_fullbackup_to_new_location(self): # @unittest.expectedFailure # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_restore_from_fullbackup_to_new_location_5_jobs(self): """ Case: Restore instance from valid full backup to new location. @@ -281,6 +287,7 @@ def test_restore_from_fullbackup_to_new_location_5_jobs(self): # @unittest.expectedFailure # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_restore_from_fullbackup_to_old_location_tablespace_new_location(self): self.node.stop() self.node.cleanup() @@ -327,6 +334,7 @@ def test_restore_from_fullbackup_to_old_location_tablespace_new_location(self): # @unittest.expectedFailure # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_restore_from_fullbackup_to_old_location_tablespace_new_location_3_jobs(self): self.node.stop() self.node.cleanup() From 22e6c408fe99d05874f49d6a8093157420dcd1ac Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 23 Nov 2022 16:51:31 +0300 Subject: [PATCH 388/525] base36enc: abuse C99 compound literals lifetime. C99 introduced compound literals (`(char[14]){0}` - literal of array). Compound literals have same lifetime as block local variable, ie till the end of block. There for it is save to initiate several of them in same block and assume they are all live. This way we may rewrite base36enc into macros which uses compound literal instead of static variable to extend its result lifetime. 
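For illustration only, a minimal standalone sketch of the same trick with
placeholder names (enc/enc_to are stand-ins, not the real pg_probackup
functions):

    #include <stdio.h>

    /* Encoder writes into a caller-supplied buffer and returns it. */
    static const char *enc_to(unsigned long value, char buf[14])
    {
        snprintf(buf, 14, "%lX", value);
        return buf;
    }

    /* The compound literal (char[14]){0} lives until the end of the
     * enclosing block, so every expansion gets its own buffer and several
     * expansions may safely appear in one statement. */
    #define enc(value) (enc_to((value), (char[14]){0}))

    int main(void)
    {
        printf("parent %s, child %s\n", enc(123456UL), enc(654321UL));
        return 0;
    }

With the old approach of a static buffer inside the function, the two enc()
calls in the printf() above would have printed the same string twice.
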
--- src/backup.c | 2 +- src/catalog.c | 11 ++--------- src/delete.c | 18 ++++++------------ src/merge.c | 29 ++++++++--------------------- src/pg_probackup.h | 6 ++++-- src/restore.c | 15 +++++++-------- src/util.c | 33 +++++++++------------------------ src/utils/pgut.h | 6 ++++++ src/validate.c | 25 ++++++++++++------------- 9 files changed, 55 insertions(+), 90 deletions(-) diff --git a/src/backup.c b/src/backup.c index c73ee56c7..639dda63d 100644 --- a/src/backup.c +++ b/src/backup.c @@ -735,7 +735,7 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, /* don't care about freeing base36enc_dup memory, we exit anyway */ elog(ERROR, "Can't assign backup_id from requested start_time (%s), " "this time must be later that backup %s", - base36enc_dup(start_time), base36enc_dup(latest_backup_id)); + base36enc(start_time), base36enc(latest_backup_id)); current.backup_id = start_time; pgBackupInitDir(¤t, instanceState->instance_backup_subdir_path); diff --git a/src/catalog.c b/src/catalog.c index 488d7349f..7ad62b5d1 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1219,7 +1219,6 @@ catalog_get_last_data_backup(parray *backup_list, TimeLineID tli, time_t current int i; pgBackup *full_backup = NULL; pgBackup *tmp_backup = NULL; - char *invalid_backup_id; /* backup_list is sorted in order of descending ID */ for (i = 0; i < parray_num(backup_list); i++) @@ -1255,20 +1254,14 @@ catalog_get_last_data_backup(parray *backup_list, TimeLineID tli, time_t current { /* broken chain */ case ChainIsBroken: - invalid_backup_id = base36enc_dup(tmp_backup->parent_backup); - elog(WARNING, "Backup %s has missing parent: %s. Cannot be a parent", - base36enc(backup->start_time), invalid_backup_id); - pg_free(invalid_backup_id); + base36enc(backup->start_time), base36enc(tmp_backup->parent_backup)); continue; /* chain is intact, but at least one parent is invalid */ case ChainIsInvalid: - invalid_backup_id = base36enc_dup(tmp_backup->start_time); - elog(WARNING, "Backup %s has invalid parent: %s. Cannot be a parent", - base36enc(backup->start_time), invalid_backup_id); - pg_free(invalid_backup_id); + base36enc(backup->start_time), base36enc(tmp_backup->start_time)); continue; /* chain is ok */ diff --git a/src/delete.c b/src/delete.c index b86ed43e6..a1ab81331 100644 --- a/src/delete.c +++ b/src/delete.c @@ -451,7 +451,6 @@ do_retention_merge(InstanceState *instanceState, parray *backup_list, /* Merging happens here */ for (i = 0; i < parray_num(to_keep_list); i++) { - char *keep_backup_id = NULL; pgBackup *full_backup = NULL; parray *merge_list = NULL; @@ -488,10 +487,9 @@ do_retention_merge(InstanceState *instanceState, parray *backup_list, * backups from purge_list. 
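The lazy caching boils down to something like the following sketch (the field
and helper names are illustrative assumptions; only the overall shape is
implied by this patch):

    #define DEMO_ID_BUFSIZE 14

    typedef struct DemoBackup
    {
        unsigned long start_time;
        char          backup_id_str[DEMO_ID_BUFSIZE]; /* zeroed at allocation */
    } DemoBackup;

    /* Assumed encoder with the same shape as base36enc_to(). */
    extern const char *base36enc_to(unsigned long value, char buf[DEMO_ID_BUFSIZE]);

    const char *
    demo_backup_id_of(DemoBackup *backup)
    {
        /* Encode start_time once, on first use, and reuse the cached
         * string on every later call. */
        if (backup->backup_id_str[0] == '\0')
            base36enc_to(backup->start_time, backup->backup_id_str);
        return backup->backup_id_str;
    }

Since pgBackup allocation switches to pgut_new0() in this patch, the cached
field starts out zeroed and the first-use check stays cheap.
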
*/ - keep_backup_id = base36enc_dup(keep_backup->start_time); elog(INFO, "Merge incremental chain between full backup %s and backup %s", - base36enc(full_backup->start_time), keep_backup_id); - pg_free(keep_backup_id); + base36enc(full_backup->start_time), + base36enc(keep_backup->start_time)); merge_list = parray_new(); @@ -599,8 +597,6 @@ do_retention_purge(parray *to_keep_list, parray *to_purge_list) */ for (i = 0; i < parray_num(to_keep_list); i++) { - char *keeped_backup_id; - pgBackup *keep_backup = (pgBackup *) parray_get(to_keep_list, i); /* item could have been nullified in merge */ @@ -611,10 +607,9 @@ do_retention_purge(parray *to_keep_list, parray *to_purge_list) if (keep_backup->backup_mode == BACKUP_MODE_FULL) continue; - keeped_backup_id = base36enc_dup(keep_backup->start_time); - elog(LOG, "Check if backup %s is parent of backup %s", - base36enc(delete_backup->start_time), keeped_backup_id); + base36enc(delete_backup->start_time), + base36enc(keep_backup->start_time)); if (is_parent(delete_backup->start_time, keep_backup, true)) { @@ -622,13 +617,12 @@ do_retention_purge(parray *to_keep_list, parray *to_purge_list) /* We must not delete this backup, evict it from purge list */ elog(LOG, "Retain backup %s because his " "descendant %s is guarded by retention", - base36enc(delete_backup->start_time), keeped_backup_id); + base36enc(delete_backup->start_time), + base36enc(keep_backup->start_time)); purge = false; - pg_free(keeped_backup_id); break; } - pg_free(keeped_backup_id); } /* Retain backup */ diff --git a/src/merge.c b/src/merge.c index 79498f48c..ac1e97e71 100644 --- a/src/merge.c +++ b/src/merge.c @@ -223,11 +223,9 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool } if (!dest_backup) { - char *tmp_backup_id = base36enc_dup(full_backup->start_time); elog(ERROR, "Full backup %s has unfinished merge with missing backup %s", - tmp_backup_id, + base36enc(full_backup->start_time), base36enc(full_backup->merge_dest_backup)); - pg_free(tmp_backup_id); } } else if (full_backup->status == BACKUP_STATUS_MERGED) @@ -253,11 +251,9 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool } if (!dest_backup) { - char *tmp_backup_id = base36enc_dup(full_backup->start_time); elog(WARNING, "Full backup %s has unfinished merge with missing backup %s", - tmp_backup_id, + base36enc(full_backup->start_time), base36enc(full_backup->merge_dest_backup)); - pg_free(tmp_backup_id); } } else @@ -344,10 +340,9 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool full_backup->status == BACKUP_STATUS_MERGED) && dest_backup->start_time != full_backup->merge_dest_backup) { - char *tmp_backup_id = base36enc_dup(full_backup->start_time); elog(ERROR, "Full backup %s has unfinished merge with backup %s", - tmp_backup_id, base36enc(full_backup->merge_dest_backup)); - pg_free(tmp_backup_id); + base36enc(full_backup->start_time), + base36enc(full_backup->merge_dest_backup)); } } @@ -441,7 +436,6 @@ merge_chain(InstanceState *instanceState, bool no_validate, bool no_sync) { int i; - char *dest_backup_id; char full_external_prefix[MAXPGPATH]; char full_database_dir[MAXPGPATH]; parray *full_externals = NULL, @@ -487,17 +481,11 @@ merge_chain(InstanceState *instanceState, if (full_backup->merge_dest_backup != INVALID_BACKUP_ID && full_backup->merge_dest_backup != dest_backup->start_time) { - char *merge_dest_backup_current = base36enc_dup(dest_backup->start_time); - char *merge_dest_backup = 
base36enc_dup(full_backup->merge_dest_backup); - elog(ERROR, "Cannot run merge for %s, because full backup %s has " "unfinished merge with backup %s", - merge_dest_backup_current, + base36enc(dest_backup->start_time), base36enc(full_backup->start_time), - merge_dest_backup); - - pg_free(merge_dest_backup_current); - pg_free(merge_dest_backup); + base36enc(full_backup->merge_dest_backup)); } /* @@ -880,9 +868,9 @@ merge_chain(InstanceState *instanceState, /* * Merging finished, now we can safely update ID of the FULL backup */ - dest_backup_id = base36enc_dup(full_backup->merge_dest_backup); elog(INFO, "Rename merged full backup %s to %s", - base36enc(full_backup->start_time), dest_backup_id); + base36enc(full_backup->start_time), + base36enc(full_backup->merge_dest_backup)); full_backup->status = BACKUP_STATUS_OK; full_backup->start_time = full_backup->merge_dest_backup; @@ -891,7 +879,6 @@ merge_chain(InstanceState *instanceState, /* Critical section end */ /* Cleanup */ - pg_free(dest_backup_id); if (threads) { pfree(threads_args); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 44b33d16f..8a12cc488 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1192,8 +1192,10 @@ extern void time2iso(char *buf, size_t len, time_t time, bool utc); extern const char *status2str(BackupStatus status); const char *status2str_color(BackupStatus status); extern BackupStatus str2status(const char *status); -extern const char *base36enc(long unsigned int value); -extern char *base36enc_dup(long unsigned int value); +#define base36bufsize 14 +extern const char *base36enc_to(long unsigned int value, char buf[ARG_SIZE_HINT base36bufsize]); +/* Abuse C99 Compound Literal's lifetime */ +#define base36enc(value) (base36enc_to((value), (char[base36bufsize]){0})) extern long unsigned int base36dec(const char *text); extern uint32 parse_server_version(const char *server_version_str); extern uint32 parse_program_version(const char *program_version); diff --git a/src/restore.c b/src/restore.c index c877290b1..6bd2ad3b4 100644 --- a/src/restore.c +++ b/src/restore.c @@ -76,11 +76,11 @@ static void set_orphan_status(parray *backups, pgBackup *parent_backup) { /* chain is intact, but at least one parent is invalid */ - char *parent_backup_id; + const char *parent_backup_id; int j; /* parent_backup_id is a human-readable backup ID */ - parent_backup_id = base36enc_dup(parent_backup->start_time); + parent_backup_id = base36enc(parent_backup->start_time); for (j = 0; j < parray_num(backups); j++) { @@ -108,7 +108,6 @@ set_orphan_status(parray *backups, pgBackup *parent_backup) } } } - pg_free(parent_backup_id); } /* @@ -348,11 +347,11 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg /* chain is broken, determine missing backup ID * and orphinize all his descendants */ - char *missing_backup_id; + const char *missing_backup_id; time_t missing_backup_start_time; missing_backup_start_time = tmp_backup->parent_backup; - missing_backup_id = base36enc_dup(tmp_backup->parent_backup); + missing_backup_id = base36enc(tmp_backup->parent_backup); for (j = 0; j < parray_num(backups); j++) { @@ -363,22 +362,22 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg */ if (is_parent(missing_backup_start_time, backup, false)) { + const char *backup_id = base36enc(backup->start_time); if (backup->status == BACKUP_STATUS_OK || backup->status == BACKUP_STATUS_DONE) { write_backup_status(backup, BACKUP_STATUS_ORPHAN, true); elog(WARNING, "Backup %s is orphaned 
because his parent %s is missing", - base36enc(backup->start_time), missing_backup_id); + backup_id, missing_backup_id); } else { elog(WARNING, "Backup %s has missing parent %s", - base36enc(backup->start_time), missing_backup_id); + backup_id, missing_backup_id); } } } - pg_free(missing_backup_id); /* No point in doing futher */ elog(ERROR, "%s of backup %s failed.", action, base36enc(dest_backup->start_time)); } diff --git a/src/util.c b/src/util.c index d19877f06..420795d72 100644 --- a/src/util.c +++ b/src/util.c @@ -32,38 +32,23 @@ static const char *statusName[] = }; const char * -base36enc(long unsigned int value) +base36enc_to(long unsigned int value, char buf[ARG_SIZE_HINT base36bufsize]) { const char base36[36] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; /* log(2**64) / log(36) = 12.38 => max 13 char + '\0' */ - static char buffer[14]; - unsigned int offset = sizeof(buffer); + char buffer[14]; + char *p; - buffer[--offset] = '\0'; + p = &buffer[sizeof(buffer)-1]; + *p = '\0'; do { - buffer[--offset] = base36[value % 36]; + *(--p) = base36[value % 36]; } while (value /= 36); - return &buffer[offset]; -} - -/* - * Same as base36enc(), but the result must be released by the user. - */ -char * -base36enc_dup(long unsigned int value) -{ - const char base36[36] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - /* log(2**64) / log(36) = 12.38 => max 13 char + '\0' */ - char buffer[14]; - unsigned int offset = sizeof(buffer); - - buffer[--offset] = '\0'; - do { - buffer[--offset] = base36[value % 36]; - } while (value /= 36); + /* I know, it doesn't look safe */ + strncpy(buf, p, base36bufsize); - return strdup(&buffer[offset]); + return buf; } long unsigned int diff --git a/src/utils/pgut.h b/src/utils/pgut.h index fa0efe816..116ee41c0 100644 --- a/src/utils/pgut.h +++ b/src/utils/pgut.h @@ -108,4 +108,10 @@ extern int sleep(unsigned int seconds); extern int usleep(unsigned int usec); #endif +#ifdef _MSC_VER +#define ARG_SIZE_HINT +#else +#define ARG_SIZE_HINT static +#endif + #endif /* PGUT_H */ diff --git a/src/validate.c b/src/validate.c index 4044ac158..139beabd6 100644 --- a/src/validate.c +++ b/src/validate.c @@ -503,10 +503,12 @@ do_validate_instance(InstanceState *instanceState) /* chain is broken */ if (result == ChainIsBroken) { - char *parent_backup_id; + const char *parent_backup_id; + const char *current_backup_id; /* determine missing backup ID */ - parent_backup_id = base36enc_dup(tmp_backup->parent_backup); + parent_backup_id = base36enc(tmp_backup->parent_backup); + current_backup_id = base36enc(current_backup->parent_backup); corrupted_backup_found = true; /* orphanize current_backup */ @@ -515,15 +517,13 @@ do_validate_instance(InstanceState *instanceState) { write_backup_status(current_backup, BACKUP_STATUS_ORPHAN, true); elog(WARNING, "Backup %s is orphaned because his parent %s is missing", - base36enc(current_backup->start_time), - parent_backup_id); + current_backup_id, parent_backup_id); } else { elog(WARNING, "Backup %s has missing parent %s", - base36enc(current_backup->start_time), parent_backup_id); + current_backup_id, parent_backup_id); } - pg_free(parent_backup_id); continue; } /* chain is whole, but at least one parent is invalid */ @@ -532,23 +532,23 @@ do_validate_instance(InstanceState *instanceState) /* Oldest corrupt backup has a chance for revalidation */ if (current_backup->start_time != tmp_backup->start_time) { - char *backup_id = base36enc_dup(tmp_backup->start_time); + const char *tmp_backup_id = base36enc(tmp_backup->start_time); + const char 
*cur_backup_id = base36enc(current_backup->start_time); /* orphanize current_backup */ if (current_backup->status == BACKUP_STATUS_OK || current_backup->status == BACKUP_STATUS_DONE) { write_backup_status(current_backup, BACKUP_STATUS_ORPHAN, true); elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s", - base36enc(current_backup->start_time), backup_id, + cur_backup_id, tmp_backup_id, status2str(tmp_backup->status)); } else { elog(WARNING, "Backup %s has parent %s with status: %s", - base36enc(current_backup->start_time), backup_id, + cur_backup_id, tmp_backup_id, status2str(tmp_backup->status)); } - pg_free(backup_id); continue; } base_full_backup = find_parent_full_backup(current_backup); @@ -589,7 +589,7 @@ do_validate_instance(InstanceState *instanceState) */ if (current_backup->status != BACKUP_STATUS_OK) { - char *current_backup_id; + const char *current_backup_id; /* This is ridiculous but legal. * PAGE_b2 <- OK * PAGE_a2 <- OK @@ -599,7 +599,7 @@ do_validate_instance(InstanceState *instanceState) */ corrupted_backup_found = true; - current_backup_id = base36enc_dup(current_backup->start_time); + current_backup_id = base36enc(current_backup->start_time); for (j = i - 1; j >= 0; j--) { @@ -619,7 +619,6 @@ do_validate_instance(InstanceState *instanceState) } } } - free(current_backup_id); } /* For every OK backup we try to revalidate all his ORPHAN descendants. */ From 20e12edc807ce59ec4b01234136f91eba9282b8c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 23 Nov 2022 16:51:48 +0300 Subject: [PATCH 389/525] fix one of backup_id output. --- src/backup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backup.c b/src/backup.c index 639dda63d..4daec866e 100644 --- a/src/backup.c +++ b/src/backup.c @@ -604,7 +604,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, "It may indicate that we are trying to backup PostgreSQL instance from the past.", (uint32) (current.stop_lsn >> 32), (uint32) (current.stop_lsn), (uint32) (prev_backup->stop_lsn >> 32), (uint32) (prev_backup->stop_lsn), - base36enc(prev_backup->stop_lsn)); + base36enc(prev_backup->start_time)); /* clean external directories list */ if (external_dirs) From 71f8ccf4cd3df57a2d5e8c11f0a30d3415578d91 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 23 Nov 2022 17:13:38 +0300 Subject: [PATCH 390/525] replace base36enc(backup->start_time) with helper backup_id_of(backup) Lazily store encoded backup_id in backup itself. --- src/backup.c | 8 ++--- src/catalog.c | 56 +++++++++++++++++------------------ src/data.c | 4 +-- src/delete.c | 42 +++++++++++++------------- src/dir.c | 4 +-- src/merge.c | 48 +++++++++++++++--------------- src/parsexlog.c | 14 ++++----- src/pg_probackup.h | 9 ++++-- src/restore.c | 74 ++++++++++++++++++++++------------------------ src/show.c | 6 ++-- src/util.c | 13 ++++++-- src/validate.c | 44 +++++++++++++-------------- 12 files changed, 163 insertions(+), 159 deletions(-) diff --git a/src/backup.c b/src/backup.c index 4daec866e..5097f46ec 100644 --- a/src/backup.c +++ b/src/backup.c @@ -190,9 +190,9 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, elog(ERROR, "pg_probackup binary version is %s, but backup %s version is %s. " "pg_probackup do not guarantee to be forward compatible. 
" "Please upgrade pg_probackup binary.", - PROGRAM_VERSION, base36enc(prev_backup->start_time), prev_backup->program_version); + PROGRAM_VERSION, backup_id_of(prev_backup), prev_backup->program_version); - elog(INFO, "Parent backup: %s", base36enc(prev_backup->start_time)); + elog(INFO, "Parent backup: %s", backup_id_of(prev_backup)); /* Files of previous backup needed by DELTA backup */ prev_backup_filelist = get_backup_filelist(prev_backup, true); @@ -233,7 +233,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, "It may indicate that we are trying to backup PostgreSQL instance from the past.", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn), (uint32) (prev_backup->start_lsn >> 32), (uint32) (prev_backup->start_lsn), - base36enc(prev_backup->start_time)); + backup_id_of(prev_backup)); /* Update running backup meta with START LSN */ write_backup(¤t, true); @@ -604,7 +604,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, "It may indicate that we are trying to backup PostgreSQL instance from the past.", (uint32) (current.stop_lsn >> 32), (uint32) (current.stop_lsn), (uint32) (prev_backup->stop_lsn >> 32), (uint32) (prev_backup->stop_lsn), - base36enc(prev_backup->start_time)); + backup_id_of(prev_backup)); /* clean external directories list */ if (external_dirs) diff --git a/src/catalog.c b/src/catalog.c index 7ad62b5d1..d19e4a27d 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -153,7 +153,7 @@ write_backup_status(pgBackup *backup, BackupStatus status, /* lock backup in exclusive mode */ if (!lock_backup(tmp, strict, true)) - elog(ERROR, "Cannot lock backup %s directory", base36enc(backup->start_time)); + elog(ERROR, "Cannot lock backup %s directory", backup_id_of(backup)); write_backup(tmp, strict); @@ -193,7 +193,7 @@ lock_backup(pgBackup *backup, bool strict, bool exclusive) join_path_components(lock_file, backup->root_dir, BACKUP_LOCK_FILE); - rc = grab_excl_lock_file(backup->root_dir, base36enc(backup->start_time), strict); + rc = grab_excl_lock_file(backup->root_dir, backup_id_of(backup), strict); if (rc == LOCK_FAIL_TIMEOUT) return false; @@ -258,7 +258,7 @@ lock_backup(pgBackup *backup, bool strict, bool exclusive) * freed some space on filesystem, thanks to unlinking of BACKUP_RO_LOCK_FILE. * If somebody concurrently acquired exclusive lock file first, then we should give up. 
*/ - if (grab_excl_lock_file(backup->root_dir, base36enc(backup->start_time), strict) == LOCK_FAIL_TIMEOUT) + if (grab_excl_lock_file(backup->root_dir, backup_id_of(backup), strict) == LOCK_FAIL_TIMEOUT) return false; return true; @@ -521,7 +521,7 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) } // elog(LOG, "Acquired exclusive lock for backup %s after %ds", -// base36enc(backup->start_time), +// backup_id_of(backup), // LOCK_TIMEOUT - ntries + LOCK_STALE_TIMEOUT - empty_tries); return LOCK_OK; @@ -561,7 +561,7 @@ wait_shared_owners(pgBackup *backup) { if (interrupted) elog(ERROR, "Interrupted while locking backup %s", - base36enc(backup->start_time)); + backup_id_of(backup)); if (encoded_pid == my_pid) break; @@ -573,10 +573,10 @@ wait_shared_owners(pgBackup *backup) if ((ntries % LOG_FREQ) == 0) { elog(WARNING, "Process %d is using backup %s in shared mode, and is still running", - encoded_pid, base36enc(backup->start_time)); + encoded_pid, backup_id_of(backup)); elog(WARNING, "Waiting %u seconds on lock for backup %s", ntries, - base36enc(backup->start_time)); + backup_id_of(backup)); } sleep(1); @@ -604,7 +604,7 @@ wait_shared_owners(pgBackup *backup) if (ntries <= 0) { elog(WARNING, "Cannot to lock backup %s in exclusive mode, because process %u owns shared lock", - base36enc(backup->start_time), encoded_pid); + backup_id_of(backup), encoded_pid); return 1; } @@ -963,15 +963,15 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id if (!backup) { - backup = pgut_new(pgBackup); + backup = pgut_new0(pgBackup); pgBackupInit(backup); backup->start_time = base36dec(data_ent->d_name); } - else if (strcmp(base36enc(backup->start_time), data_ent->d_name) != 0) + else if (strcmp(backup_id_of(backup), data_ent->d_name) != 0) { /* TODO there is no such guarantees */ elog(WARNING, "backup ID in control file \"%s\" doesn't match name of the backup folder \"%s\"", - base36enc(backup->start_time), backup_conf_path); + backup_id_of(backup), backup_conf_path); } backup->root_dir = pgut_strdup(data_path); @@ -1010,7 +1010,7 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id { pgBackup *curr = parray_get(backups, i); pgBackup **ancestor; - pgBackup key; + pgBackup key = {0}; if (curr->backup_mode == BACKUP_MODE_FULL) continue; @@ -1180,7 +1180,7 @@ get_backup_filelist(pgBackup *backup, bool strict) /* redundant sanity? */ if (!files) - elog(strict ? ERROR : WARNING, "Failed to get file list for backup %s", base36enc(backup->start_time)); + elog(strict ? ERROR : WARNING, "Failed to get file list for backup %s", backup_id_of(backup)); return files; } @@ -1206,7 +1206,7 @@ catalog_lock_backup_list(parray *backup_list, int from_idx, int to_idx, bool str pgBackup *backup = (pgBackup *) parray_get(backup_list, i); if (!lock_backup(backup, strict, exclusive)) elog(ERROR, "Cannot lock backup %s directory", - base36enc(backup->start_time)); + backup_id_of(backup)); } } @@ -1239,7 +1239,7 @@ catalog_get_last_data_backup(parray *backup_list, TimeLineID tli, time_t current return NULL; elog(LOG, "Latest valid FULL backup: %s", - base36enc(full_backup->start_time)); + backup_id_of(full_backup)); /* FULL backup is found, lets find his latest child */ for (i = 0; i < parray_num(backup_list); i++) @@ -1255,13 +1255,13 @@ catalog_get_last_data_backup(parray *backup_list, TimeLineID tli, time_t current /* broken chain */ case ChainIsBroken: elog(WARNING, "Backup %s has missing parent: %s. 
Cannot be a parent", - base36enc(backup->start_time), base36enc(tmp_backup->parent_backup)); + backup_id_of(backup), base36enc(tmp_backup->parent_backup)); continue; /* chain is intact, but at least one parent is invalid */ case ChainIsInvalid: elog(WARNING, "Backup %s has invalid parent: %s. Cannot be a parent", - base36enc(backup->start_time), base36enc(tmp_backup->start_time)); + backup_id_of(backup), backup_id_of(tmp_backup)); continue; /* chain is ok */ @@ -1280,7 +1280,7 @@ catalog_get_last_data_backup(parray *backup_list, TimeLineID tli, time_t current else { elog(WARNING, "Backup %s has status: %s. Cannot be a parent.", - base36enc(backup->start_time), status2str(backup->status)); + backup_id_of(backup), status2str(backup->status)); } } @@ -1366,7 +1366,7 @@ get_multi_timeline_parent(parray *backup_list, parray *tli_list, return NULL; else elog(LOG, "Latest valid full backup: %s, tli: %i", - base36enc(ancestor_backup->start_time), ancestor_backup->tli); + backup_id_of(ancestor_backup), ancestor_backup->tli); /* At this point we found suitable full backup, * now we must find his latest child, suitable to be @@ -1871,7 +1871,7 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) { elog(LOG, "Pinned backup %s is ignored for the " "purpose of WAL retention", - base36enc(backup->start_time)); + backup_id_of(backup)); continue; } @@ -2057,7 +2057,7 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) elog(LOG, "Archive backup %s to stay consistent " "protect from purge WAL interval " "between %s and %s on timeline %i", - base36enc(backup->start_time), + backup_id_of(backup), begin_segno_str, end_segno_str, backup->tli); if (tlinfo->keep_segments == NULL) @@ -2266,7 +2266,7 @@ pin_backup(pgBackup *target_backup, pgSetBackupParams *set_backup_params) if (target_backup->expire_time == 0) { elog(WARNING, "Backup %s is not pinned, nothing to unpin", - base36enc(target_backup->start_time)); + backup_id_of(target_backup)); return; } target_backup->expire_time = 0; @@ -2286,11 +2286,11 @@ pin_backup(pgBackup *target_backup, pgSetBackupParams *set_backup_params) char expire_timestamp[100]; time2iso(expire_timestamp, lengthof(expire_timestamp), target_backup->expire_time, false); - elog(INFO, "Backup %s is pinned until '%s'", base36enc(target_backup->start_time), + elog(INFO, "Backup %s is pinned until '%s'", backup_id_of(target_backup), expire_timestamp); } else - elog(INFO, "Backup %s is unpinned", base36enc(target_backup->start_time)); + elog(INFO, "Backup %s is unpinned", backup_id_of(target_backup)); return; } @@ -2310,7 +2310,7 @@ add_note(pgBackup *target_backup, char *note) { target_backup->note = NULL; elog(INFO, "Removing note from backup %s", - base36enc(target_backup->start_time)); + backup_id_of(target_backup)); } else { @@ -2325,7 +2325,7 @@ add_note(pgBackup *target_backup, char *note) target_backup->note = note_string; elog(INFO, "Adding note to backup %s: '%s'", - base36enc(target_backup->start_time), target_backup->note); + backup_id_of(target_backup), target_backup->note); } /* Update backup.control */ @@ -2644,7 +2644,7 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, static pgBackup * readBackupControlFile(const char *path) { - pgBackup *backup = pgut_new(pgBackup); + pgBackup *backup = pgut_new0(pgBackup); char *backup_mode = NULL; char *start_lsn = NULL; char *stop_lsn = NULL; @@ -3047,7 +3047,7 @@ find_parent_full_backup(pgBackup *current_backup) base36enc(base_full_backup->parent_backup)); 
else elog(WARNING, "Failed to find parent FULL backup for %s", - base36enc(current_backup->start_time)); + backup_id_of(current_backup)); return NULL; } diff --git a/src/data.c b/src/data.c index 08727d41c..490faf9b6 100644 --- a/src/data.c +++ b/src/data.c @@ -1294,7 +1294,7 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, if (!tmp_file) { elog(ERROR, "Failed to locate non-data file \"%s\" in backup %s", - dest_file->rel_path, base36enc(tmp_backup->start_time)); + dest_file->rel_path, backup_id_of(tmp_backup)); continue; } @@ -1327,7 +1327,7 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, if (tmp_file->write_size <= 0) elog(ERROR, "Full copy of non-data file has invalid size: %li. " "Metadata corruption in backup %s in file: \"%s\"", - tmp_file->write_size, base36enc(tmp_backup->start_time), + tmp_file->write_size, backup_id_of(tmp_backup), to_fullpath); /* incremental restore */ diff --git a/src/delete.c b/src/delete.c index a1ab81331..3f299d78b 100644 --- a/src/delete.c +++ b/src/delete.c @@ -71,7 +71,7 @@ do_delete(InstanceState *instanceState, time_t backup_id) parray_append(delete_list, backup); elog(LOG, "Backup %s %s be deleted", - base36enc(backup->start_time), dry_run? "can":"will"); + backup_id_of(backup), dry_run? "can":"will"); size_to_delete += backup->data_bytes; if (backup->stream) @@ -84,7 +84,7 @@ do_delete(InstanceState *instanceState, time_t backup_id) { pretty_size(size_to_delete, size_to_delete_pretty, lengthof(size_to_delete_pretty)); elog(INFO, "Resident data size to free by delete of backup %s : %s", - base36enc(target_backup->start_time), size_to_delete_pretty); + backup_id_of(target_backup), size_to_delete_pretty); } if (!dry_run) @@ -321,12 +321,12 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg time2iso(expire_timestamp, lengthof(expire_timestamp), backup->expire_time, false); elog(LOG, "Backup %s is pinned until '%s', retain", - base36enc(backup->start_time), expire_timestamp); + backup_id_of(backup), expire_timestamp); continue; } /* Add backup to purge_list */ - elog(VERBOSE, "Mark backup %s for purge.", base36enc(backup->start_time)); + elog(VERBOSE, "Mark backup %s for purge.", backup_id_of(backup)); parray_append(to_purge_list, backup); continue; } @@ -406,7 +406,7 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg /* TODO: add ancestor(chain full backup) ID */ elog(INFO, "Backup %s, mode: %s, status: %s. Redundancy: %i/%i, Time Window: %ud/%ud. 
%s", - base36enc(backup->start_time), + backup_id_of(backup), pgBackupGetBackupMode(backup, false), status2str(backup->status), cur_full_backup_num, @@ -460,7 +460,7 @@ do_retention_merge(InstanceState *instanceState, parray *backup_list, if (!keep_backup) continue; - elog(INFO, "Consider backup %s for merge", base36enc(keep_backup->start_time)); + elog(INFO, "Consider backup %s for merge", backup_id_of(keep_backup)); /* Got valid incremental backup, find its FULL ancestor */ full_backup = find_parent_full_backup(keep_backup); @@ -468,7 +468,7 @@ do_retention_merge(InstanceState *instanceState, parray *backup_list, /* Failed to find parent */ if (!full_backup) { - elog(WARNING, "Failed to find FULL parent for %s", base36enc(keep_backup->start_time)); + elog(WARNING, "Failed to find FULL parent for %s", backup_id_of(keep_backup)); continue; } @@ -478,7 +478,7 @@ do_retention_merge(InstanceState *instanceState, parray *backup_list, pgBackupCompareIdDesc)) { elog(WARNING, "Skip backup %s for merging, " - "because his FULL parent is not marked for purge", base36enc(keep_backup->start_time)); + "because his FULL parent is not marked for purge", backup_id_of(keep_backup)); continue; } @@ -488,8 +488,8 @@ do_retention_merge(InstanceState *instanceState, parray *backup_list, */ elog(INFO, "Merge incremental chain between full backup %s and backup %s", - base36enc(full_backup->start_time), - base36enc(keep_backup->start_time)); + backup_id_of(full_backup), + backup_id_of(keep_backup)); merge_list = parray_new(); @@ -531,7 +531,7 @@ do_retention_merge(InstanceState *instanceState, parray *backup_list, // if (is_prolific(backup_list, full_backup)) // { // elog(WARNING, "Backup %s has multiple valid descendants. " -// "Automatic merge is not possible.", base36enc(full_backup->start_time)); +// "Automatic merge is not possible.", backup_id_of(full_backup)); // } /* Merge list example: @@ -557,7 +557,7 @@ do_retention_merge(InstanceState *instanceState, parray *backup_list, if (!no_validate) pgBackupValidate(full_backup, NULL); if (full_backup->status == BACKUP_STATUS_CORRUPT) - elog(ERROR, "Merging of backup %s failed", base36enc(full_backup->start_time)); + elog(ERROR, "Merging of backup %s failed", backup_id_of(full_backup)); /* Cleanup */ parray_free(merge_list); @@ -589,7 +589,7 @@ do_retention_purge(parray *to_keep_list, parray *to_purge_list) pgBackup *delete_backup = (pgBackup *) parray_get(to_purge_list, j); elog(LOG, "Consider backup %s for purge", - base36enc(delete_backup->start_time)); + backup_id_of(delete_backup)); /* Evaluate marked for delete backup against every backup in keep list. 
* If marked for delete backup is recognized as parent of one of those, @@ -608,8 +608,8 @@ do_retention_purge(parray *to_keep_list, parray *to_purge_list) continue; elog(LOG, "Check if backup %s is parent of backup %s", - base36enc(delete_backup->start_time), - base36enc(keep_backup->start_time)); + backup_id_of(delete_backup), + backup_id_of(keep_backup)); if (is_parent(delete_backup->start_time, keep_backup, true)) { @@ -617,8 +617,8 @@ do_retention_purge(parray *to_keep_list, parray *to_purge_list) /* We must not delete this backup, evict it from purge list */ elog(LOG, "Retain backup %s because his " "descendant %s is guarded by retention", - base36enc(delete_backup->start_time), - base36enc(keep_backup->start_time)); + backup_id_of(delete_backup), + backup_id_of(keep_backup)); purge = false; break; @@ -634,7 +634,7 @@ do_retention_purge(parray *to_keep_list, parray *to_purge_list) { /* If the backup still is used, do not interrupt and go to the next */ elog(WARNING, "Cannot lock backup %s directory, skip purging", - base36enc(delete_backup->start_time)); + backup_id_of(delete_backup)); continue; } @@ -737,7 +737,7 @@ delete_backup_files(pgBackup *backup) if (backup->status == BACKUP_STATUS_DELETED) { elog(WARNING, "Backup %s already deleted", - base36enc(backup->start_time)); + backup_id_of(backup)); return; } @@ -747,7 +747,7 @@ delete_backup_files(pgBackup *backup) time2iso(timestamp, lengthof(timestamp), backup->start_time, false); elog(INFO, "Delete: %s %s", - base36enc(backup->start_time), timestamp); + backup_id_of(backup), timestamp); /* * Update STATUS to BACKUP_STATUS_DELETING in preparation for the case which @@ -1076,7 +1076,7 @@ do_delete_status(InstanceState *instanceState, InstanceConfig *instance_config, backup = (pgBackup *)parray_get(delete_list, i); elog(INFO, "Backup %s with status %s %s be deleted", - base36enc(backup->start_time), status2str(backup->status), dry_run ? "can" : "will"); + backup_id_of(backup), status2str(backup->status), dry_run ? 
"can" : "will"); size_to_delete += backup->data_bytes; if (backup->stream) diff --git a/src/dir.c b/src/dir.c index 6609b9f19..182f0a51d 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1101,7 +1101,7 @@ check_tablespace_mapping(pgBackup *backup, bool incremental, bool force, bool pg bool tblspaces_are_empty = true; elog(LOG, "Checking tablespace directories of backup %s", - base36enc(backup->start_time)); + backup_id_of(backup)); /* validate tablespace map, * if there are no tablespaces, then there is nothing left to do @@ -1250,7 +1250,7 @@ check_external_dir_mapping(pgBackup *backup, bool incremental) int i; elog(LOG, "check external directories of backup %s", - base36enc(backup->start_time)); + backup_id_of(backup)); if (!backup->external_dir_str) { diff --git a/src/merge.c b/src/merge.c index ac1e97e71..a0d3fee1f 100644 --- a/src/merge.c +++ b/src/merge.c @@ -105,7 +105,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool backup->status != BACKUP_STATUS_MERGED && backup->status != BACKUP_STATUS_DELETING) elog(ERROR, "Backup %s has status: %s", - base36enc(backup->start_time), status2str(backup->status)); + backup_id_of(backup), status2str(backup->status)); dest_backup = backup; break; @@ -154,12 +154,12 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool full_backup = dest_backup; dest_backup = NULL; elog(INFO, "Merge target backup %s is full backup", - base36enc(full_backup->start_time)); + backup_id_of(full_backup)); /* sanity */ if (full_backup->status == BACKUP_STATUS_DELETING) elog(ERROR, "Backup %s has status: %s", - base36enc(full_backup->start_time), + backup_id_of(full_backup), status2str(full_backup->status)); /* Case #1 */ @@ -194,7 +194,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool if (dest_backup == NULL) elog(ERROR, "Failed to find merge candidate, " "backup %s has no valid children", - base36enc(full_backup->start_time)); + backup_id_of(full_backup)); } /* Case #2 */ @@ -224,7 +224,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool if (!dest_backup) { elog(ERROR, "Full backup %s has unfinished merge with missing backup %s", - base36enc(full_backup->start_time), + backup_id_of(full_backup), base36enc(full_backup->merge_dest_backup)); } } @@ -252,13 +252,13 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool if (!dest_backup) { elog(WARNING, "Full backup %s has unfinished merge with missing backup %s", - base36enc(full_backup->start_time), + backup_id_of(full_backup), base36enc(full_backup->merge_dest_backup)); } } else elog(ERROR, "Backup %s has status: %s", - base36enc(full_backup->start_time), + backup_id_of(full_backup), status2str(full_backup->status)); } else @@ -296,7 +296,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool if (dest_backup->status == BACKUP_STATUS_MERGING || dest_backup->status == BACKUP_STATUS_DELETING) elog(WARNING, "Rerun unfinished merge for backup %s", - base36enc(dest_backup->start_time)); + backup_id_of(dest_backup)); /* First we should try to find parent FULL backup */ full_backup = find_parent_full_backup(dest_backup); @@ -310,7 +310,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool */ if (dest_backup->status != BACKUP_STATUS_MERGING) elog(ERROR, "Failed to find parent full backup for %s", - base36enc(dest_backup->start_time)); + backup_id_of(dest_backup)); /* Find FULL backup that has unfinished merge with dest 
backup */ for (i = 0; i < parray_num(backups); i++) @@ -327,7 +327,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool if (!full_backup) elog(ERROR, "Failed to find full backup that has unfinished merge" "with backup %s, cannot rerun merge", - base36enc(dest_backup->start_time)); + backup_id_of(dest_backup)); if (full_backup->status == BACKUP_STATUS_MERGED) elog(WARNING, "Incremental chain is broken, try to recover unfinished merge"); @@ -341,7 +341,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool dest_backup->start_time != full_backup->merge_dest_backup) { elog(ERROR, "Full backup %s has unfinished merge with backup %s", - base36enc(full_backup->start_time), + backup_id_of(full_backup), base36enc(full_backup->merge_dest_backup)); } @@ -357,7 +357,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool * having status MERGED */ if (dest_backup == NULL && full_backup->status != BACKUP_STATUS_MERGED) elog(ERROR, "Cannot run merge for full backup %s", - base36enc(full_backup->start_time)); + backup_id_of(full_backup)); /* sanity */ if (full_backup->status != BACKUP_STATUS_OK && @@ -366,7 +366,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool full_backup->status != BACKUP_STATUS_MERGED && full_backup->status != BACKUP_STATUS_MERGING) elog(ERROR, "Backup %s has status: %s", - base36enc(full_backup->start_time), status2str(full_backup->status)); + backup_id_of(full_backup), status2str(full_backup->status)); /* Form merge list */ dest_backup_tmp = dest_backup; @@ -384,7 +384,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool dest_backup_tmp->status != BACKUP_STATUS_MERGED && dest_backup_tmp->status != BACKUP_STATUS_DELETING) elog(ERROR, "Backup %s has status: %s", - base36enc(dest_backup_tmp->start_time), + backup_id_of(dest_backup_tmp), status2str(dest_backup_tmp->status)); if (dest_backup_tmp->backup_mode == BACKUP_MODE_FULL) @@ -472,10 +472,10 @@ merge_chain(InstanceState *instanceState, full_backup->status == BACKUP_STATUS_MERGED) { is_retry = true; - elog(INFO, "Retry failed merge of backup %s with parent chain", base36enc(dest_backup->start_time)); + elog(INFO, "Retry failed merge of backup %s with parent chain", backup_id_of(dest_backup)); } else - elog(INFO, "Merging backup %s with parent chain", base36enc(dest_backup->start_time)); + elog(INFO, "Merging backup %s with parent chain", backup_id_of(dest_backup)); /* sanity */ if (full_backup->merge_dest_backup != INVALID_BACKUP_ID && @@ -483,8 +483,8 @@ merge_chain(InstanceState *instanceState, { elog(ERROR, "Cannot run merge for %s, because full backup %s has " "unfinished merge with backup %s", - base36enc(dest_backup->start_time), - base36enc(full_backup->start_time), + backup_id_of(dest_backup), + backup_id_of(full_backup), base36enc(full_backup->merge_dest_backup)); } @@ -506,7 +506,7 @@ merge_chain(InstanceState *instanceState, elog(ERROR, "Backup %s has been produced by pg_probackup version %s, " "but current program version is %s. 
Forward compatibility " "is not supported.", - base36enc(backup->start_time), + backup_id_of(backup), backup->program_version, PROGRAM_VERSION); } @@ -549,7 +549,7 @@ merge_chain(InstanceState *instanceState, if (!no_validate) { elog(INFO, "Validate parent chain for backup %s", - base36enc(dest_backup->start_time)); + backup_id_of(dest_backup)); for (i = parray_num(parent_chain) - 1; i >= 0; i--) { @@ -566,7 +566,7 @@ merge_chain(InstanceState *instanceState, if (backup->status != BACKUP_STATUS_OK) elog(ERROR, "Backup %s has status %s, merge is aborted", - base36enc(backup->start_time), status2str(backup->status)); + backup_id_of(backup), status2str(backup->status)); } } @@ -869,7 +869,7 @@ merge_chain(InstanceState *instanceState, * Merging finished, now we can safely update ID of the FULL backup */ elog(INFO, "Rename merged full backup %s to %s", - base36enc(full_backup->start_time), + backup_id_of(full_backup), base36enc(full_backup->merge_dest_backup)); full_backup->status = BACKUP_STATUS_OK; @@ -1333,7 +1333,7 @@ merge_non_data_file(parray *parent_chain, pgBackup *full_backup, if (!from_file) { elog(ERROR, "Failed to locate non-data file \"%s\" in backup %s", - dest_file->rel_path, base36enc(from_backup->start_time)); + dest_file->rel_path, backup_id_of(from_backup)); continue; } @@ -1429,7 +1429,7 @@ is_forward_compatible(parray *parent_chain) elog(WARNING, "In-place merge is disabled because of storage format incompatibility. " "Backup %s storage format version: %s, " "current storage format version: %s", - base36enc(oldest_ver_backup->start_time), + backup_id_of(oldest_ver_backup), oldest_ver_backup->program_version, STORAGE_FORMAT_VERSION); return false; diff --git a/src/parsexlog.c b/src/parsexlog.c index f12aae904..3a791dc5d 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -392,7 +392,7 @@ validate_backup_wal_from_start_to_stop(pgBackup *backup, elog(WARNING, "There are not enough WAL records to consistenly restore " "backup %s from START LSN: %X/%X to STOP LSN: %X/%X", - base36enc(backup->start_time), + backup_id_of(backup), (uint32) (backup->start_lsn >> 32), (uint32) (backup->start_lsn), (uint32) (backup->stop_lsn >> 32), @@ -410,24 +410,20 @@ validate_wal(pgBackup *backup, const char *archivedir, time_t target_time, TransactionId target_xid, XLogRecPtr target_lsn, TimeLineID tli, uint32 wal_seg_size) { - const char *backup_id; XLogRecTarget last_rec; char last_timestamp[100], target_timestamp[100]; bool all_wal = false; - /* We need free() this later */ - backup_id = base36enc(backup->start_time); - if (!XRecOffIsValid(backup->start_lsn)) elog(ERROR, "Invalid start_lsn value %X/%X of backup %s", (uint32) (backup->start_lsn >> 32), (uint32) (backup->start_lsn), - backup_id); + backup_id_of(backup)); if (!XRecOffIsValid(backup->stop_lsn)) elog(ERROR, "Invalid stop_lsn value %X/%X of backup %s", (uint32) (backup->stop_lsn >> 32), (uint32) (backup->stop_lsn), - backup_id); + backup_id_of(backup)); /* * Check that the backup has all wal files needed @@ -450,7 +446,7 @@ validate_wal(pgBackup *backup, const char *archivedir, if (backup->status == BACKUP_STATUS_CORRUPT) { - elog(WARNING, "Backup %s WAL segments are corrupted", backup_id); + elog(WARNING, "Backup %s WAL segments are corrupted", backup_id_of(backup)); return; } /* @@ -461,7 +457,7 @@ validate_wal(pgBackup *backup, const char *archivedir, !XRecOffIsValid(target_lsn)) { /* Recovery target is not given so exit */ - elog(INFO, "Backup %s WAL segments are valid", backup_id); + elog(INFO, "Backup %s WAL segments are 
valid", backup_id_of(backup)); return; } diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 8a12cc488..5ef6cbe31 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -135,6 +135,9 @@ extern const char *PROGRAM_EMAIL; #define XRecOffIsNull(xlrp) \ ((xlrp) % XLOG_BLCKSZ == 0) +/* log(2**64) / log(36) = 12.38 => max 13 char + '\0' */ +#define base36bufsize 14 + /* Text Coloring macro */ #define TC_LEN 11 #define TC_RED "\033[0;31m" @@ -151,7 +154,6 @@ extern const char *PROGRAM_EMAIL; #define TC_CYAN_BOLD "\033[1;36m" #define TC_RESET "\033[0m" - typedef struct RedoParams { TimeLineID tli; @@ -532,6 +534,8 @@ struct pgBackup /* map used for access to page headers */ HeaderMap hdr_map; + + char backup_id_encoded[base36bufsize]; }; /* Recovery target for restore and validate subcommands */ @@ -882,6 +886,8 @@ extern pgRecoveryTarget *parseRecoveryTargetOptions( extern parray *get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, PartialRestoreType partial_restore_type); +extern const char* backup_id_of(pgBackup *backup); + extern parray *get_backup_filelist(pgBackup *backup, bool strict); extern parray *read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict); extern bool tliIsPartOfHistory(const parray *timelines, TimeLineID tli); @@ -1192,7 +1198,6 @@ extern void time2iso(char *buf, size_t len, time_t time, bool utc); extern const char *status2str(BackupStatus status); const char *status2str_color(BackupStatus status); extern BackupStatus str2status(const char *status); -#define base36bufsize 14 extern const char *base36enc_to(long unsigned int value, char buf[ARG_SIZE_HINT base36bufsize]); /* Abuse C99 Compound Literal's lifetime */ #define base36enc(value) (base36enc_to((value), (char[base36bufsize]){0})) diff --git a/src/restore.c b/src/restore.c index 6bd2ad3b4..6c0e1881f 100644 --- a/src/restore.c +++ b/src/restore.c @@ -76,12 +76,8 @@ static void set_orphan_status(parray *backups, pgBackup *parent_backup) { /* chain is intact, but at least one parent is invalid */ - const char *parent_backup_id; int j; - /* parent_backup_id is a human-readable backup ID */ - parent_backup_id = base36enc(parent_backup->start_time); - for (j = 0; j < parray_num(backups); j++) { @@ -96,14 +92,15 @@ set_orphan_status(parray *backups, pgBackup *parent_backup) elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s", - base36enc(backup->start_time), - parent_backup_id, + backup_id_of(backup), + backup_id_of(parent_backup), status2str(parent_backup->status)); } else { elog(WARNING, "Backup %s has parent %s with status: %s", - base36enc(backup->start_time), parent_backup_id, + backup_id_of(backup), + backup_id_of(parent_backup), status2str(parent_backup->status)); } } @@ -242,7 +239,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg current_backup->status != BACKUP_STATUS_DONE)) { elog(WARNING, "Skipping backup %s, because it has non-valid status: %s", - base36enc(current_backup->start_time), status2str(current_backup->status)); + backup_id_of(current_backup), status2str(current_backup->status)); continue; } @@ -272,10 +269,10 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg current_backup->status == BACKUP_STATUS_RUNNING) && (!params->no_validate || params->force)) elog(WARNING, "Backup %s has status: %s", - base36enc(current_backup->start_time), status2str(current_backup->status)); + backup_id_of(current_backup), status2str(current_backup->status)); else elog(ERROR, "Backup 
%s has status: %s", - base36enc(current_backup->start_time), status2str(current_backup->status)); + backup_id_of(current_backup), status2str(current_backup->status)); } if (rt->target_tli) @@ -362,24 +359,23 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg */ if (is_parent(missing_backup_start_time, backup, false)) { - const char *backup_id = base36enc(backup->start_time); if (backup->status == BACKUP_STATUS_OK || backup->status == BACKUP_STATUS_DONE) { write_backup_status(backup, BACKUP_STATUS_ORPHAN, true); elog(WARNING, "Backup %s is orphaned because his parent %s is missing", - backup_id, missing_backup_id); + backup_id_of(backup), missing_backup_id); } else { elog(WARNING, "Backup %s has missing parent %s", - backup_id, missing_backup_id); + backup_id_of(backup), missing_backup_id); } } } /* No point in doing futher */ - elog(ERROR, "%s of backup %s failed.", action, base36enc(dest_backup->start_time)); + elog(ERROR, "%s of backup %s failed.", action, backup_id_of(dest_backup)); } else if (result == ChainIsInvalid) { @@ -390,7 +386,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg /* sanity */ if (!tmp_backup) elog(ERROR, "Parent full backup for the given backup %s was not found", - base36enc(dest_backup->start_time)); + backup_id_of(dest_backup)); } /* We have found full backup */ @@ -510,7 +506,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (redo.tli == tmp_backup->tli) { elog(INFO, "Backup %s is chosen as shiftpoint, its Stop LSN will be used as shift LSN", - base36enc(tmp_backup->start_time)); + backup_id_of(tmp_backup)); shift_lsn = tmp_backup->stop_lsn; break; @@ -534,7 +530,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg else elog(INFO, "Backup %s cannot be a shiftpoint, " "because its tli %i is not in history of redo timeline %i", - base36enc(tmp_backup->start_time), tmp_backup->tli, redo.tli); + backup_id_of(tmp_backup), tmp_backup->tli, redo.tli); } tmp_backup = tmp_backup->parent_backup_link; @@ -543,13 +539,13 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (XLogRecPtrIsInvalid(shift_lsn)) elog(ERROR, "Cannot perform incremental restore of backup chain %s in 'lsn' mode, " "because destination directory redo point %X/%X on tli %i is out of reach", - base36enc(dest_backup->start_time), + backup_id_of(dest_backup), (uint32) (redo.lsn >> 32), (uint32) redo.lsn, redo.tli); else elog(INFO, "Destination directory redo point %X/%X on tli %i is " "within reach of backup %s with Stop LSN %X/%X on tli %i", (uint32) (redo.lsn >> 32), (uint32) redo.lsn, redo.tli, - base36enc(tmp_backup->start_time), + backup_id_of(tmp_backup), (uint32) (tmp_backup->stop_lsn >> 32), (uint32) tmp_backup->stop_lsn, tmp_backup->tli); @@ -563,7 +559,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (!params->is_restore || !params->no_validate) { if (dest_backup->backup_mode != BACKUP_MODE_FULL) - elog(INFO, "Validating parents for backup %s", base36enc(dest_backup->start_time)); + elog(INFO, "Validating parents for backup %s", backup_id_of(dest_backup)); /* * Validate backups from base_full_backup to dest_backup. 
@@ -576,7 +572,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (!lock_backup(tmp_backup, true, false)) { elog(ERROR, "Cannot lock backup %s directory", - base36enc(tmp_backup->start_time)); + backup_id_of(tmp_backup)); } /* validate datafiles only */ @@ -623,27 +619,27 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg dest_backup->status == BACKUP_STATUS_DONE) { if (params->no_validate) - elog(WARNING, "Backup %s is used without validation.", base36enc(dest_backup->start_time)); + elog(WARNING, "Backup %s is used without validation.", backup_id_of(dest_backup)); else - elog(INFO, "Backup %s is valid.", base36enc(dest_backup->start_time)); + elog(INFO, "Backup %s is valid.", backup_id_of(dest_backup)); } else if (dest_backup->status == BACKUP_STATUS_CORRUPT) { if (params->force) - elog(WARNING, "Backup %s is corrupt.", base36enc(dest_backup->start_time)); + elog(WARNING, "Backup %s is corrupt.", backup_id_of(dest_backup)); else - elog(ERROR, "Backup %s is corrupt.", base36enc(dest_backup->start_time)); + elog(ERROR, "Backup %s is corrupt.", backup_id_of(dest_backup)); } else if (dest_backup->status == BACKUP_STATUS_ORPHAN) { if (params->force) - elog(WARNING, "Backup %s is orphan.", base36enc(dest_backup->start_time)); + elog(WARNING, "Backup %s is orphan.", backup_id_of(dest_backup)); else - elog(ERROR, "Backup %s is orphan.", base36enc(dest_backup->start_time)); + elog(ERROR, "Backup %s is orphan.", backup_id_of(dest_backup)); } else elog(ERROR, "Backup %s has status: %s", - base36enc(dest_backup->start_time), status2str(dest_backup->status)); + backup_id_of(dest_backup), status2str(dest_backup->status)); /* We ensured that all backups are valid, now restore if required */ @@ -667,7 +663,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (rt->lsn_string && parse_server_version(dest_backup->server_version) < 100000) elog(ERROR, "Backup %s was created for version %s which doesn't support recovery_target_lsn", - base36enc(dest_backup->start_time), + backup_id_of(dest_backup), dest_backup->server_version); restore_chain(dest_backup, parent_chain, dbOid_exclude_list, params, @@ -682,7 +678,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg fio_disconnect(); elog(INFO, "%s of backup %s completed.", - action, base36enc(dest_backup->start_time)); + action, backup_id_of(dest_backup)); /* cleanup */ parray_walk(backups, pgBackupFree); @@ -733,17 +729,17 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, pgBackup *backup = (pgBackup *) parray_get(parent_chain, i); if (!lock_backup(backup, true, false)) - elog(ERROR, "Cannot lock backup %s", base36enc(backup->start_time)); + elog(ERROR, "Cannot lock backup %s", backup_id_of(backup)); if (backup->status != BACKUP_STATUS_OK && backup->status != BACKUP_STATUS_DONE) { if (params->force) elog(WARNING, "Backup %s is not valid, restore is forced", - base36enc(backup->start_time)); + backup_id_of(backup)); else elog(ERROR, "Backup %s cannot be restored because it is not valid", - base36enc(backup->start_time)); + backup_id_of(backup)); } /* confirm block size compatibility */ @@ -1620,7 +1616,7 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, strerror(errno)); fio_fprintf(fp, "\n# recovery settings added by pg_probackup restore of backup %s at '%s'\n", - base36enc(backup->start_time), current_time_str); + backup_id_of(backup), current_time_str); if 
(params->recovery_settings_mode == PITR_REQUESTED) print_recovery_settings(instanceState, fp, backup, params, rt); @@ -2030,7 +2026,7 @@ get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, if (!database_map_file) elog(ERROR, "Backup %s doesn't contain a database_map, partial restore is impossible.", - base36enc(backup->start_time)); + backup_id_of(backup)); join_path_components(path, backup->root_dir, DATABASE_DIR); join_path_components(database_map_path, path, DATABASE_MAP); @@ -2048,7 +2044,7 @@ get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, /* partial restore requested but database_map is missing */ if (!database_map) elog(ERROR, "Backup %s has empty or mangled database_map, partial restore is impossible.", - base36enc(backup->start_time)); + backup_id_of(backup)); /* * So we have a list of datnames and a database_map for it. @@ -2078,7 +2074,7 @@ get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, /* If specified datname is not found in database_map, error out */ if (!found_match) elog(ERROR, "Failed to find a database '%s' in database_map of backup %s", - datname, base36enc(backup->start_time)); + datname, backup_id_of(backup)); } /* At this moment only databases to exclude are left in the map */ @@ -2116,14 +2112,14 @@ get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, /* If specified datname is not found in database_map, error out */ if (!found_match) elog(ERROR, "Failed to find a database '%s' in database_map of backup %s", - datname, base36enc(backup->start_time)); + datname, backup_id_of(backup)); } } /* extra sanity: ensure that list is not empty */ if (!dbOid_exclude_list || parray_num(dbOid_exclude_list) < 1) elog(ERROR, "Failed to find a match in database_map of backup %s for partial restore", - base36enc(backup->start_time)); + backup_id_of(backup)); /* clean backup filelist */ if (files) diff --git a/src/show.c b/src/show.c index db8a9e225..2e06582ed 100644 --- a/src/show.c +++ b/src/show.c @@ -353,7 +353,7 @@ print_backup_json_object(PQExpBuffer buf, pgBackup *backup) json_add(buf, JT_BEGIN_OBJECT, &json_level); - json_add_value(buf, "id", base36enc(backup->start_time), json_level, + json_add_value(buf, "id", backup_id_of(backup), json_level, true); if (backup->parent_backup != 0) @@ -583,7 +583,7 @@ show_instance_plain(const char *instance_name, parray *backup_list, bool show_na /* ID */ snprintf(row->backup_id, lengthof(row->backup_id), "%s", - base36enc(backup->start_time)); + backup_id_of(backup)); widths[cur] = Max(widths[cur], strlen(row->backup_id)); cur++; @@ -1100,7 +1100,7 @@ show_archive_json(const char *instance_name, uint32 xlog_seg_size, if (tlinfo->closest_backup != NULL) snprintf(tmp_buf, lengthof(tmp_buf), "%s", - base36enc(tlinfo->closest_backup->start_time)); + backup_id_of(tlinfo->closest_backup)); else snprintf(tmp_buf, lengthof(tmp_buf), "%s", ""); diff --git a/src/util.c b/src/util.c index 420795d72..d3cfcb37e 100644 --- a/src/util.c +++ b/src/util.c @@ -35,8 +35,7 @@ const char * base36enc_to(long unsigned int value, char buf[ARG_SIZE_HINT base36bufsize]) { const char base36[36] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - /* log(2**64) / log(36) = 12.38 => max 13 char + '\0' */ - char buffer[14]; + char buffer[base36bufsize]; char *p; p = &buffer[sizeof(buffer)-1]; @@ -562,3 +561,13 @@ datapagemap_print_debug(datapagemap_t *map) pg_free(iter); } + +const char* +backup_id_of(pgBackup *backup) +{ + if (backup->backup_id_encoded[0] == '\x00') + { + base36enc_to(backup->start_time, 
backup->backup_id_encoded); + } + return backup->backup_id_encoded; +} diff --git a/src/validate.c b/src/validate.c index 139beabd6..7a9140bbc 100644 --- a/src/validate.c +++ b/src/validate.c @@ -63,18 +63,18 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) elog(ERROR, "pg_probackup binary version is %s, but backup %s version is %s. " "pg_probackup do not guarantee to be forward compatible. " "Please upgrade pg_probackup binary.", - PROGRAM_VERSION, base36enc(backup->start_time), backup->program_version); + PROGRAM_VERSION, backup_id_of(backup), backup->program_version); /* Check backup server version */ if (strcmp(backup->server_version, PG_MAJORVERSION) != 0) elog(ERROR, "Backup %s has server version %s, but current pg_probackup binary " "compiled with server version %s", - base36enc(backup->start_time), backup->server_version, PG_MAJORVERSION); + backup_id_of(backup), backup->server_version, PG_MAJORVERSION); if (backup->status == BACKUP_STATUS_RUNNING) { elog(WARNING, "Backup %s has status %s, change it to ERROR and skip validation", - base36enc(backup->start_time), status2str(backup->status)); + backup_id_of(backup), status2str(backup->status)); write_backup_status(backup, BACKUP_STATUS_ERROR, true); corrupted_backup_found = true; return; @@ -88,7 +88,7 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) backup->status != BACKUP_STATUS_CORRUPT) { elog(WARNING, "Backup %s has status %s. Skip validation.", - base36enc(backup->start_time), status2str(backup->status)); + backup_id_of(backup), status2str(backup->status)); corrupted_backup_found = true; return; } @@ -98,28 +98,28 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) backup->status == BACKUP_STATUS_MERGING) { elog(WARNING, "Full backup %s has status %s, skip validation", - base36enc(backup->start_time), status2str(backup->status)); + backup_id_of(backup), status2str(backup->status)); return; } if (backup->status == BACKUP_STATUS_OK || backup->status == BACKUP_STATUS_DONE || backup->status == BACKUP_STATUS_MERGING) - elog(INFO, "Validating backup %s", base36enc(backup->start_time)); + elog(INFO, "Validating backup %s", backup_id_of(backup)); else - elog(INFO, "Revalidating backup %s", base36enc(backup->start_time)); + elog(INFO, "Revalidating backup %s", backup_id_of(backup)); if (backup->backup_mode != BACKUP_MODE_FULL && backup->backup_mode != BACKUP_MODE_DIFF_PAGE && backup->backup_mode != BACKUP_MODE_DIFF_PTRACK && backup->backup_mode != BACKUP_MODE_DIFF_DELTA) - elog(WARNING, "Invalid backup_mode of backup %s", base36enc(backup->start_time)); + elog(WARNING, "Invalid backup_mode of backup %s", backup_id_of(backup)); join_path_components(external_prefix, backup->root_dir, EXTERNAL_DIR); files = get_backup_filelist(backup, false); if (!files) { - elog(WARNING, "Backup %s file list is corrupted", base36enc(backup->start_time)); + elog(WARNING, "Backup %s file list is corrupted", backup_id_of(backup)); backup->status = BACKUP_STATUS_CORRUPT; write_backup_status(backup, BACKUP_STATUS_CORRUPT, true); return; @@ -189,9 +189,9 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) BACKUP_STATUS_OK, true); if (corrupted) - elog(WARNING, "Backup %s data files are corrupted", base36enc(backup->start_time)); + elog(WARNING, "Backup %s data files are corrupted", backup_id_of(backup)); else - elog(INFO, "Backup %s data files are valid", base36enc(backup->start_time)); + elog(INFO, "Backup %s data files are valid", backup_id_of(backup)); /* Issue #132 kludge */ if (!corrupted && @@ -208,7 
+208,7 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) elog(WARNING, "Backup %s is a victim of metadata corruption. " "Additional information can be found here: " "https://github.com/postgrespro/pg_probackup/issues/132", - base36enc(backup->start_time)); + backup_id_of(backup)); backup->status = BACKUP_STATUS_CORRUPT; write_backup_status(backup, BACKUP_STATUS_CORRUPT, true); } @@ -532,21 +532,21 @@ do_validate_instance(InstanceState *instanceState) /* Oldest corrupt backup has a chance for revalidation */ if (current_backup->start_time != tmp_backup->start_time) { - const char *tmp_backup_id = base36enc(tmp_backup->start_time); - const char *cur_backup_id = base36enc(current_backup->start_time); /* orphanize current_backup */ if (current_backup->status == BACKUP_STATUS_OK || current_backup->status == BACKUP_STATUS_DONE) { write_backup_status(current_backup, BACKUP_STATUS_ORPHAN, true); elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s", - cur_backup_id, tmp_backup_id, + backup_id_of(current_backup), + backup_id_of(tmp_backup), status2str(tmp_backup->status)); } else { elog(WARNING, "Backup %s has parent %s with status: %s", - cur_backup_id, tmp_backup_id, + backup_id_of(current_backup), + backup_id_of(tmp_backup), status2str(tmp_backup->status)); } continue; @@ -556,7 +556,7 @@ do_validate_instance(InstanceState *instanceState) /* sanity */ if (!base_full_backup) elog(ERROR, "Parent full backup for the given backup %s was not found", - base36enc(current_backup->start_time)); + backup_id_of(current_backup)); } /* chain is whole, all parents are valid at first glance, * current backup validation can proceed @@ -571,7 +571,7 @@ do_validate_instance(InstanceState *instanceState) if (!lock_backup(current_backup, true, false)) { elog(WARNING, "Cannot lock backup %s directory, skip validation", - base36enc(current_backup->start_time)); + backup_id_of(current_backup)); skipped_due_to_lock = true; continue; } @@ -589,7 +589,6 @@ do_validate_instance(InstanceState *instanceState) */ if (current_backup->status != BACKUP_STATUS_OK) { - const char *current_backup_id; /* This is ridiculous but legal. 
* PAGE_b2 <- OK * PAGE_a2 <- OK * PAGE_b1 * PAGE_a1 * FULL * */ corrupted_backup_found = true; - current_backup_id = base36enc(current_backup->start_time); for (j = i - 1; j >= 0; j--) { @@ -613,8 +611,8 @@ do_validate_instance(InstanceState *instanceState) write_backup_status(backup, BACKUP_STATUS_ORPHAN, true); elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s", - base36enc(backup->start_time), - current_backup_id, + backup_id_of(backup), + backup_id_of(current_backup), status2str(current_backup->status)); } } @@ -665,7 +663,7 @@ do_validate_instance(InstanceState *instanceState) if (!lock_backup(backup, true, false)) { elog(WARNING, "Cannot lock backup %s directory, skip validation", - base36enc(backup->start_time)); + backup_id_of(backup)); skipped_due_to_lock = true; continue; } From 44fef8894ec54cf29200167eb75c580d9e4547e3 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 23 Nov 2022 17:50:51 +0300 Subject: [PATCH 391/525] a bit more backup_id_of --- src/backup.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/backup.c b/src/backup.c index 5097f46ec..86c92618a 100644 --- a/src/backup.c +++ b/src/backup.c @@ -887,13 +887,13 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, pretty_size(current.data_bytes + current.wal_bytes, pretty_bytes, lengthof(pretty_bytes)); else pretty_size(current.data_bytes, pretty_bytes, lengthof(pretty_bytes)); - elog(INFO, "Backup %s resident size: %s", base36enc(current.start_time), pretty_bytes); + elog(INFO, "Backup %s resident size: %s", backup_id_of(&current), pretty_bytes); if (current.status == BACKUP_STATUS_OK || current.status == BACKUP_STATUS_DONE) - elog(INFO, "Backup %s completed", base36enc(current.start_time)); + elog(INFO, "Backup %s completed", backup_id_of(&current)); else - elog(ERROR, "Backup %s failed", base36enc(current.start_time)); + elog(ERROR, "Backup %s failed", backup_id_of(&current)); /* * After successful backup completion remove backups @@ -2034,7 +2034,7 @@ backup_cleanup(bool fatal, void *userdata) if (current.status == BACKUP_STATUS_RUNNING && current.end_time == 0) { elog(WARNING, "Backup %s is running, setting its status to ERROR", - base36enc(current.start_time)); + backup_id_of(&current)); current.end_time = time(NULL); current.status = BACKUP_STATUS_ERROR; write_backup(&current, true); From 87dd3f20217114fc720159ab9e9789bb54b3c724 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 23 Nov 2022 17:59:25 +0300 Subject: [PATCH 392/525] base36enc(backup->backup_id) -> backup_id_of(backup) Here we assume backup_id == start_time. This is really so at the moment, but it could change in the future. In fact, the two are almost always the same: sometimes backup_id is set while start_time is not yet set (during backup creation). We also had to fix the places where start_time was changed without a corresponding change of backup_id. --- src/backup.c | 7 ++++--- src/catalog.c | 15 ++++++++++----- src/dir.c | 2 +- src/merge.c | 2 ++ src/pg_probackup.h | 1 + src/util.c | 12 +++++++++++- src/validate.c | 4 ++-- 7 files changed, 31 insertions(+), 12 deletions(-) diff --git a/src/backup.c b/src/backup.c index 86c92618a..35fc98092 100644 --- a/src/backup.c +++ b/src/backup.c @@ -768,6 +768,7 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, /* Update backup status and other metainfo.
*/ current.status = BACKUP_STATUS_RUNNING; + /* XXX BACKUP_ID change it when backup_id wouldn't match start_time */ current.start_time = current.backup_id; strlcpy(current.program_version, PROGRAM_VERSION, @@ -778,13 +779,13 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, elog(INFO, "Backup start, pg_probackup version: %s, instance: %s, backup ID: %s, backup mode: %s, " "wal mode: %s, remote: %s, compress-algorithm: %s, compress-level: %i", - PROGRAM_VERSION, instanceState->instance_name, base36enc(current.backup_id), pgBackupGetBackupMode(&current, false), + PROGRAM_VERSION, instanceState->instance_name, backup_id_of(&current), pgBackupGetBackupMode(&current, false), current.stream ? "STREAM" : "ARCHIVE", IsSshProtocol() ? "true" : "false", deparse_compress_alg(current.compress_alg), current.compress_level); if (!lock_backup(&current, true, true)) elog(ERROR, "Cannot lock backup %s directory", - base36enc(current.backup_id)); + backup_id_of(&current)); write_backup(&current, true); /* set the error processing function for the backup process */ @@ -799,7 +800,7 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, backup_conn = pgdata_basic_setup(instance_config.conn_opt, &nodeInfo); if (current.from_replica) - elog(INFO, "Backup %s is going to be taken from standby", base36enc(current.backup_id)); + elog(INFO, "Backup %s is going to be taken from standby", backup_id_of(&current)); /* TODO, print PostgreSQL full version */ //elog(INFO, "PostgreSQL version: %s", nodeInfo.server_version_str); diff --git a/src/catalog.c b/src/catalog.c index d19e4a27d..92a2d84b7 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -275,7 +275,7 @@ lock_backup(pgBackup *backup, bool strict, bool exclusive) /* save lock metadata for later unlocking */ lock = pgut_malloc(sizeof(LockInfo)); - snprintf(lock->backup_id, 10, "%s", base36enc(backup->backup_id)); + snprintf(lock->backup_id, 10, "%s", backup_id_of(backup)); snprintf(lock->backup_dir, MAXPGPATH, "%s", backup->root_dir); lock->exclusive = exclusive; @@ -966,6 +966,9 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id backup = pgut_new0(pgBackup); pgBackupInit(backup); backup->start_time = base36dec(data_ent->d_name); + /* XXX BACKUP_ID change it when backup_id wouldn't match start_time */ + Assert(backup->backup_id == 0 || backup->backup_id == backup->start_time); + backup->backup_id = backup->start_time; } else if (strcmp(backup_id_of(backup), data_ent->d_name) != 0) { @@ -983,7 +986,6 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id init_header_map(backup); /* TODO: save encoded backup id */ - backup->backup_id = backup->start_time; if (requested_backup_id != INVALID_BACKUP_ID && requested_backup_id != backup->start_time) { @@ -1454,7 +1456,7 @@ pgBackupInitDir(pgBackup *backup, const char *backup_instance_path) if (create_backup_dir(backup, backup_instance_path) != 0) { /* Clear backup_id as indication of error */ - backup->backup_id = INVALID_BACKUP_ID; + reset_backup_id(backup); return; } @@ -1506,7 +1508,7 @@ create_backup_dir(pgBackup *backup, const char *backup_instance_path) int rc; char path[MAXPGPATH]; - join_path_components(path, backup_instance_path, base36enc(backup->backup_id)); + join_path_components(path, backup_instance_path, backup_id_of(backup)); /* TODO: add wrapper for remote mode */ rc = dir_create_dir(path, DIR_PERMISSION, true); @@ -2252,7 +2254,7 @@ pin_backup(pgBackup *target_backup, pgSetBackupParams *set_backup_params) /* sanity, backup must
have positive recovery-time */ if (target_backup->recovery_time <= 0) elog(ERROR, "Failed to set 'expire-time' for backup %s: invalid 'recovery-time'", - base36enc(target_backup->backup_id)); + backup_id_of(target_backup)); /* Pin comes from ttl */ if (set_backup_params->ttl > 0) @@ -2714,6 +2716,9 @@ readBackupControlFile(const char *path) pgBackupFree(backup); return NULL; } + /* XXX BACKUP_ID change it when backup_id wouldn't match start_time */ + Assert(backup->backup_id == 0 || backup->backup_id == backup->start_time); + backup->backup_id = backup->start_time; if (backup_mode) { diff --git a/src/dir.c b/src/dir.c index 182f0a51d..0a55c0f67 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1115,7 +1115,7 @@ check_tablespace_mapping(pgBackup *backup, bool incremental, bool force, bool pg */ if (tablespace_dirs.head != NULL) elog(ERROR, "Backup %s has no tablespaceses, nothing to remap " - "via \"--tablespace-mapping\" option", base36enc(backup->backup_id)); + "via \"--tablespace-mapping\" option", backup_id_of(backup)); return NoTblspc; } diff --git a/src/merge.c b/src/merge.c index a0d3fee1f..0017c9e9c 100644 --- a/src/merge.c +++ b/src/merge.c @@ -874,6 +874,8 @@ merge_chain(InstanceState *instanceState, full_backup->status = BACKUP_STATUS_OK; full_backup->start_time = full_backup->merge_dest_backup; + /* XXX BACKUP_ID change it when backup_id wouldn't match start_time */ + full_backup->backup_id = full_backup->start_time; full_backup->merge_dest_backup = INVALID_BACKUP_ID; write_backup(full_backup, true); /* Critical section end */ diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 5ef6cbe31..843fb3522 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -887,6 +887,7 @@ extern parray *get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, PartialRestoreType partial_restore_type); extern const char* backup_id_of(pgBackup *backup); +extern void reset_backup_id(pgBackup *backup); extern parray *get_backup_filelist(pgBackup *backup, bool strict); extern parray *read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict); diff --git a/src/util.c b/src/util.c index d3cfcb37e..e371d2c6d 100644 --- a/src/util.c +++ b/src/util.c @@ -565,9 +565,19 @@ datapagemap_print_debug(datapagemap_t *map) const char* backup_id_of(pgBackup *backup) { + /* Change this Assert when backup_id will not be bound to start_time */ + Assert(backup->backup_id == backup->start_time || backup->start_time == 0); + if (backup->backup_id_encoded[0] == '\x00') { - base36enc_to(backup->start_time, backup->backup_id_encoded); + base36enc_to(backup->backup_id, backup->backup_id_encoded); } return backup->backup_id_encoded; } + +void +reset_backup_id(pgBackup *backup) +{ + backup->backup_id = INVALID_BACKUP_ID; + memset(backup->backup_id_encoded, 0, sizeof(backup->backup_id_encoded)); +} diff --git a/src/validate.c b/src/validate.c index 7a9140bbc..b89b67b84 100644 --- a/src/validate.c +++ b/src/validate.c @@ -734,7 +734,7 @@ validate_tablespace_map(pgBackup *backup, bool no_validate) if (!fileExists(map_path, FIO_BACKUP_HOST)) elog(ERROR, "Tablespace map is missing: \"%s\", " "probably backup %s is corrupt, validate it", - map_path, base36enc(backup->backup_id)); + map_path, backup_id_of(backup)); /* check tablespace map checksumms */ if (!no_validate) @@ -744,7 +744,7 @@ validate_tablespace_map(pgBackup *backup, bool no_validate) if ((*tablespace_map)->crc != crc) elog(ERROR, "Invalid CRC of tablespace map file \"%s\" : %X. 
Expected %X, " "probably backup %s is corrupt, validate it", - map_path, crc, (*tablespace_map)->crc, base36enc(backup->backup_id)); + map_path, crc, (*tablespace_map)->crc, backup_id_of(backup)); } pgFileFree(dummy); From 2b8a1532350af09eed827f3024f8f4a30baf04fb Mon Sep 17 00:00:00 2001 From: "v.shepard" Date: Thu, 24 Nov 2022 10:23:59 +0100 Subject: [PATCH 393/525] PBCKP-306 add '_test' to tests files --- tests/CVE_2018_1058_test.py | 129 + tests/archive_test.py | 2707 ++++++++++++++++++ tests/backup_test.py | 3564 +++++++++++++++++++++++ tests/cfs_backup_test.py | 1235 ++++++++ tests/cfs_catchup_test.py | 117 + tests/cfs_restore_test.py | 450 +++ tests/cfs_validate_backup_test.py | 24 + tests/checkdb_test.py | 849 ++++++ tests/compatibility_test.py | 1500 ++++++++++ tests/compression_test.py | 495 ++++ tests/config_test.py | 113 + tests/delete_test.py | 822 ++++++ tests/delta_test.py | 1201 ++++++++ tests/exclude_test.py | 338 +++ tests/external_test.py | 2405 ++++++++++++++++ tests/false_positive_test.py | 337 +++ tests/incr_restore_test.py | 2300 +++++++++++++++ tests/init_test.py | 138 + tests/locking_test.py | 629 ++++ tests/logging_test.py | 345 +++ tests/merge_test.py | 2759 ++++++++++++++++++ tests/option_test.py | 231 ++ tests/page_test.py | 1424 ++++++++++ tests/pgpro2068_test.py | 188 ++ tests/pgpro560_test.py | 123 + tests/pgpro589_test.py | 72 + tests/ptrack_test.py | 4407 +++++++++++++++++++++++++++++ tests/remote_test.py | 43 + tests/replica_test.py | 1654 +++++++++++ tests/restore_test.py | 3822 +++++++++++++++++++++++++ tests/retention_test.py | 2529 +++++++++++++++++ tests/set_backup_test.py | 476 ++++ tests/show_test.py | 509 ++++ tests/time_consuming_test.py | 77 + tests/time_stamp_test.py | 236 ++ tests/validate_test.py | 4083 ++++++++++++++++++++++++++ 36 files changed, 42331 insertions(+) create mode 100644 tests/CVE_2018_1058_test.py create mode 100644 tests/archive_test.py create mode 100644 tests/backup_test.py create mode 100644 tests/cfs_backup_test.py create mode 100644 tests/cfs_catchup_test.py create mode 100644 tests/cfs_restore_test.py create mode 100644 tests/cfs_validate_backup_test.py create mode 100644 tests/checkdb_test.py create mode 100644 tests/compatibility_test.py create mode 100644 tests/compression_test.py create mode 100644 tests/config_test.py create mode 100644 tests/delete_test.py create mode 100644 tests/delta_test.py create mode 100644 tests/exclude_test.py create mode 100644 tests/external_test.py create mode 100644 tests/false_positive_test.py create mode 100644 tests/incr_restore_test.py create mode 100644 tests/init_test.py create mode 100644 tests/locking_test.py create mode 100644 tests/logging_test.py create mode 100644 tests/merge_test.py create mode 100644 tests/option_test.py create mode 100644 tests/page_test.py create mode 100644 tests/pgpro2068_test.py create mode 100644 tests/pgpro560_test.py create mode 100644 tests/pgpro589_test.py create mode 100644 tests/ptrack_test.py create mode 100644 tests/remote_test.py create mode 100644 tests/replica_test.py create mode 100644 tests/restore_test.py create mode 100644 tests/retention_test.py create mode 100644 tests/set_backup_test.py create mode 100644 tests/show_test.py create mode 100644 tests/time_consuming_test.py create mode 100644 tests/time_stamp_test.py create mode 100644 tests/validate_test.py diff --git a/tests/CVE_2018_1058_test.py b/tests/CVE_2018_1058_test.py new file mode 100644 index 000000000..cfd55cc60 --- /dev/null +++ b/tests/CVE_2018_1058_test.py @@ -0,0 
+1,129 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException + +class CVE_2018_1058(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + def test_basic_default_search_path(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + 'postgres', + "CREATE FUNCTION public.pgpro_edition() " + "RETURNS text " + "AS $$ " + "BEGIN " + " RAISE 'pg_probackup vulnerable!'; " + "END " + "$$ LANGUAGE plpgsql") + + self.backup_node(backup_dir, 'node', node, backup_type='full', options=['--stream']) + + # @unittest.skip("skip") + def test_basic_backup_modified_search_path(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True) + self.set_auto_conf(node, options={'search_path': 'public,pg_catalog'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + 'postgres', + "CREATE FUNCTION public.pg_control_checkpoint(OUT timeline_id integer, OUT dummy integer) " + "RETURNS record " + "AS $$ " + "BEGIN " + " RAISE '% vulnerable!', 'pg_probackup'; " + "END " + "$$ LANGUAGE plpgsql") + + node.safe_psql( + 'postgres', + "CREATE FUNCTION public.pg_proc(OUT proname name, OUT dummy integer) " + "RETURNS record " + "AS $$ " + "BEGIN " + " RAISE '% vulnerable!', 'pg_probackup'; " + "END " + "$$ LANGUAGE plpgsql; " + "CREATE VIEW public.pg_proc AS SELECT proname FROM public.pg_proc()") + + self.backup_node(backup_dir, 'node', node, backup_type='full', options=['--stream']) + + log_file = os.path.join(node.logs_dir, 'postgresql.log') + with open(log_file, 'r') as f: + log_content = f.read() + self.assertFalse( + 'pg_probackup vulnerable!' 
in log_content) + + # @unittest.skip("skip") + def test_basic_checkdb_modified_search_path(self): + """""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + self.set_auto_conf(node, options={'search_path': 'public,pg_catalog'}) + node.slow_start() + + node.safe_psql( + 'postgres', + "CREATE FUNCTION public.pg_database(OUT datname name, OUT oid oid, OUT dattablespace oid) " + "RETURNS record " + "AS $$ " + "BEGIN " + " RAISE 'pg_probackup vulnerable!'; " + "END " + "$$ LANGUAGE plpgsql; " + "CREATE VIEW public.pg_database AS SELECT * FROM public.pg_database()") + + node.safe_psql( + 'postgres', + "CREATE FUNCTION public.pg_extension(OUT extname name, OUT extnamespace oid, OUT extversion text) " + "RETURNS record " + "AS $$ " + "BEGIN " + " RAISE 'pg_probackup vulnerable!'; " + "END " + "$$ LANGUAGE plpgsql; " + "CREATE FUNCTION public.pg_namespace(OUT oid oid, OUT nspname name) " + "RETURNS record " + "AS $$ " + "BEGIN " + " RAISE 'pg_probackup vulnerable!'; " + "END " + "$$ LANGUAGE plpgsql; " + "CREATE VIEW public.pg_extension AS SELECT * FROM public.pg_extension();" + "CREATE VIEW public.pg_namespace AS SELECT * FROM public.pg_namespace();" + ) + + try: + self.checkdb_node( + options=[ + '--amcheck', + '--skip-block-validation', + '-d', 'postgres', '-p', str(node.port)]) + self.assertEqual( + 1, 0, + "Expecting Error because amcheck{,_next} not installed\n" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "WARNING: Extension 'amcheck' or 'amcheck_next' are not installed in database postgres", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) diff --git a/tests/archive_test.py b/tests/archive_test.py new file mode 100644 index 000000000..5e59dd268 --- /dev/null +++ b/tests/archive_test.py @@ -0,0 +1,2707 @@ +import os +import shutil +import gzip +import unittest +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, GdbException +from datetime import datetime, timedelta +import subprocess +from sys import exit +from time import sleep +from distutils.dir_util import copy_tree + + +class ArchiveTest(ProbackupTest, unittest.TestCase): + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_pgpro434_1(self): + """Description in jira issue PGPRO-434""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "create table t_heap as select 1 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector from " + "generate_series(0,100) i") + + result = node.safe_psql("postgres", "SELECT * FROM t_heap") + self.backup_node( + backup_dir, 'node', node) + node.cleanup() + + self.restore_node( + backup_dir, 'node', node) + node.slow_start() + + # Recreate backup catalog + self.clean_pb(backup_dir) + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + + # Make backup + self.backup_node(backup_dir, 'node', node) + node.cleanup() + + # Restore Database + self.restore_node(backup_dir, 'node', node) + node.slow_start() + + 
self.assertEqual( + result, node.safe_psql("postgres", "SELECT * FROM t_heap"), + 'data after restore not equal to original data') + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_pgpro434_2(self): + """ + Check that timelines are correct. + WAITING PGPRO-1053 for --immediate + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s'} + ) + + if self.get_version(node) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because pg_control_checkpoint() is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FIRST TIMELINE + node.safe_psql( + "postgres", + "create table t_heap as select 1 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,100) i") + backup_id = self.backup_node(backup_dir, 'node', node) + node.safe_psql( + "postgres", + "insert into t_heap select 100501 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1) i") + + # SECOND TIMELIN + node.cleanup() + self.restore_node( + backup_dir, 'node', node, + options=['--immediate', '--recovery-target-action=promote']) + node.slow_start() + + if self.verbose: + print(node.safe_psql( + "postgres", + "select redo_wal_file from pg_control_checkpoint()")) + self.assertFalse( + node.execute( + "postgres", + "select exists(select 1 " + "from t_heap where id = 100501)")[0][0], + 'data after restore not equal to original data') + + node.safe_psql( + "postgres", + "insert into t_heap select 2 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(100,200) i") + + backup_id = self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "insert into t_heap select 100502 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,256) i") + + # THIRD TIMELINE + node.cleanup() + self.restore_node( + backup_dir, 'node', node, + options=['--immediate', '--recovery-target-action=promote']) + node.slow_start() + + if self.verbose: + print( + node.safe_psql( + "postgres", + "select redo_wal_file from pg_control_checkpoint()")) + + node.safe_psql( + "postgres", + "insert into t_heap select 3 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(200,300) i") + + backup_id = self.backup_node(backup_dir, 'node', node) + + result = node.safe_psql("postgres", "SELECT * FROM t_heap") + node.safe_psql( + "postgres", + "insert into t_heap select 100503 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,256) i") + + # FOURTH TIMELINE + node.cleanup() + self.restore_node( + backup_dir, 'node', node, + options=['--immediate', '--recovery-target-action=promote']) + node.slow_start() + + if self.verbose: + print('Fourth timeline') + print(node.safe_psql( + "postgres", + "select redo_wal_file from pg_control_checkpoint()")) + + # FIFTH TIMELINE + node.cleanup() + self.restore_node( + backup_dir, 'node', node, + options=['--immediate', '--recovery-target-action=promote']) + node.slow_start() + + if self.verbose: + print('Fifth timeline') + print(node.safe_psql( + 
"postgres", + "select redo_wal_file from pg_control_checkpoint()")) + + # SIXTH TIMELINE + node.cleanup() + self.restore_node( + backup_dir, 'node', node, + options=['--immediate', '--recovery-target-action=promote']) + node.slow_start() + + if self.verbose: + print('Sixth timeline') + print(node.safe_psql( + "postgres", + "select redo_wal_file from pg_control_checkpoint()")) + + self.assertFalse( + node.execute( + "postgres", + "select exists(select 1 from t_heap where id > 100500)")[0][0], + 'data after restore not equal to original data') + + self.assertEqual( + result, + node.safe_psql( + "postgres", + "SELECT * FROM t_heap"), + 'data after restore not equal to original data') + + # @unittest.skip("skip") + def test_pgpro434_3(self): + """ + Check pg_stop_backup_timeout, needed backup_timeout + Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7 + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + + node.slow_start() + + gdb = self.backup_node( + backup_dir, 'node', node, + options=[ + "--archive-timeout=60", + "--log-level-file=LOG"], + gdb=True) + + # Attention! this breakpoint has been set on internal probackup function, not on a postgres core one + gdb.set_breakpoint('pg_stop_backup') + gdb.run_until_break() + + self.set_auto_conf(node, {'archive_command': 'exit 1'}) + node.reload() + + gdb.continue_execution_until_exit() + + sleep(1) + + log_file = os.path.join(backup_dir, 'log', 'pg_probackup.log') + with open(log_file, 'r') as f: + log_content = f.read() + + # in PG =< 9.6 pg_stop_backup always wait + if self.get_version(node) < 100000: + self.assertIn( + "ERROR: pg_stop_backup doesn't answer in 60 seconds, cancel it", + log_content) + else: + self.assertIn( + "ERROR: WAL segment 000000010000000000000003 could not be archived in 60 seconds", + log_content) + + log_file = os.path.join(node.logs_dir, 'postgresql.log') + with open(log_file, 'r') as f: + log_content = f.read() + + self.assertNotIn( + 'FailedAssertion', + log_content, + 'PostgreSQL crashed because of a failed assert') + + # @unittest.skip("skip") + def test_pgpro434_4(self): + """ + Check pg_stop_backup_timeout, libpq-timeout requested. + Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7 + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + + node.slow_start() + + gdb = self.backup_node( + backup_dir, 'node', node, + options=[ + "--archive-timeout=60", + "--log-level-file=info"], + gdb=True) + + # Attention! 
this breakpoint has been set on internal probackup function, not on a postgres core one + gdb.set_breakpoint('pg_stop_backup') + gdb.run_until_break() + + self.set_auto_conf(node, {'archive_command': 'exit 1'}) + node.reload() + + os.environ["PGAPPNAME"] = "foo" + + pid = node.safe_psql( + "postgres", + "SELECT pid " + "FROM pg_stat_activity " + "WHERE application_name = 'pg_probackup'").decode('utf-8').rstrip() + + os.environ["PGAPPNAME"] = "pg_probackup" + + postgres_gdb = self.gdb_attach(pid) + postgres_gdb.set_breakpoint('do_pg_stop_backup') + postgres_gdb.continue_execution_until_running() + + gdb.continue_execution_until_exit() + # gdb._execute('detach') + + log_file = os.path.join(backup_dir, 'log', 'pg_probackup.log') + with open(log_file, 'r') as f: + log_content = f.read() + + if self.get_version(node) < 150000: + self.assertIn( + "ERROR: pg_stop_backup doesn't answer in 60 seconds, cancel it", + log_content) + else: + self.assertIn( + "ERROR: pg_backup_stop doesn't answer in 60 seconds, cancel it", + log_content) + + log_file = os.path.join(node.logs_dir, 'postgresql.log') + with open(log_file, 'r') as f: + log_content = f.read() + + self.assertNotIn( + 'FailedAssertion', + log_content, + 'PostgreSQL crashed because of a failed assert') + + # @unittest.skip("skip") + def test_archive_push_file_exists(self): + """Archive-push if file exists""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + + wals_dir = os.path.join(backup_dir, 'wal', 'node') + if self.archive_compress: + filename = '000000010000000000000001.gz' + file = os.path.join(wals_dir, filename) + else: + filename = '000000010000000000000001' + file = os.path.join(wals_dir, filename) + + with open(file, 'a+b') as f: + f.write(b"blablablaadssaaaaaaaaaaaaaaa") + f.flush() + f.close() + + node.slow_start() + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,100500) i") + log_file = os.path.join(node.logs_dir, 'postgresql.log') + + self.switch_wal_segment(node) + sleep(1) + + with open(log_file, 'r') as f: + log_content = f.read() + self.assertIn( + 'LOG: archive command failed with exit code 1', + log_content) + + self.assertIn( + 'DETAIL: The failed archive command was:', + log_content) + + self.assertIn( + 'pg_probackup archive-push WAL file', + log_content) + + self.assertIn( + 'WAL file already exists in archive with different checksum', + log_content) + + self.assertNotIn( + 'pg_probackup archive-push completed successfully', log_content) + + if self.get_version(node) < 100000: + wal_src = os.path.join( + node.data_dir, 'pg_xlog', '000000010000000000000001') + else: + wal_src = os.path.join( + node.data_dir, 'pg_wal', '000000010000000000000001') + + if self.archive_compress: + with open(wal_src, 'rb') as f_in, gzip.open( + file, 'wb', compresslevel=1) as f_out: + shutil.copyfileobj(f_in, f_out) + else: + shutil.copyfile(wal_src, file) + + self.switch_wal_segment(node) + sleep(5) + + with open(log_file, 'r') as f: + log_content = f.read() + + self.assertIn( + 'pg_probackup archive-push completed successfully', + log_content) + + # btw check 
that console coloring codes are not slipped into log file + self.assertNotIn('[0m', log_content) + + print(log_content) + + # @unittest.skip("skip") + def test_archive_push_file_exists_overwrite(self): + """Archive-push if file exists""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={'checkpoint_timeout': '30s'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + + wals_dir = os.path.join(backup_dir, 'wal', 'node') + if self.archive_compress: + filename = '000000010000000000000001.gz' + file = os.path.join(wals_dir, filename) + else: + filename = '000000010000000000000001' + file = os.path.join(wals_dir, filename) + + with open(file, 'a+b') as f: + f.write(b"blablablaadssaaaaaaaaaaaaaaa") + f.flush() + f.close() + + node.slow_start() + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,100500) i") + log_file = os.path.join(node.logs_dir, 'postgresql.log') + + self.switch_wal_segment(node) + sleep(1) + + with open(log_file, 'r') as f: + log_content = f.read() + + self.assertIn( + 'LOG: archive command failed with exit code 1', log_content) + self.assertIn( + 'DETAIL: The failed archive command was:', log_content) + self.assertIn( + 'pg_probackup archive-push WAL file', log_content) + self.assertNotIn( + 'WAL file already exists in archive with ' + 'different checksum, overwriting', log_content) + self.assertIn( + 'WAL file already exists in archive with ' + 'different checksum', log_content) + + self.assertNotIn( + 'pg_probackup archive-push completed successfully', log_content) + + self.set_archiving(backup_dir, 'node', node, overwrite=True) + node.reload() + self.switch_wal_segment(node) + sleep(5) + + with open(log_file, 'r') as f: + log_content = f.read() + self.assertTrue( + 'pg_probackup archive-push completed successfully' in log_content, + 'Expecting messages about successfull execution archive_command') + + self.assertIn( + 'WAL file already exists in archive with ' + 'different checksum, overwriting', log_content) + + # @unittest.skip("skip") + def test_archive_push_partial_file_exists(self): + """Archive-push if stale '.part' file exists""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving( + backup_dir, 'node', node, + log_level='verbose', archive_timeout=60) + + node.slow_start() + + # this backup is needed only for validation to xid + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "create table t1(a int)") + + xid = node.safe_psql( + "postgres", + "INSERT INTO t1 VALUES (1) RETURNING (xmin)").decode('utf-8').rstrip() + + if self.get_version(node) < 100000: + filename_orig = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_xlogfile_name_offset(pg_current_xlog_location());").rstrip() + else: + filename_orig = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() + + filename_orig = 
filename_orig.decode('utf-8') + + # form up path to next .part WAL segment + wals_dir = os.path.join(backup_dir, 'wal', 'node') + if self.archive_compress: + filename = filename_orig + '.gz' + '.part' + file = os.path.join(wals_dir, filename) + else: + filename = filename_orig + '.part' + file = os.path.join(wals_dir, filename) + + # emulate stale .part file + with open(file, 'a+b') as f: + f.write(b"blahblah") + f.flush() + f.close() + + self.switch_wal_segment(node) + sleep(70) + + # check that segment is archived + if self.archive_compress: + filename_orig = filename_orig + '.gz' + + file = os.path.join(wals_dir, filename_orig) + self.assertTrue(os.path.isfile(file)) + + # successful validate means that archive-push reused stale wal segment + self.validate_pb( + backup_dir, 'node', + options=['--recovery-target-xid={0}'.format(xid)]) + + log_file = os.path.join(node.logs_dir, 'postgresql.log') + with open(log_file, 'r') as f: + log_content = f.read() + + self.assertIn( + 'Reusing stale temp WAL file', + log_content) + + # @unittest.skip("skip") + def test_archive_push_part_file_exists_not_stale(self): + """Archive-push if .part file exists and it is not stale""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node, archive_timeout=60) + + node.slow_start() + + node.safe_psql( + "postgres", + "create table t1()") + self.switch_wal_segment(node) + + node.safe_psql( + "postgres", + "create table t2()") + + if self.get_version(node) < 100000: + filename_orig = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_xlogfile_name_offset(pg_current_xlog_location());").rstrip() + else: + filename_orig = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() + + filename_orig = filename_orig.decode('utf-8') + + # form up path to next .part WAL segment + wals_dir = os.path.join(backup_dir, 'wal', 'node') + if self.archive_compress: + filename = filename_orig + '.gz' + '.part' + file = os.path.join(wals_dir, filename) + else: + filename = filename_orig + '.part' + file = os.path.join(wals_dir, filename) + + with open(file, 'a+b') as f: + f.write(b"blahblah") + f.flush() + f.close() + + self.switch_wal_segment(node) + sleep(30) + + with open(file, 'a+b') as f: + f.write(b"blahblahblahblah") + f.flush() + f.close() + + sleep(40) + + # check that segment is NOT archived + if self.archive_compress: + filename_orig = filename_orig + '.gz' + + file = os.path.join(wals_dir, filename_orig) + + self.assertFalse(os.path.isfile(file)) + + # log_file = os.path.join(node.logs_dir, 'postgresql.log') + # with open(log_file, 'r') as f: + # log_content = f.read() + # self.assertIn( + # 'is not stale', + # log_content) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_replica_archive(self): + """ + make node without archiving, take stream backup and + turn it into replica, set replica with archiving, + make archive backup from replica + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'archive_timeout': 
'10s', + 'checkpoint_timeout': '30s', + 'max_wal_size': '32MB'}) + + if self.get_version(master) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + # ADD INSTANCE 'MASTER' + self.add_instance(backup_dir, 'master', master) + master.slow_start() + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + master.psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,2560) i") + + self.backup_node(backup_dir, 'master', master, options=['--stream']) + before = master.safe_psql("postgres", "SELECT * FROM t_heap") + + # Settings for Replica + self.restore_node(backup_dir, 'master', replica) + self.set_replica(master, replica, synchronous=True) + + self.add_instance(backup_dir, 'replica', replica) + self.set_archiving(backup_dir, 'replica', replica, replica=True) + replica.slow_start(replica=True) + + # Check data correctness on replica + after = replica.safe_psql("postgres", "SELECT * FROM t_heap") + self.assertEqual(before, after) + + # Change data on master, take FULL backup from replica, + # restore taken backup and check that restored data equal + # to original data + master.psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(256,512) i") + before = master.safe_psql("postgres", "SELECT * FROM t_heap") + + backup_id = self.backup_node( + backup_dir, 'replica', replica, + options=[ + '--archive-timeout=30', + '--master-host=localhost', + '--master-db=postgres', + '--master-port={0}'.format(master.port), + '--stream']) + + self.validate_pb(backup_dir, 'replica') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) + + # RESTORE FULL BACKUP TAKEN FROM replica + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node')) + node.cleanup() + self.restore_node(backup_dir, 'replica', data_dir=node.data_dir) + + self.set_auto_conf(node, {'port': node.port}) + node.slow_start() + # CHECK DATA CORRECTNESS + after = node.safe_psql("postgres", "SELECT * FROM t_heap") + self.assertEqual(before, after) + + # Change data on master, make PAGE backup from replica, + # restore taken backup and check that restored data equal + # to original data + master.psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(512,80680) i") + + before = master.safe_psql("postgres", "SELECT * FROM t_heap") + + self.wait_until_replica_catch_with_master(master, replica) + + backup_id = self.backup_node( + backup_dir, 'replica', + replica, backup_type='page', + options=[ + '--archive-timeout=60', + '--master-db=postgres', + '--master-host=localhost', + '--master-port={0}'.format(master.port), + '--stream']) + + self.validate_pb(backup_dir, 'replica') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) + + # RESTORE PAGE BACKUP TAKEN FROM replica + node.cleanup() + self.restore_node( + backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id) + + self.set_auto_conf(node, {'port': node.port}) + + node.slow_start() + # CHECK DATA CORRECTNESS + after = node.safe_psql("postgres", "SELECT * FROM t_heap") + self.assertEqual(before, after) + + # 
@unittest.expectedFailure + # @unittest.skip("skip") + def test_master_and_replica_parallel_archiving(self): + """ + make node 'master 'with archiving, + take archive backup and turn it into replica, + set replica with archiving, make archive backup from replica, + make archive backup from master + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'archive_timeout': '10s'} + ) + + if self.get_version(master) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.init_pb(backup_dir) + # ADD INSTANCE 'MASTER' + self.add_instance(backup_dir, 'master', master) + self.set_archiving(backup_dir, 'master', master) + master.slow_start() + + master.psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + + # TAKE FULL ARCHIVE BACKUP FROM MASTER + self.backup_node(backup_dir, 'master', master) + # GET LOGICAL CONTENT FROM MASTER + before = master.safe_psql("postgres", "SELECT * FROM t_heap") + # GET PHYSICAL CONTENT FROM MASTER + pgdata_master = self.pgdata_content(master.data_dir) + + # Settings for Replica + self.restore_node(backup_dir, 'master', replica) + # CHECK PHYSICAL CORRECTNESS on REPLICA + pgdata_replica = self.pgdata_content(replica.data_dir) + self.compare_pgdata(pgdata_master, pgdata_replica) + + self.set_replica(master, replica) + # ADD INSTANCE REPLICA + self.add_instance(backup_dir, 'replica', replica) + # SET ARCHIVING FOR REPLICA + self.set_archiving(backup_dir, 'replica', replica, replica=True) + replica.slow_start(replica=True) + + # CHECK LOGICAL CORRECTNESS on REPLICA + after = replica.safe_psql("postgres", "SELECT * FROM t_heap") + self.assertEqual(before, after) + + master.psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0, 60000) i") + + backup_id = self.backup_node( + backup_dir, 'replica', replica, + options=[ + '--archive-timeout=30', + '--master-host=localhost', + '--master-db=postgres', + '--master-port={0}'.format(master.port), + '--stream']) + + self.validate_pb(backup_dir, 'replica') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) + + # TAKE FULL ARCHIVE BACKUP FROM MASTER + backup_id = self.backup_node(backup_dir, 'master', master) + self.validate_pb(backup_dir, 'master') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'master', backup_id)['status']) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_basic_master_and_replica_concurrent_archiving(self): + """ + make node 'master 'with archiving, + take archive backup and turn it into replica, + set replica with archiving, + make sure that archiving on both node is working. 
+ """ + if self.pg_config_version < self.version_to_num('9.6.0'): + self.skipTest('You need PostgreSQL >= 9.6 for this test') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s', + 'archive_timeout': '10s'}) + + if self.get_version(master) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.init_pb(backup_dir) + # ADD INSTANCE 'MASTER' + self.add_instance(backup_dir, 'master', master) + self.set_archiving(backup_dir, 'master', master) + master.slow_start() + + master.psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + + master.pgbench_init(scale=5) + + # TAKE FULL ARCHIVE BACKUP FROM MASTER + self.backup_node(backup_dir, 'master', master) + # GET LOGICAL CONTENT FROM MASTER + before = master.safe_psql("postgres", "SELECT * FROM t_heap") + # GET PHYSICAL CONTENT FROM MASTER + pgdata_master = self.pgdata_content(master.data_dir) + + # Settings for Replica + self.restore_node( + backup_dir, 'master', replica) + # CHECK PHYSICAL CORRECTNESS on REPLICA + pgdata_replica = self.pgdata_content(replica.data_dir) + self.compare_pgdata(pgdata_master, pgdata_replica) + + self.set_replica(master, replica, synchronous=False) + # ADD INSTANCE REPLICA + # self.add_instance(backup_dir, 'replica', replica) + # SET ARCHIVING FOR REPLICA + self.set_archiving(backup_dir, 'master', replica, replica=True) + replica.slow_start(replica=True) + + # CHECK LOGICAL CORRECTNESS on REPLICA + after = replica.safe_psql("postgres", "SELECT * FROM t_heap") + self.assertEqual(before, after) + + master.psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + + # TAKE FULL ARCHIVE BACKUP FROM REPLICA + backup_id = self.backup_node(backup_dir, 'master', replica) + + self.validate_pb(backup_dir, 'master') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'master', backup_id)['status']) + + # TAKE FULL ARCHIVE BACKUP FROM MASTER + backup_id = self.backup_node(backup_dir, 'master', master) + self.validate_pb(backup_dir, 'master') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'master', backup_id)['status']) + + master.pgbench_init(scale=10) + + sleep(10) + + replica.promote() + + master.pgbench_init(scale=10) + replica.pgbench_init(scale=10) + + self.backup_node(backup_dir, 'master', master) + self.backup_node(backup_dir, 'master', replica) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_concurrent_archiving(self): + """ + Concurrent archiving from master, replica and cascade replica + https://github.com/postgrespro/pg_probackup/issues/327 + + For PG >= 11 it is expected to pass this test + """ + + if self.pg_config_version < self.version_to_num('11.0'): + self.skipTest('You need PostgreSQL >= 11 for this test') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, 
+ initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', master) + self.set_archiving(backup_dir, 'node', master, replica=True) + master.slow_start() + + master.pgbench_init(scale=10) + + # TAKE FULL ARCHIVE BACKUP FROM MASTER + self.backup_node(backup_dir, 'node', master) + + # Settings for Replica + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + self.restore_node(backup_dir, 'node', replica) + + self.set_replica(master, replica, synchronous=True) + self.set_archiving(backup_dir, 'node', replica, replica=True) + self.set_auto_conf(replica, {'port': replica.port}) + replica.slow_start(replica=True) + + # create cascade replicas + replica1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica1')) + replica1.cleanup() + + # Settings for casaced replica + self.restore_node(backup_dir, 'node', replica1) + self.set_replica(replica, replica1, synchronous=False) + self.set_auto_conf(replica1, {'port': replica1.port}) + replica1.slow_start(replica=True) + + # Take full backup from master + self.backup_node(backup_dir, 'node', master) + + pgbench = master.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '30', '-c', '1']) + + # Take several incremental backups from master + self.backup_node(backup_dir, 'node', master, backup_type='page', options=['--no-validate']) + + self.backup_node(backup_dir, 'node', master, backup_type='page', options=['--no-validate']) + + pgbench.wait() + pgbench.stdout.close() + + with open(os.path.join(master.logs_dir, 'postgresql.log'), 'r') as f: + log_content = f.read() + self.assertNotIn('different checksum', log_content) + + with open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f: + log_content = f.read() + self.assertNotIn('different checksum', log_content) + + with open(os.path.join(replica1.logs_dir, 'postgresql.log'), 'r') as f: + log_content = f.read() + self.assertNotIn('different checksum', log_content) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_archive_pg_receivexlog(self): + """Test backup with pg_receivexlog wal delivary method""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + if self.get_version(node) < 100000: + pg_receivexlog_path = self.get_bin_path('pg_receivexlog') + else: + pg_receivexlog_path = self.get_bin_path('pg_receivewal') + + pg_receivexlog = self.run_binary( + [ + pg_receivexlog_path, '-p', str(node.port), '--synchronous', + '-D', os.path.join(backup_dir, 'wal', 'node') + ], asynchronous=True) + + if pg_receivexlog.returncode: + self.assertFalse( + True, + 'Failed to start pg_receivexlog: {0}'.format( + pg_receivexlog.communicate()[1])) + + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + + self.backup_node(backup_dir, 'node', node) + + # PAGE + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(10000,20000) i") + + self.backup_node( + 
backup_dir, + 'node', + node, + backup_type='page' + ) + result = node.safe_psql("postgres", "SELECT * FROM t_heap") + self.validate_pb(backup_dir) + + # Check data correctness + node.cleanup() + self.restore_node(backup_dir, 'node', node) + node.slow_start() + + self.assertEqual( + result, + node.safe_psql( + "postgres", "SELECT * FROM t_heap" + ), + 'data after restore not equal to original data') + + # Clean after yourself + pg_receivexlog.kill() + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_archive_pg_receivexlog_compression_pg10(self): + """Test backup with pg_receivewal compressed wal delivary method""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s'} + ) + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + if self.get_version(node) < self.version_to_num('10.0'): + self.skipTest('You need PostgreSQL >= 10 for this test') + else: + pg_receivexlog_path = self.get_bin_path('pg_receivewal') + + pg_receivexlog = self.run_binary( + [ + pg_receivexlog_path, '-p', str(node.port), '--synchronous', + '-Z', '9', '-D', os.path.join(backup_dir, 'wal', 'node') + ], asynchronous=True) + + if pg_receivexlog.returncode: + self.assertFalse( + True, + 'Failed to start pg_receivexlog: {0}'.format( + pg_receivexlog.communicate()[1])) + + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + + self.backup_node(backup_dir, 'node', node) + + # PAGE + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(10000,20000) i") + + self.backup_node( + backup_dir, 'node', node, + backup_type='page' + ) + result = node.safe_psql("postgres", "SELECT * FROM t_heap") + self.validate_pb(backup_dir) + + # Check data correctness + node.cleanup() + self.restore_node(backup_dir, 'node', node) + node.slow_start() + + self.assertEqual( + result, node.safe_psql("postgres", "SELECT * FROM t_heap"), + 'data after restore not equal to original data') + + # Clean after yourself + pg_receivexlog.kill() + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_archive_catalog(self): + """ + ARCHIVE replica: + + t6 |----------------------- + t5 | |------- + | | + t4 | |-------------- + | | + t3 | |--B1--|/|--B2-|/|-B3--- + | | + t2 |--A1--------A2--- + t1 ---------Y1--Y2-- + + ARCHIVE master: + t1 -Z1--Z2--- + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'archive_timeout': '30s', + 'checkpoint_timeout': '30s'}) + + if self.get_version(master) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + self.set_archiving(backup_dir, 'master', master) + + master.slow_start() + + # FULL + master.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + 
"from generate_series(0,10000) i") + + self.backup_node(backup_dir, 'master', master) + + # PAGE + master.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(10000,20000) i") + + self.backup_node( + backup_dir, 'master', master, backup_type='page') + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + self.restore_node(backup_dir, 'master', replica) + self.set_replica(master, replica) + + self.add_instance(backup_dir, 'replica', replica) + self.set_archiving(backup_dir, 'replica', replica, replica=True) + + copy_tree( + os.path.join(backup_dir, 'wal', 'master'), + os.path.join(backup_dir, 'wal', 'replica')) + + replica.slow_start(replica=True) + + # FULL backup replica + Y1 = self.backup_node( + backup_dir, 'replica', replica, + options=['--stream', '--archive-timeout=60s']) + + master.pgbench_init(scale=5) + + # PAGE backup replica + Y2 = self.backup_node( + backup_dir, 'replica', replica, + backup_type='page', options=['--stream', '--archive-timeout=60s']) + + # create timeline t2 + replica.promote() + + # FULL backup replica + A1 = self.backup_node( + backup_dir, 'replica', replica) + + replica.pgbench_init(scale=5) + + replica.safe_psql( + 'postgres', + "CREATE TABLE t1 (a text)") + + target_xid = None + with replica.connect("postgres") as con: + res = con.execute( + "INSERT INTO t1 VALUES ('inserted') RETURNING (xmin)") + con.commit() + target_xid = res[0][0] + + # DELTA backup replica + A2 = self.backup_node( + backup_dir, 'replica', replica, backup_type='delta') + + # create timeline t3 + replica.cleanup() + self.restore_node( + backup_dir, 'replica', replica, + options=[ + '--recovery-target-xid={0}'.format(target_xid), + '--recovery-target-timeline=2', + '--recovery-target-action=promote']) + + replica.slow_start() + + B1 = self.backup_node( + backup_dir, 'replica', replica) + + replica.pgbench_init(scale=2) + + B2 = self.backup_node( + backup_dir, 'replica', replica, backup_type='page') + + replica.pgbench_init(scale=2) + + target_xid = None + with replica.connect("postgres") as con: + res = con.execute( + "INSERT INTO t1 VALUES ('inserted') RETURNING (xmin)") + con.commit() + target_xid = res[0][0] + + B3 = self.backup_node( + backup_dir, 'replica', replica, backup_type='page') + + replica.pgbench_init(scale=2) + + # create timeline t4 + replica.cleanup() + self.restore_node( + backup_dir, 'replica', replica, + options=[ + '--recovery-target-xid={0}'.format(target_xid), + '--recovery-target-timeline=3', + '--recovery-target-action=promote']) + + replica.slow_start() + + replica.safe_psql( + 'postgres', + 'CREATE TABLE ' + 't2 as select i, ' + 'repeat(md5(i::text),5006056) as fat_attr ' + 'from generate_series(0,6) i') + + target_xid = None + with replica.connect("postgres") as con: + res = con.execute( + "INSERT INTO t1 VALUES ('inserted') RETURNING (xmin)") + con.commit() + target_xid = res[0][0] + + replica.safe_psql( + 'postgres', + 'CREATE TABLE ' + 't3 as select i, ' + 'repeat(md5(i::text),5006056) as fat_attr ' + 'from generate_series(0,10) i') + + # create timeline t5 + replica.cleanup() + self.restore_node( + backup_dir, 'replica', replica, + options=[ + '--recovery-target-xid={0}'.format(target_xid), + '--recovery-target-timeline=4', + '--recovery-target-action=promote']) + + replica.slow_start() + + replica.safe_psql( + 'postgres', + 'CREATE TABLE ' + 't4 as select i, ' + 
'repeat(md5(i::text),5006056) as fat_attr ' + 'from generate_series(0,6) i') + + # create timeline t6 + replica.cleanup() + + self.restore_node( + backup_dir, 'replica', replica, backup_id=A1, + options=[ + '--recovery-target=immediate', + '--recovery-target-action=promote']) + replica.slow_start() + + replica.pgbench_init(scale=2) + + sleep(5) + + show = self.show_archive(backup_dir, as_text=True) + show = self.show_archive(backup_dir) + + for instance in show: + if instance['instance'] == 'replica': + replica_timelines = instance['timelines'] + + if instance['instance'] == 'master': + master_timelines = instance['timelines'] + + # check that all timelines are ok + for timeline in replica_timelines: + self.assertTrue(timeline['status'], 'OK') + + # check that all timelines are ok + for timeline in master_timelines: + self.assertTrue(timeline['status'], 'OK') + + # create holes in t3 + wals_dir = os.path.join(backup_dir, 'wal', 'replica') + wals = [ + f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) + and not f.endswith('.backup') and not f.endswith('.history') and f.startswith('00000003') + ] + wals.sort() + + # check that t3 is ok + self.show_archive(backup_dir) + + file = os.path.join(backup_dir, 'wal', 'replica', '000000030000000000000017') + if self.archive_compress: + file = file + '.gz' + os.remove(file) + + file = os.path.join(backup_dir, 'wal', 'replica', '000000030000000000000012') + if self.archive_compress: + file = file + '.gz' + os.remove(file) + + file = os.path.join(backup_dir, 'wal', 'replica', '000000030000000000000013') + if self.archive_compress: + file = file + '.gz' + os.remove(file) + + # check that t3 is not OK + show = self.show_archive(backup_dir) + + show = self.show_archive(backup_dir) + + for instance in show: + if instance['instance'] == 'replica': + replica_timelines = instance['timelines'] + + # sanity + for timeline in replica_timelines: + if timeline['tli'] == 1: + timeline_1 = timeline + continue + + if timeline['tli'] == 2: + timeline_2 = timeline + continue + + if timeline['tli'] == 3: + timeline_3 = timeline + continue + + if timeline['tli'] == 4: + timeline_4 = timeline + continue + + if timeline['tli'] == 5: + timeline_5 = timeline + continue + + if timeline['tli'] == 6: + timeline_6 = timeline + continue + + self.assertEqual(timeline_6['status'], "OK") + self.assertEqual(timeline_5['status'], "OK") + self.assertEqual(timeline_4['status'], "OK") + self.assertEqual(timeline_3['status'], "DEGRADED") + self.assertEqual(timeline_2['status'], "OK") + self.assertEqual(timeline_1['status'], "OK") + + self.assertEqual(len(timeline_3['lost-segments']), 2) + self.assertEqual( + timeline_3['lost-segments'][0]['begin-segno'], + '000000030000000000000012') + self.assertEqual( + timeline_3['lost-segments'][0]['end-segno'], + '000000030000000000000013') + self.assertEqual( + timeline_3['lost-segments'][1]['begin-segno'], + '000000030000000000000017') + self.assertEqual( + timeline_3['lost-segments'][1]['end-segno'], + '000000030000000000000017') + + self.assertEqual(len(timeline_6['backups']), 0) + self.assertEqual(len(timeline_5['backups']), 0) + self.assertEqual(len(timeline_4['backups']), 0) + self.assertEqual(len(timeline_3['backups']), 3) + self.assertEqual(len(timeline_2['backups']), 2) + self.assertEqual(len(timeline_1['backups']), 2) + + # check closest backup correctness + self.assertEqual(timeline_6['closest-backup-id'], A1) + self.assertEqual(timeline_5['closest-backup-id'], B2) + 
self.assertEqual(timeline_4['closest-backup-id'], B2) + self.assertEqual(timeline_3['closest-backup-id'], A1) + self.assertEqual(timeline_2['closest-backup-id'], Y2) + + # check parent tli correctness + self.assertEqual(timeline_6['parent-tli'], 2) + self.assertEqual(timeline_5['parent-tli'], 4) + self.assertEqual(timeline_4['parent-tli'], 3) + self.assertEqual(timeline_3['parent-tli'], 2) + self.assertEqual(timeline_2['parent-tli'], 1) + self.assertEqual(timeline_1['parent-tli'], 0) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_archive_catalog_1(self): + """ + double segment - compressed and not + """ + if not self.archive_compress: + self.skipTest('You need to enable ARCHIVE_COMPRESSION ' + 'for this test to run') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'archive_timeout': '30s', + 'checkpoint_timeout': '30s'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node, compress=True) + + node.slow_start() + + # FULL + self.backup_node(backup_dir, 'node', node) + node.pgbench_init(scale=2) + + wals_dir = os.path.join(backup_dir, 'wal', 'node') + original_file = os.path.join(wals_dir, '000000010000000000000001.gz') + tmp_file = os.path.join(wals_dir, '000000010000000000000001') + + with gzip.open(original_file, 'rb') as f_in, open(tmp_file, 'wb') as f_out: + shutil.copyfileobj(f_in, f_out) + + os.rename( + os.path.join(wals_dir, '000000010000000000000001'), + os.path.join(wals_dir, '000000010000000000000002')) + + show = self.show_archive(backup_dir) + + for instance in show: + timelines = instance['timelines'] + + # sanity + for timeline in timelines: + self.assertEqual( + timeline['min-segno'], + '000000010000000000000001') + self.assertEqual(timeline['status'], 'OK') + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_archive_catalog_2(self): + """ + double segment - compressed and not + """ + if not self.archive_compress: + self.skipTest('You need to enable ARCHIVE_COMPRESSION ' + 'for this test to run') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'archive_timeout': '30s', + 'checkpoint_timeout': '30s'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node, compress=True) + + node.slow_start() + + # FULL + self.backup_node(backup_dir, 'node', node) + node.pgbench_init(scale=2) + + wals_dir = os.path.join(backup_dir, 'wal', 'node') + original_file = os.path.join(wals_dir, '000000010000000000000001.gz') + tmp_file = os.path.join(wals_dir, '000000010000000000000001') + + with gzip.open(original_file, 'rb') as f_in, open(tmp_file, 'wb') as f_out: + shutil.copyfileobj(f_in, f_out) + + os.rename( + os.path.join(wals_dir, '000000010000000000000001'), + os.path.join(wals_dir, '000000010000000000000002')) + + os.remove(original_file) + + show = self.show_archive(backup_dir) + + for instance in show: + timelines = instance['timelines'] + + # sanity + for timeline in timelines: + self.assertEqual( + timeline['min-segno'], + '000000010000000000000002') + self.assertEqual(timeline['status'], 'OK') + + # 
@unittest.expectedFailure + # @unittest.skip("skip") + def test_archive_options(self): + """ + check that '--archive-host', '--archive-user', '--archiver-port' + and '--restore-command' are working as expected. + """ + if not self.remote: + self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" + " for run this test") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node, compress=True) + + node.slow_start() + + # FULL + self.backup_node(backup_dir, 'node', node) + node.pgbench_init(scale=1) + + node.cleanup() + + wal_dir = os.path.join(backup_dir, 'wal', 'node') + self.restore_node( + backup_dir, 'node', node, + options=[ + '--restore-command="cp {0}/%f %p"'.format(wal_dir), + '--archive-host=localhost', + '--archive-port=22', + '--archive-user={0}'.format(self.user) + ]) + + if self.get_version(node) >= self.version_to_num('12.0'): + recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') + else: + recovery_conf = os.path.join(node.data_dir, 'recovery.conf') + + with open(recovery_conf, 'r') as f: + recovery_content = f.read() + + self.assertIn( + 'restore_command = \'"cp {0}/%f %p"\''.format(wal_dir), + recovery_content) + + node.cleanup() + + self.restore_node( + backup_dir, 'node', node, + options=[ + '--archive-host=localhost', + '--archive-port=22', + '--archive-user={0}'.format(self.user)]) + + with open(recovery_conf, 'r') as f: + recovery_content = f.read() + + self.assertIn( + "restore_command = '\"{0}\" archive-get -B \"{1}\" --instance \"{2}\" " + "--wal-file-path=%p --wal-file-name=%f --remote-host=localhost " + "--remote-port=22 --remote-user={3}'".format( + self.probackup_path, backup_dir, 'node', self.user), + recovery_content) + + node.slow_start() + + node.safe_psql( + 'postgres', + 'select 1') + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_archive_options_1(self): + """ + check that '--archive-host', '--archive-user', '--archiver-port' + and '--restore-command' are working as expected with set-config + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node, compress=True) + + node.slow_start() + + # FULL + self.backup_node(backup_dir, 'node', node) + node.pgbench_init(scale=1) + + node.cleanup() + + wal_dir = os.path.join(backup_dir, 'wal', 'node') + self.set_config( + backup_dir, 'node', + options=[ + '--restore-command="cp {0}/%f %p"'.format(wal_dir), + '--archive-host=localhost', + '--archive-port=22', + '--archive-user={0}'.format(self.user)]) + self.restore_node(backup_dir, 'node', node) + + if self.get_version(node) >= self.version_to_num('12.0'): + recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') + else: + recovery_conf = os.path.join(node.data_dir, 'recovery.conf') + + with open(recovery_conf, 'r') as f: + recovery_content = f.read() + + self.assertIn( + 'restore_command = \'"cp {0}/%f %p"\''.format(wal_dir), + recovery_content) + + node.cleanup() + + self.restore_node( + backup_dir, 'node', node, + 
options=[ + '--restore-command=none', + '--archive-host=localhost1', + '--archive-port=23', + '--archive-user={0}'.format(self.user) + ]) + + with open(recovery_conf, 'r') as f: + recovery_content = f.read() + + self.assertIn( + "restore_command = '\"{0}\" archive-get -B \"{1}\" --instance \"{2}\" " + "--wal-file-path=%p --wal-file-name=%f --remote-host=localhost1 " + "--remote-port=23 --remote-user={3}'".format( + self.probackup_path, backup_dir, 'node', self.user), + recovery_content) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_undefined_wal_file_path(self): + """ + check that archive-push works correct with undefined + --wal-file-path + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + if os.name == 'posix': + archive_command = '\"{0}\" archive-push -B \"{1}\" --instance \"{2}\" --wal-file-name=%f'.format( + self.probackup_path, backup_dir, 'node') + elif os.name == 'nt': + archive_command = '\"{0}\" archive-push -B \"{1}\" --instance \"{2}\" --wal-file-name=%f'.format( + self.probackup_path, backup_dir, 'node').replace("\\","\\\\") + else: + self.assertTrue(False, 'Unexpected os family') + + self.set_auto_conf( + node, + {'archive_command': archive_command}) + + node.slow_start() + node.safe_psql( + "postgres", + "create table t_heap as select i" + " as id from generate_series(0, 10) i") + self.switch_wal_segment(node) + + # check + self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], '000000010000000000000001') + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_intermediate_archiving(self): + """ + check that archive-push works correct with --wal-file-path setting by user + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + node_pg_options = {} + if node.major_version >= 13: + node_pg_options['wal_keep_size'] = '0MB' + else: + node_pg_options['wal_keep_segments'] = '0' + self.set_auto_conf(node, node_pg_options) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + + wal_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'intermediate_dir') + shutil.rmtree(wal_dir, ignore_errors=True) + os.makedirs(wal_dir) + if os.name == 'posix': + self.set_archiving(backup_dir, 'node', node, custom_archive_command='cp -v %p {0}/%f'.format(wal_dir)) + elif os.name == 'nt': + self.set_archiving(backup_dir, 'node', node, custom_archive_command='copy /Y "%p" "{0}\\\\%f"'.format(wal_dir.replace("\\","\\\\"))) + else: + self.assertTrue(False, 'Unexpected os family') + + node.slow_start() + node.safe_psql( + "postgres", + "create table t_heap as select i" + " as id from generate_series(0, 10) i") + self.switch_wal_segment(node) + + wal_segment = '000000010000000000000001' + + self.run_pb(["archive-push", "-B", backup_dir, + "--instance=node", "-D", node.data_dir, + "--wal-file-path", "{0}/{1}".format(wal_dir, wal_segment), "--wal-file-name", wal_segment]) + + self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], wal_segment) + + # @unittest.skip("skip") + # 
@unittest.expectedFailure + def test_waldir_outside_pgdata_archiving(self): + """ + check that archive-push works correct with symlinked waldir + """ + if self.pg_config_version < self.version_to_num('10.0'): + self.skipTest( + 'Skipped because waldir outside pgdata is supported since PG 10') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + external_wal_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'ext_wal_dir') + shutil.rmtree(external_wal_dir, ignore_errors=True) + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums', '--waldir={0}'.format(external_wal_dir)]) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + + node.slow_start() + node.safe_psql( + "postgres", + "create table t_heap as select i" + " as id from generate_series(0, 10) i") + self.switch_wal_segment(node) + + # check + self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], '000000010000000000000001') + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_hexadecimal_timeline(self): + """ + Check that timelines are correct. + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node, log_level='verbose') + node.slow_start() + + backup_id = self.backup_node(backup_dir, 'node', node) + node.pgbench_init(scale=2) + + # create timelines + for i in range(1, 13): + # print(i) + node.cleanup() + self.restore_node( + backup_dir, 'node', node, + options=['--recovery-target-timeline={0}'.format(i)]) + node.slow_start() + node.pgbench_init(scale=2) + + sleep(5) + + show = self.show_archive(backup_dir) + + timelines = show[0]['timelines'] + + print(timelines[0]) + + tli13 = timelines[0] + + self.assertEqual( + 13, + tli13['tli']) + + self.assertEqual( + 12, + tli13['parent-tli']) + + self.assertEqual( + backup_id, + tli13['closest-backup-id']) + + self.assertEqual( + '0000000D000000000000001C', + tli13['max-segno']) + + @unittest.skip("skip") + # @unittest.expectedFailure + def test_archiving_and_slots(self): + """ + Check that archiving don`t break slot + guarantee. 
+ """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s', + 'max_wal_size': '64MB'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node, log_level='verbose') + node.slow_start() + + if self.get_version(node) < 100000: + pg_receivexlog_path = self.get_bin_path('pg_receivexlog') + else: + pg_receivexlog_path = self.get_bin_path('pg_receivewal') + + # "pg_receivewal --create-slot --slot archive_slot --if-not-exists " + # "&& pg_receivewal --synchronous -Z 1 /tmp/wal --slot archive_slot --no-loop" + + self.run_binary( + [ + pg_receivexlog_path, '-p', str(node.port), '--synchronous', + '--create-slot', '--slot', 'archive_slot', '--if-not-exists' + ]) + + node.pgbench_init(scale=10) + + pg_receivexlog = self.run_binary( + [ + pg_receivexlog_path, '-p', str(node.port), '--synchronous', + '-D', os.path.join(backup_dir, 'wal', 'node'), + '--no-loop', '--slot', 'archive_slot', + '-Z', '1' + ], asynchronous=True) + + if pg_receivexlog.returncode: + self.assertFalse( + True, + 'Failed to start pg_receivexlog: {0}'.format( + pg_receivexlog.communicate()[1])) + + sleep(2) + + pg_receivexlog.kill() + + backup_id = self.backup_node(backup_dir, 'node', node) + node.pgbench_init(scale=20) + + exit(1) + + def test_archive_push_sanity(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'archive_mode': 'on', + 'archive_command': 'exit 1'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + + node.slow_start() + + node.pgbench_init(scale=50) + node.stop() + + self.set_archiving(backup_dir, 'node', node) + os.remove(os.path.join(node.logs_dir, 'postgresql.log')) + node.slow_start() + + self.backup_node(backup_dir, 'node', node) + + with open(os.path.join(node.logs_dir, 'postgresql.log'), 'r') as f: + postgres_log_content = f.read() + + # print(postgres_log_content) + # make sure that .backup file is not compressed + self.assertNotIn('.backup.gz', postgres_log_content) + self.assertNotIn('WARNING', postgres_log_content) + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.restore_node( + backup_dir, 'node', replica, + data_dir=replica.data_dir, options=['-R']) + + # self.set_archiving(backup_dir, 'replica', replica, replica=True) + self.set_auto_conf(replica, {'port': replica.port}) + self.set_auto_conf(replica, {'archive_mode': 'always'}) + self.set_auto_conf(replica, {'hot_standby': 'on'}) + replica.slow_start(replica=True) + + self.wait_until_replica_catch_with_master(node, replica) + + node.pgbench_init(scale=5) + + replica.promote() + replica.pgbench_init(scale=10) + + with open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f: + replica_log_content = f.read() + + # make sure that .partial file is not compressed + self.assertNotIn('.partial.gz', replica_log_content) + # make sure that .history file is not compressed + self.assertNotIn('.history.gz', replica_log_content) + self.assertNotIn('WARNING', replica_log_content) + + output = self.show_archive( + backup_dir, 'node', 
as_json=False, as_text=True, + options=['--log-level-console=INFO']) + + self.assertNotIn('WARNING', output) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_archive_pg_receivexlog_partial_handling(self): + """check that archive-get delivers .partial and .gz.partial files""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + if self.get_version(node) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + + node.slow_start() + + if self.get_version(node) < 100000: + app_name = 'pg_receivexlog' + pg_receivexlog_path = self.get_bin_path('pg_receivexlog') + else: + app_name = 'pg_receivewal' + pg_receivexlog_path = self.get_bin_path('pg_receivewal') + + cmdline = [ + pg_receivexlog_path, '-p', str(node.port), '--synchronous', + '-D', os.path.join(backup_dir, 'wal', 'node')] + + if self.archive_compress and node.major_version >= 10: + cmdline += ['-Z', '1'] + + env = self.test_env + env["PGAPPNAME"] = app_name + pg_receivexlog = self.run_binary(cmdline, asynchronous=True, env=env) + + if pg_receivexlog.returncode: + self.assertFalse( + True, + 'Failed to start pg_receivexlog: {0}'.format( + pg_receivexlog.communicate()[1])) + + self.set_auto_conf(node, {'synchronous_standby_names': app_name}) + self.set_auto_conf(node, {'synchronous_commit': 'on'}) + node.reload() + + # FULL + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1000000) i") + + # PAGE + self.backup_node( + backup_dir, 'node', node, backup_type='page', options=['--stream']) + + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(1000000,2000000) i") + + pg_receivexlog.kill() + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, node_restored.data_dir, + options=['--recovery-target=latest', '--recovery-target-action=promote']) + self.set_auto_conf(node_restored, {'port': node_restored.port}) + self.set_auto_conf(node_restored, {'hot_standby': 'off'}) + + node_restored.slow_start() + + result = node.safe_psql( + "postgres", + "select sum(id) from t_heap").decode('utf-8').rstrip() + + result_new = node_restored.safe_psql( + "postgres", + "select sum(id) from t_heap").decode('utf-8').rstrip() + + self.assertEqual(result, result_new) + + @unittest.skip("skip") + def test_multi_timeline_recovery_prefetching(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + + node.slow_start() + + self.backup_node(backup_dir, 'node', node) + + node.pgbench_init(scale=50) + + target_xid = node.safe_psql( + 'postgres', + 'select 
txid_current()').rstrip()
+
+        node.pgbench_init(scale=20)
+
+        node.stop()
+        node.cleanup()
+
+        self.restore_node(
+            backup_dir, 'node', node,
+            options=[
+                '--recovery-target-xid={0}'.format(target_xid),
+                '--recovery-target-action=promote'])
+
+        node.slow_start()
+
+        node.pgbench_init(scale=20)
+
+        target_xid = node.safe_psql(
+            'postgres',
+            'select txid_current()').rstrip()
+
+        node.stop(['-m', 'immediate', '-D', node.data_dir])
+        node.cleanup()
+
+        self.restore_node(
+            backup_dir, 'node', node,
+            options=[
+#                '--recovery-target-xid={0}'.format(target_xid),
+                '--recovery-target-timeline=2',
+#                '--recovery-target-action=promote',
+                '--no-validate'])
+        node.slow_start()
+
+        node.pgbench_init(scale=20)
+        result = node.safe_psql(
+            'postgres',
+            'select * from pgbench_accounts')
+        node.stop()
+        node.cleanup()
+
+        self.restore_node(
+            backup_dir, 'node', node,
+            options=[
+#                '--recovery-target-xid=100500',
+                '--recovery-target-timeline=3',
+#                '--recovery-target-action=promote',
+                '--no-validate'])
+        os.remove(os.path.join(node.logs_dir, 'postgresql.log'))
+
+        restore_command = self.get_restore_command(backup_dir, 'node', node)
+        restore_command += ' -j 2 --batch-size=10 --log-level-console=VERBOSE'
+
+        if node.major_version >= 12:
+            node.append_conf(
+                'postgresql.auto.conf', "restore_command = '{0}'".format(restore_command))
+        else:
+            node.append_conf(
+                'recovery.conf', "restore_command = '{0}'".format(restore_command))
+
+        node.slow_start()
+
+        result_new = node.safe_psql(
+            'postgres',
+            'select * from pgbench_accounts')
+
+        self.assertEqual(result, result_new)
+
+        with open(os.path.join(node.logs_dir, 'postgresql.log'), 'r') as f:
+            postgres_log_content = f.read()
+
+        # check that a request for a non-existing segment does not
+        # throw away the prefetched segments
+        self.assertIn(
+            'pg_probackup archive-get failed to '
+            'deliver WAL file: 000000030000000000000006',
+            postgres_log_content)
+
+        self.assertIn(
+            'pg_probackup archive-get failed to '
+            'deliver WAL file: 000000020000000000000006',
+            postgres_log_content)
+
+        self.assertIn(
+            'pg_probackup archive-get used prefetched '
+            'WAL segment 000000010000000000000006, prefetch state: 5/10',
+            postgres_log_content)
+
+    def test_archive_get_batching_sanity(self):
+        """
+        Make sure that batching works.
+        The .gz file is corrupted while the uncompressed one is not; check that
+        the corruption is detected and the uncompressed file is used.
+ """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + if self.get_version(node) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + + node.slow_start() + + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + node.pgbench_init(scale=50) + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.restore_node( + backup_dir, 'node', replica, replica.data_dir) + self.set_replica(node, replica, log_shipping=True) + + if node.major_version >= 12: + self.set_auto_conf(replica, {'restore_command': 'exit 1'}) + else: + replica.append_conf('recovery.conf', "restore_command = 'exit 1'") + + replica.slow_start(replica=True) + + # at this point replica is consistent + restore_command = self.get_restore_command(backup_dir, 'node', replica) + + restore_command += ' -j 2 --batch-size=10' + + # print(restore_command) + + if node.major_version >= 12: + self.set_auto_conf(replica, {'restore_command': restore_command}) + else: + replica.append_conf( + 'recovery.conf', "restore_command = '{0}'".format(restore_command)) + + replica.restart() + + sleep(5) + + with open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f: + postgres_log_content = f.read() + + self.assertIn( + 'pg_probackup archive-get completed successfully, fetched: 10/10', + postgres_log_content) + self.assertIn('used prefetched WAL segment', postgres_log_content) + self.assertIn('prefetch state: 9/10', postgres_log_content) + self.assertIn('prefetch state: 8/10', postgres_log_content) + + def test_archive_get_prefetch_corruption(self): + """ + Make sure that WAL corruption is detected. + And --prefetch-dir is honored. 
+ """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + + node.slow_start() + + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + node.pgbench_init(scale=50) + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.restore_node( + backup_dir, 'node', replica, replica.data_dir) + self.set_replica(node, replica, log_shipping=True) + + if node.major_version >= 12: + self.set_auto_conf(replica, {'restore_command': 'exit 1'}) + else: + replica.append_conf('recovery.conf', "restore_command = 'exit 1'") + + replica.slow_start(replica=True) + + # at this point replica is consistent + restore_command = self.get_restore_command(backup_dir, 'node', replica) + + restore_command += ' -j5 --batch-size=10 --log-level-console=VERBOSE' + #restore_command += ' --batch-size=2 --log-level-console=VERBOSE' + + if node.major_version >= 12: + self.set_auto_conf(replica, {'restore_command': restore_command}) + else: + replica.append_conf( + 'recovery.conf', "restore_command = '{0}'".format(restore_command)) + + replica.restart() + + sleep(5) + + with open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f: + postgres_log_content = f.read() + + self.assertIn( + 'pg_probackup archive-get completed successfully, fetched: 10/10', + postgres_log_content) + self.assertIn('used prefetched WAL segment', postgres_log_content) + self.assertIn('prefetch state: 9/10', postgres_log_content) + self.assertIn('prefetch state: 8/10', postgres_log_content) + + replica.stop() + + # generate WAL, copy it into prefetch directory, then corrupt + # some segment + node.pgbench_init(scale=20) + sleep(20) + + # now copy WAL files into prefetch directory and corrupt some of them + archive_dir = os.path.join(backup_dir, 'wal', 'node') + files = os.listdir(archive_dir) + files.sort() + + for filename in [files[-4], files[-3], files[-2], files[-1]]: + src_file = os.path.join(archive_dir, filename) + + if node.major_version >= 10: + wal_dir = 'pg_wal' + else: + wal_dir = 'pg_xlog' + + if filename.endswith('.gz'): + dst_file = os.path.join(replica.data_dir, wal_dir, 'pbk_prefetch', filename[:-3]) + with gzip.open(src_file, 'rb') as f_in, open(dst_file, 'wb') as f_out: + shutil.copyfileobj(f_in, f_out) + else: + dst_file = os.path.join(replica.data_dir, wal_dir, 'pbk_prefetch', filename) + shutil.copyfile(src_file, dst_file) + + # print(dst_file) + + # corrupt file + if files[-2].endswith('.gz'): + filename = files[-2][:-3] + else: + filename = files[-2] + + prefetched_file = os.path.join(replica.data_dir, wal_dir, 'pbk_prefetch', filename) + + with open(prefetched_file, "rb+", 0) as f: + f.seek(8192*2) + f.write(b"SURIKEN") + f.flush() + f.close + + # enable restore_command + restore_command = self.get_restore_command(backup_dir, 'node', replica) + restore_command += ' --batch-size=2 --log-level-console=VERBOSE' + + if node.major_version >= 12: + self.set_auto_conf(replica, {'restore_command': restore_command}) + else: + replica.append_conf( + 'recovery.conf', "restore_command = '{0}'".format(restore_command)) + + os.remove(os.path.join(replica.logs_dir, 'postgresql.log')) + replica.slow_start(replica=True) + + sleep(60) + + with 
open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f:
+            postgres_log_content = f.read()
+
+        self.assertIn(
+            'Prefetched WAL segment {0} is invalid, cannot use it'.format(filename),
+            postgres_log_content)
+
+        self.assertIn(
+            'LOG: restored log file "{0}" from archive'.format(filename),
+            postgres_log_content)
+
+    # @unittest.skip("skip")
+    def test_archive_show_partial_files_handling(self):
+        """
+        check that files with '.part', '.gz.part', '.partial' and '.gz.partial'
+        suffixes are handled correctly
+        """
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'])
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node, compress=False)
+
+        node.slow_start()
+
+        self.backup_node(backup_dir, 'node', node)
+
+        wals_dir = os.path.join(backup_dir, 'wal', 'node')
+
+        # .part file
+        node.safe_psql(
+            "postgres",
+            "create table t1()")
+
+        if self.get_version(node) < 100000:
+            filename = node.safe_psql(
+                "postgres",
+                "SELECT file_name "
+                "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip()
+        else:
+            filename = node.safe_psql(
+                "postgres",
+                "SELECT file_name "
+                "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip()
+
+        filename = filename.decode('utf-8')
+
+        self.switch_wal_segment(node)
+
+        os.rename(
+            os.path.join(wals_dir, filename),
+            os.path.join(wals_dir, '{0}.part'.format(filename)))
+
+        # .gz.part file
+        node.safe_psql(
+            "postgres",
+            "create table t2()")
+
+        if self.get_version(node) < 100000:
+            filename = node.safe_psql(
+                "postgres",
+                "SELECT file_name "
+                "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip()
+        else:
+            filename = node.safe_psql(
+                "postgres",
+                "SELECT file_name "
+                "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip()
+
+        filename = filename.decode('utf-8')
+
+        self.switch_wal_segment(node)
+
+        os.rename(
+            os.path.join(wals_dir, filename),
+            os.path.join(wals_dir, '{0}.gz.part'.format(filename)))
+
+        # .partial file
+        node.safe_psql(
+            "postgres",
+            "create table t3()")
+
+        if self.get_version(node) < 100000:
+            filename = node.safe_psql(
+                "postgres",
+                "SELECT file_name "
+                "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip()
+        else:
+            filename = node.safe_psql(
+                "postgres",
+                "SELECT file_name "
+                "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip()
+
+        filename = filename.decode('utf-8')
+
+        self.switch_wal_segment(node)
+
+        os.rename(
+            os.path.join(wals_dir, filename),
+            os.path.join(wals_dir, '{0}.partial'.format(filename)))
+
+        # .gz.partial file
+        node.safe_psql(
+            "postgres",
+            "create table t4()")
+
+        if self.get_version(node) < 100000:
+            filename = node.safe_psql(
+                "postgres",
+                "SELECT file_name "
+                "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip()
+        else:
+            filename = node.safe_psql(
+                "postgres",
+                "SELECT file_name "
+                "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip()
+
+        filename = filename.decode('utf-8')
+
+        self.switch_wal_segment(node)
+
+        os.rename(
+            os.path.join(wals_dir, filename),
+            os.path.join(wals_dir, '{0}.gz.partial'.format(filename)))
+
+        self.show_archive(backup_dir, 'node', options=['--log-level-file=VERBOSE'])
+
+        with open(os.path.join(backup_dir, 'log', 'pg_probackup.log'), 'r') as f:
+            log_content = f.read()
+
+        self.assertNotIn(
+            'WARNING',
+ log_content) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_archive_empty_history_file(self): + """ + https://github.com/postgrespro/pg_probackup/issues/326 + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + + node.slow_start() + node.pgbench_init(scale=5) + + # FULL + self.backup_node(backup_dir, 'node', node) + + node.pgbench_init(scale=5) + node.cleanup() + + self.restore_node( + backup_dir, 'node', node, + options=[ + '--recovery-target=latest', + '--recovery-target-action=promote']) + + # Node in timeline 2 + node.slow_start() + + node.pgbench_init(scale=5) + node.cleanup() + + self.restore_node( + backup_dir, 'node', node, + options=[ + '--recovery-target=latest', + '--recovery-target-timeline=2', + '--recovery-target-action=promote']) + + # Node in timeline 3 + node.slow_start() + + node.pgbench_init(scale=5) + node.cleanup() + + self.restore_node( + backup_dir, 'node', node, + options=[ + '--recovery-target=latest', + '--recovery-target-timeline=3', + '--recovery-target-action=promote']) + + # Node in timeline 4 + node.slow_start() + node.pgbench_init(scale=5) + + # Truncate history files + for tli in range(2, 5): + file = os.path.join( + backup_dir, 'wal', 'node', '0000000{0}.history'.format(tli)) + with open(file, "w+") as f: + f.truncate() + + timelines = self.show_archive(backup_dir, 'node', options=['--log-level-file=INFO']) + + # check that all timelines has zero switchpoint + for timeline in timelines: + self.assertEqual(timeline['switchpoint'], '0/0') + + log_file = os.path.join(backup_dir, 'log', 'pg_probackup.log') + with open(log_file, 'r') as f: + log_content = f.read() + wal_dir = os.path.join(backup_dir, 'wal', 'node') + + self.assertIn( + 'WARNING: History file is corrupted or missing: "{0}"'.format(os.path.join(wal_dir, '00000002.history')), + log_content) + self.assertIn( + 'WARNING: History file is corrupted or missing: "{0}"'.format(os.path.join(wal_dir, '00000003.history')), + log_content) + self.assertIn( + 'WARNING: History file is corrupted or missing: "{0}"'.format(os.path.join(wal_dir, '00000004.history')), + log_content) + +# TODO test with multiple not archived segments. +# TODO corrupted file in archive. + +# important - switchpoint may be NullOffset LSN and not actually existing in archive to boot. 
+# so write WAL validation code accordingly + +# change wal-seg-size +# +# +#t3 ---------------- +# / +#t2 ---------------- +# / +#t1 -A-------- +# +# + + +#t3 ---------------- +# / +#t2 ---------------- +# / +#t1 -A-------- +# diff --git a/tests/backup_test.py b/tests/backup_test.py new file mode 100644 index 000000000..db7ccf5a0 --- /dev/null +++ b/tests/backup_test.py @@ -0,0 +1,3564 @@ +import unittest +import os +from time import sleep, time +from .helpers.ptrack_helpers import base36enc, ProbackupTest, ProbackupException +import shutil +from distutils.dir_util import copy_tree +from testgres import ProcessType, QueryException +import subprocess + + +class BackupTest(ProbackupTest, unittest.TestCase): + + def test_full_backup(self): + """ + Just test full backup with at least two segments + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + # we need to write a lot. Lets speedup a bit. + pg_options={"fsync": "off", "synchronous_commit": "off"}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Fill with data + # Have to use scale=100 to create second segment. + node.pgbench_init(scale=100, no_vacuum=True) + + # FULL + backup_id = self.backup_node(backup_dir, 'node', node) + + out = self.validate_pb(backup_dir, 'node', backup_id) + self.assertIn( + "INFO: Backup {0} is valid".format(backup_id), + out) + + def test_full_backup_stream(self): + """ + Just test full backup with at least two segments in stream mode + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + # we need to write a lot. Lets speedup a bit. + pg_options={"fsync": "off", "synchronous_commit": "off"}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # Fill with data + # Have to use scale=100 to create second segment. 
+ node.pgbench_init(scale=100, no_vacuum=True) + + # FULL + backup_id = self.backup_node(backup_dir, 'node', node, + options=["--stream"]) + + out = self.validate_pb(backup_dir, 'node', backup_id) + self.assertIn( + "INFO: Backup {0} is valid".format(backup_id), + out) + + # @unittest.skip("skip") + # @unittest.expectedFailure + # PGPRO-707 + def test_backup_modes_archive(self): + """standart backup modes with ARCHIVE WAL method""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + full_backup_id = self.backup_node(backup_dir, 'node', node) + show_backup = self.show_pb(backup_dir, 'node')[0] + + self.assertEqual(show_backup['status'], "OK") + self.assertEqual(show_backup['backup-mode'], "FULL") + + # postmaster.pid and postmaster.opts shouldn't be copied + excluded = True + db_dir = os.path.join( + backup_dir, "backups", 'node', full_backup_id, "database") + + for f in os.listdir(db_dir): + if ( + os.path.isfile(os.path.join(db_dir, f)) and + ( + f == "postmaster.pid" or + f == "postmaster.opts" + ) + ): + excluded = False + self.assertEqual(excluded, True) + + # page backup mode + page_backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="page") + + show_backup_1 = self.show_pb(backup_dir, 'node')[1] + self.assertEqual(show_backup_1['status'], "OK") + self.assertEqual(show_backup_1['backup-mode'], "PAGE") + + # delta backup mode + delta_backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="delta") + + show_backup_2 = self.show_pb(backup_dir, 'node')[2] + self.assertEqual(show_backup_2['status'], "OK") + self.assertEqual(show_backup_2['backup-mode'], "DELTA") + + # Check parent backup + self.assertEqual( + full_backup_id, + self.show_pb( + backup_dir, 'node', + backup_id=show_backup_1['id'])["parent-backup-id"]) + + self.assertEqual( + page_backup_id, + self.show_pb( + backup_dir, 'node', + backup_id=show_backup_2['id'])["parent-backup-id"]) + + # @unittest.skip("skip") + def test_smooth_checkpoint(self): + """full backup with smooth checkpoint""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.backup_node( + backup_dir, 'node', node, + options=["-C"]) + self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") + node.stop() + + # @unittest.skip("skip") + def test_incremental_backup_without_full(self): + """page backup without validated full backup""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + try: + self.backup_node(backup_dir, 'node', node, backup_type="page") + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because page backup should not be 
possible " + "without valid full backup.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + "WARNING: Valid full backup on current timeline 1 is not found" in e.message and + "ERROR: Create new full backup before an incremental one" in e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertEqual( + self.show_pb(backup_dir, 'node')[0]['status'], + "ERROR") + + # @unittest.skip("skip") + def test_incremental_backup_corrupt_full(self): + """page-level backup with corrupted full backup""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + backup_id = self.backup_node(backup_dir, 'node', node) + file = os.path.join( + backup_dir, "backups", "node", backup_id, + "database", "postgresql.conf") + os.remove(file) + + try: + self.validate_pb(backup_dir, 'node') + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of validation of corrupted backup.\n" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + "INFO: Validate backups of the instance 'node'" in e.message and + "WARNING: Backup file" in e.message and "is not found" in e.message and + "WARNING: Backup {0} data files are corrupted".format( + backup_id) in e.message and + "WARNING: Some backups are not valid" in e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + try: + self.backup_node(backup_dir, 'node', node, backup_type="page") + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because page backup should not be possible " + "without valid full backup.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + "WARNING: Valid full backup on current timeline 1 is not found" in e.message and + "ERROR: Create new full backup before an incremental one" in e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertEqual( + self.show_pb(backup_dir, 'node', backup_id)['status'], "CORRUPT") + self.assertEqual( + self.show_pb(backup_dir, 'node')[1]['status'], "ERROR") + + # @unittest.skip("skip") + def test_delta_threads_stream(self): + """delta multi thread backup mode and stream""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=["-j", "4", "--stream"]) + + self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") + self.backup_node( + backup_dir, 'node', node, + backup_type="delta", options=["-j", "4", "--stream"]) + self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK") + + # @unittest.skip("skip") + def test_page_detect_corruption(self): + """make node, corrupt 
some page, check that backup failed""" + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.backup_node( + backup_dir, 'node', node, + backup_type="full", options=["-j", "4", "--stream"]) + + node.safe_psql( + "postgres", + "create table t_heap as select 1 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1000) i") + + node.safe_psql( + "postgres", + "CHECKPOINT") + + heap_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + + path = os.path.join(node.data_dir, heap_path) + with open(path, "rb+", 0) as f: + f.seek(9000) + f.write(b"bla") + f.flush() + f.close + + try: + self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=["-j", "4", "--stream", "--log-level-file=VERBOSE"]) + self.assertEqual( + 1, 0, + "Expecting Error because data file is corrupted" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'ERROR: Corruption detected in file "{0}", ' + 'block 1: page verification failed, calculated checksum'.format(path), + e.message) + + self.assertEqual( + self.show_pb(backup_dir, 'node')[1]['status'], + 'ERROR', + "Backup Status should be ERROR") + + # @unittest.skip("skip") + def test_backup_detect_corruption(self): + """make node, corrupt some page, check that backup failed""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + if self.ptrack: + node.safe_psql( + "postgres", + "create extension ptrack") + + self.backup_node( + backup_dir, 'node', node, + backup_type="full", options=["-j", "4", "--stream"]) + + node.safe_psql( + "postgres", + "create table t_heap as select 1 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + + heap_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + + self.backup_node( + backup_dir, 'node', node, + backup_type="full", options=["-j", "4", "--stream"]) + + node.safe_psql( + "postgres", + "select count(*) from t_heap") + + node.safe_psql( + "postgres", + "update t_heap set id = id + 10000") + + node.stop() + + heap_fullpath = os.path.join(node.data_dir, heap_path) + + with open(heap_fullpath, "rb+", 0) as f: + f.seek(9000) + f.write(b"bla") + f.flush() + f.close + + node.slow_start() + + try: + self.backup_node( + backup_dir, 'node', node, + backup_type="full", options=["-j", "4", "--stream"]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of block corruption" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Corruption detected in file "{0}", block 1: ' + 'page verification failed, calculated 
checksum'.format( + heap_fullpath), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + sleep(1) + + try: + self.backup_node( + backup_dir, 'node', node, + backup_type="delta", options=["-j", "4", "--stream"]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of block corruption" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Corruption detected in file "{0}", block 1: ' + 'page verification failed, calculated checksum'.format( + heap_fullpath), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + sleep(1) + + try: + self.backup_node( + backup_dir, 'node', node, + backup_type="page", options=["-j", "4", "--stream"]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of block corruption" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Corruption detected in file "{0}", block 1: ' + 'page verification failed, calculated checksum'.format( + heap_fullpath), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + sleep(1) + + if self.ptrack: + try: + self.backup_node( + backup_dir, 'node', node, + backup_type="ptrack", options=["-j", "4", "--stream"]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of block corruption" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Corruption detected in file "{0}", block 1: ' + 'page verification failed, calculated checksum'.format( + heap_fullpath), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_backup_detect_invalid_block_header(self): + """make node, corrupt some page, check that backup failed""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + if self.ptrack: + node.safe_psql( + "postgres", + "create extension ptrack") + + node.safe_psql( + "postgres", + "create table t_heap as select 1 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + + heap_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + + self.backup_node( + backup_dir, 'node', node, + backup_type="full", options=["-j", "4", "--stream"]) + + node.safe_psql( + "postgres", + "select count(*) from t_heap") + + node.safe_psql( + "postgres", + "update t_heap set id = id + 10000") + + node.stop() + + heap_fullpath = os.path.join(node.data_dir, heap_path) + with open(heap_fullpath, "rb+", 0) as f: + f.seek(8193) + f.write(b"blahblahblahblah") + f.flush() + f.close + + node.slow_start() + +# self.backup_node( +# backup_dir, 'node', node, +# backup_type="full", options=["-j", "4", "--stream"]) + + try: + 
self.backup_node( + backup_dir, 'node', node, + backup_type="full", options=["-j", "4", "--stream"]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of block corruption" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Corruption detected in file "{0}", block 1: ' + 'page header invalid, pd_lower'.format(heap_fullpath), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + sleep(1) + + try: + self.backup_node( + backup_dir, 'node', node, + backup_type="delta", options=["-j", "4", "--stream"]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of block corruption" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Corruption detected in file "{0}", block 1: ' + 'page header invalid, pd_lower'.format(heap_fullpath), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + sleep(1) + + try: + self.backup_node( + backup_dir, 'node', node, + backup_type="page", options=["-j", "4", "--stream"]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of block corruption" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Corruption detected in file "{0}", block 1: ' + 'page header invalid, pd_lower'.format(heap_fullpath), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + sleep(1) + + if self.ptrack: + try: + self.backup_node( + backup_dir, 'node', node, + backup_type="ptrack", options=["-j", "4", "--stream"]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of block corruption" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Corruption detected in file "{0}", block 1: ' + 'page header invalid, pd_lower'.format(heap_fullpath), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_backup_detect_missing_permissions(self): + """make node, corrupt some page, check that backup failed""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + if self.ptrack: + node.safe_psql( + "postgres", + "create extension ptrack") + + node.safe_psql( + "postgres", + "create table t_heap as select 1 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + + heap_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + + self.backup_node( + backup_dir, 'node', node, + backup_type="full", options=["-j", "4", "--stream"]) + + node.safe_psql( + "postgres", + "select count(*) from t_heap") + + node.safe_psql( + 
"postgres", + "update t_heap set id = id + 10000") + + node.stop() + + heap_fullpath = os.path.join(node.data_dir, heap_path) + with open(heap_fullpath, "rb+", 0) as f: + f.seek(8193) + f.write(b"blahblahblahblah") + f.flush() + f.close + + node.slow_start() + +# self.backup_node( +# backup_dir, 'node', node, +# backup_type="full", options=["-j", "4", "--stream"]) + + try: + self.backup_node( + backup_dir, 'node', node, + backup_type="full", options=["-j", "4", "--stream"]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of block corruption" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Corruption detected in file "{0}", block 1: ' + 'page header invalid, pd_lower'.format(heap_fullpath), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + sleep(1) + + try: + self.backup_node( + backup_dir, 'node', node, + backup_type="delta", options=["-j", "4", "--stream"]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of block corruption" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Corruption detected in file "{0}", block 1: ' + 'page header invalid, pd_lower'.format(heap_fullpath), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + sleep(1) + + try: + self.backup_node( + backup_dir, 'node', node, + backup_type="page", options=["-j", "4", "--stream"]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of block corruption" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Corruption detected in file "{0}", block 1: ' + 'page header invalid, pd_lower'.format(heap_fullpath), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + sleep(1) + + if self.ptrack: + try: + self.backup_node( + backup_dir, 'node', node, + backup_type="ptrack", options=["-j", "4", "--stream"]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of block corruption" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Corruption detected in file "{0}", block 1: ' + 'page header invalid, pd_lower'.format(heap_fullpath), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_backup_truncate_misaligned(self): + """ + make node, truncate file to size not even to BLCKSIZE, + take backup + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "create table t_heap as select 1 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,100000) i") + + node.safe_psql( + "postgres", + "CHECKPOINT;") + + heap_path = 
node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + + heap_size = node.safe_psql( + "postgres", + "select pg_relation_size('t_heap')") + + with open(os.path.join(node.data_dir, heap_path), "rb+", 0) as f: + f.truncate(int(heap_size) - 4096) + f.flush() + f.close + + output = self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=["-j", "4", "--stream"], return_id=False) + + self.assertIn("WARNING: File", output) + self.assertIn("invalid file size", output) + + # @unittest.skip("skip") + def test_tablespace_in_pgdata_pgpro_1376(self): + """PGPRO-1376 """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node( + node, 'tblspace1', + tblspc_path=( + os.path.join( + node.data_dir, 'somedirectory', '100500')) + ) + + self.create_tblspace_in_node( + node, 'tblspace2', + tblspc_path=(os.path.join(node.data_dir)) + ) + + node.safe_psql( + "postgres", + "create table t_heap1 tablespace tblspace1 as select 1 as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1000) i") + + node.safe_psql( + "postgres", + "create table t_heap2 tablespace tblspace2 as select 1 as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1000) i") + + backup_id_1 = self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=["-j", "4", "--stream"]) + + node.safe_psql( + "postgres", + "drop table t_heap2") + node.safe_psql( + "postgres", + "drop tablespace tblspace2") + + self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=["-j", "4", "--stream"]) + + pgdata = self.pgdata_content(node.data_dir) + + relfilenode = node.safe_psql( + "postgres", + "select 't_heap1'::regclass::oid" + ).decode('utf-8').rstrip() + + list = [] + for root, dirs, files in os.walk(os.path.join( + backup_dir, 'backups', 'node', backup_id_1)): + for file in files: + if file == relfilenode: + path = os.path.join(root, file) + list = list + [path] + + # We expect that relfilenode can be encountered only once + if len(list) > 1: + message = "" + for string in list: + message = message + string + "\n" + self.assertEqual( + 1, 0, + "Following file copied twice by backup:\n {0}".format( + message) + ) + + node.cleanup() + + self.restore_node( + backup_dir, 'node', node, options=["-j", "4"]) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_basic_tablespace_handling(self): + """ + make node, take full backup, check that restore with + tablespace mapping will end with error, take page backup, + check that restore with tablespace mapping will end with + success + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=["-j", "4", "--stream"]) + + 
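+        # note: the tablespaces below are created only after this FULL backup,
+        # so the first restore attempt with --tablespace-mapping is expected to
+        # fail with "has no tablespaceses, nothing to remap"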
tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old') + tblspace2_old_path = self.get_tblspace_path(node, 'tblspace2_old') + + self.create_tblspace_in_node( + node, 'some_lame_tablespace') + + self.create_tblspace_in_node( + node, 'tblspace1', + tblspc_path=tblspace1_old_path) + + self.create_tblspace_in_node( + node, 'tblspace2', + tblspc_path=tblspace2_old_path) + + node.safe_psql( + "postgres", + "create table t_heap_lame tablespace some_lame_tablespace " + "as select 1 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1000) i") + + node.safe_psql( + "postgres", + "create table t_heap2 tablespace tblspace2 as select 1 as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1000) i") + + tblspace1_new_path = self.get_tblspace_path(node, 'tblspace1_new') + tblspace2_new_path = self.get_tblspace_path(node, 'tblspace2_new') + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + try: + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + tblspace1_old_path, tblspace1_new_path), + "-T", "{0}={1}".format( + tblspace2_old_path, tblspace2_new_path)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because tablespace mapping is incorrect" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Backup {0} has no tablespaceses, ' + 'nothing to remap'.format(backup_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + node.safe_psql( + "postgres", + "drop table t_heap_lame") + + node.safe_psql( + "postgres", + "drop tablespace some_lame_tablespace") + + self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=["-j", "4", "--stream"]) + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + tblspace1_old_path, tblspace1_new_path), + "-T", "{0}={1}".format( + tblspace2_old_path, tblspace2_new_path)]) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_tablespace_handling_1(self): + """ + make node with tablespace A, take full backup, check that restore with + tablespace mapping of tablespace B will end with error + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old') + tblspace2_old_path = self.get_tblspace_path(node, 'tblspace2_old') + + tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new') + + self.create_tblspace_in_node( + node, 'tblspace1', + tblspc_path=tblspace1_old_path) + + self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=["-j", "4", "--stream"]) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + 
node_restored.cleanup() + + try: + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + tblspace2_old_path, tblspace_new_path)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because tablespace mapping is incorrect" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'ERROR: --tablespace-mapping option' in e.message and + 'have an entry in tablespace_map file' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_tablespace_handling_2(self): + """ + make node without tablespaces, take full backup, check that restore with + tablespace mapping will end with error + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old') + tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new') + + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=["-j", "4", "--stream"]) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + try: + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + tblspace1_old_path, tblspace_new_path)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because tablespace mapping is incorrect" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Backup {0} has no tablespaceses, ' + 'nothing to remap'.format(backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_drop_rel_during_full_backup(self): + """""" + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + for i in range(1, 512): + node.safe_psql( + "postgres", + "create table t_heap_{0} as select i" + " as id from generate_series(0,100) i".format(i)) + + node.safe_psql( + "postgres", + "VACUUM") + + node.pgbench_init(scale=10) + + relative_path_1 = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap_1')").decode('utf-8').rstrip() + + relative_path_2 = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap_1')").decode('utf-8').rstrip() + + absolute_path_1 = os.path.join(node.data_dir, relative_path_1) + absolute_path_2 = os.path.join(node.data_dir, relative_path_2) + + # FULL backup + gdb = self.backup_node( + backup_dir, 'node', node, + options=['--stream', '--log-level-file=LOG', '--log-level-console=LOG', '--progress'], + gdb=True) + + gdb.set_breakpoint('backup_files') + gdb.run_until_break() + + # REMOVE file + for i in range(1, 
512): + node.safe_psql( + "postgres", + "drop table t_heap_{0}".format(i)) + + node.safe_psql( + "postgres", + "CHECKPOINT") + + node.safe_psql( + "postgres", + "CHECKPOINT") + + # File removed, we can proceed with backup + gdb.continue_execution_until_exit() + + pgdata = self.pgdata_content(node.data_dir) + + #with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: + # log_content = f.read() + # self.assertTrue( + # 'LOG: File "{0}" is not found'.format(absolute_path) in log_content, + # 'File "{0}" should be deleted but it`s not'.format(absolute_path)) + + node.cleanup() + self.restore_node(backup_dir, 'node', node) + + # Physical comparison + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + @unittest.skip("skip") + def test_drop_db_during_full_backup(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + for i in range(1, 2): + node.safe_psql( + "postgres", + "create database t_heap_{0}".format(i)) + + node.safe_psql( + "postgres", + "VACUUM") + + # FULL backup + gdb = self.backup_node( + backup_dir, 'node', node, gdb=True, + options=[ + '--stream', '--log-level-file=LOG', + '--log-level-console=LOG', '--progress']) + + gdb.set_breakpoint('backup_files') + gdb.run_until_break() + + # REMOVE file + for i in range(1, 2): + node.safe_psql( + "postgres", + "drop database t_heap_{0}".format(i)) + + node.safe_psql( + "postgres", + "CHECKPOINT") + + node.safe_psql( + "postgres", + "CHECKPOINT") + + # File removed, we can proceed with backup + gdb.continue_execution_until_exit() + + pgdata = self.pgdata_content(node.data_dir) + + #with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: + # log_content = f.read() + # self.assertTrue( + # 'LOG: File "{0}" is not found'.format(absolute_path) in log_content, + # 'File "{0}" should be deleted but it`s not'.format(absolute_path)) + + node.cleanup() + self.restore_node(backup_dir, 'node', node) + + # Physical comparison + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_drop_rel_during_backup_delta(self): + """""" + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=10) + + node.safe_psql( + "postgres", + "create table t_heap as select i" + " as id from generate_series(0,100) i") + + relative_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + + absolute_path = os.path.join(node.data_dir, relative_path) + + # FULL backup + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + # DELTA backup + gdb = self.backup_node( + backup_dir, 'node', node, backup_type='delta', + gdb=True, options=['--log-level-file=LOG']) + + gdb.set_breakpoint('backup_files') + gdb.run_until_break() + + # REMOVE file + node.safe_psql( + "postgres", + "DROP 
TABLE t_heap") + + node.safe_psql( + "postgres", + "CHECKPOINT") + + # File removed, we can proceed with backup + gdb.continue_execution_until_exit() + + pgdata = self.pgdata_content(node.data_dir) + + with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: + log_content = f.read() + self.assertTrue( + 'LOG: File not found: "{0}"'.format(absolute_path) in log_content, + 'File "{0}" should be deleted but it`s not'.format(absolute_path)) + + node.cleanup() + self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) + + # Physical comparison + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_drop_rel_during_backup_page(self): + """""" + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "create table t_heap as select i" + " as id from generate_series(0,100) i") + + relative_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + + absolute_path = os.path.join(node.data_dir, relative_path) + + # FULL backup + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + node.safe_psql( + "postgres", + "insert into t_heap select i" + " as id from generate_series(101,102) i") + + # PAGE backup + gdb = self.backup_node( + backup_dir, 'node', node, backup_type='page', + gdb=True, options=['--log-level-file=LOG']) + + gdb.set_breakpoint('backup_files') + gdb.run_until_break() + + # REMOVE file + os.remove(absolute_path) + + # File removed, we can proceed with backup + gdb.continue_execution_until_exit() + gdb.kill() + + pgdata = self.pgdata_content(node.data_dir) + + backup_id = self.show_pb(backup_dir, 'node')[1]['id'] + + filelist = self.get_backup_filelist(backup_dir, 'node', backup_id) + self.assertNotIn(relative_path, filelist) + + node.cleanup() + self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) + + # Physical comparison + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_persistent_slot_for_stream_backup(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_size': '40MB'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "SELECT pg_create_physical_replication_slot('slot_1')") + + # FULL backup + self.backup_node( + backup_dir, 'node', node, + options=['--stream', '--slot=slot_1']) + + # FULL backup + self.backup_node( + backup_dir, 'node', node, + options=['--stream', '--slot=slot_1']) + + # @unittest.skip("skip") + def test_basic_temp_slot_for_stream_backup(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + 
initdb_params=['--data-checksums'], + pg_options={'max_wal_size': '40MB'}) + + if self.get_version(node) < self.version_to_num('10.0'): + self.skipTest('You need PostgreSQL >= 10 for this test') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + self.backup_node( + backup_dir, 'node', node, + options=['--stream', '--temp-slot']) + + # FULL backup + self.backup_node( + backup_dir, 'node', node, + options=['--stream', '--slot=slot_1', '--temp-slot']) + + # @unittest.skip("skip") + def test_backup_concurrent_drop_table(self): + """""" + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=1) + + # FULL backup + gdb = self.backup_node( + backup_dir, 'node', node, + options=['--stream', '--compress'], + gdb=True) + + gdb.set_breakpoint('backup_data_file') + gdb.run_until_break() + + node.safe_psql( + 'postgres', + 'DROP TABLE pgbench_accounts') + + # do checkpoint to guarantee filenode removal + node.safe_psql( + 'postgres', + 'CHECKPOINT') + + gdb.remove_all_breakpoints() + gdb.continue_execution_until_exit() + gdb.kill() + + show_backup = self.show_pb(backup_dir, 'node')[0] + + self.assertEqual(show_backup['status'], "OK") + + # @unittest.skip("skip") + def test_pg_11_adjusted_wal_segment_size(self): + """""" + if self.pg_config_version < self.version_to_num('11.0'): + self.skipTest('You need PostgreSQL >= 11 for this test') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=[ + '--data-checksums', + '--wal-segsize=64'], + pg_options={ + 'min_wal_size': '128MB'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=5) + + # FULL STREAM backup + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + pgbench = node.pgbench(options=['-T', '5', '-c', '2']) + pgbench.wait() + + # PAGE STREAM backup + self.backup_node( + backup_dir, 'node', node, + backup_type='page', options=['--stream']) + + pgbench = node.pgbench(options=['-T', '5', '-c', '2']) + pgbench.wait() + + # DELTA STREAM backup + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['--stream']) + + pgbench = node.pgbench(options=['-T', '5', '-c', '2']) + pgbench.wait() + + # FULL ARCHIVE backup + self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench(options=['-T', '5', '-c', '2']) + pgbench.wait() + + # PAGE ARCHIVE backup + self.backup_node(backup_dir, 'node', node, backup_type='page') + + pgbench = node.pgbench(options=['-T', '5', '-c', '2']) + pgbench.wait() + + # DELTA ARCHIVE backup + backup_id = self.backup_node(backup_dir, 'node', node, backup_type='delta') + pgdata = self.pgdata_content(node.data_dir) + + # delete + output = self.delete_pb( + backup_dir, 'node', + options=[ + '--expired', + '--delete-wal', + '--retention-redundancy=1']) + + # validate + self.validate_pb(backup_dir) + + # merge + 
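+        # merge the last DELTA backup with its parent chain, leaving backup_id as a standalone FULL backup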
self.merge_backup(backup_dir, 'node', backup_id=backup_id) + + # restore + node.cleanup() + self.restore_node( + backup_dir, 'node', node, backup_id=backup_id) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_sigint_handling(self): + """""" + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + gdb = self.backup_node( + backup_dir, 'node', node, gdb=True, + options=['--stream', '--log-level-file=LOG']) + + gdb.set_breakpoint('backup_non_data_file') + gdb.run_until_break() + + gdb.continue_execution_until_break(20) + gdb.remove_all_breakpoints() + + gdb._execute('signal SIGINT') + gdb.continue_execution_until_error() + gdb.kill() + + backup_id = self.show_pb(backup_dir, 'node')[0]['id'] + + self.assertEqual( + 'ERROR', + self.show_pb(backup_dir, 'node', backup_id)['status'], + 'Backup STATUS should be "ERROR"') + + # @unittest.skip("skip") + def test_sigterm_handling(self): + """""" + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + gdb = self.backup_node( + backup_dir, 'node', node, gdb=True, + options=['--stream', '--log-level-file=LOG']) + + gdb.set_breakpoint('backup_non_data_file') + gdb.run_until_break() + + gdb.continue_execution_until_break(20) + gdb.remove_all_breakpoints() + + gdb._execute('signal SIGTERM') + gdb.continue_execution_until_error() + + backup_id = self.show_pb(backup_dir, 'node')[0]['id'] + + self.assertEqual( + 'ERROR', + self.show_pb(backup_dir, 'node', backup_id)['status'], + 'Backup STATUS should be "ERROR"') + + # @unittest.skip("skip") + def test_sigquit_handling(self): + """""" + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + gdb = self.backup_node( + backup_dir, 'node', node, gdb=True, options=['--stream']) + + gdb.set_breakpoint('backup_non_data_file') + gdb.run_until_break() + + gdb.continue_execution_until_break(20) + gdb.remove_all_breakpoints() + + gdb._execute('signal SIGQUIT') + gdb.continue_execution_until_error() + + backup_id = self.show_pb(backup_dir, 'node')[0]['id'] + + self.assertEqual( + 'ERROR', + self.show_pb(backup_dir, 'node', backup_id)['status'], + 'Backup STATUS should be "ERROR"') + + # @unittest.skip("skip") + def test_drop_table(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + 
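+        # register the node's data directory as instance 'node' in the freshly initialized catalog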
self.add_instance(backup_dir, 'node', node) + node.slow_start() + + connect_1 = node.connect("postgres") + connect_1.execute( + "create table t_heap as select i" + " as id from generate_series(0,100) i") + connect_1.commit() + + connect_2 = node.connect("postgres") + connect_2.execute("SELECT * FROM t_heap") + connect_2.commit() + + # DROP table + connect_2.execute("DROP TABLE t_heap") + connect_2.commit() + + # FULL backup + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + # @unittest.skip("skip") + def test_basic_missing_file_permissions(self): + """""" + if os.name == 'nt': + self.skipTest('Skipped because it is POSIX only test') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + relative_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('pg_class')").decode('utf-8').rstrip() + + full_path = os.path.join(node.data_dir, relative_path) + + os.chmod(full_path, 000) + + try: + # FULL backup + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of missing permissions" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Cannot open file', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + os.chmod(full_path, 700) + + # @unittest.skip("skip") + def test_basic_missing_dir_permissions(self): + """""" + if os.name == 'nt': + self.skipTest('Skipped because it is POSIX only test') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + full_path = os.path.join(node.data_dir, 'pg_twophase') + + os.chmod(full_path, 000) + + try: + # FULL backup + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of missing permissions" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Cannot open directory', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + os.rmdir(full_path) + + # @unittest.skip("skip") + def test_backup_with_least_privileges_role(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums'], + pg_options={'archive_timeout': '30s'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + 'postgres', + 'CREATE DATABASE backupdb') + + if self.ptrack: + node.safe_psql( + "backupdb", + "CREATE SCHEMA ptrack; " + "CREATE EXTENSION ptrack 
WITH SCHEMA ptrack") + + # PG 9.5 + if self.get_version(node) < 90600: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + # PG 9.6 + elif self.get_version(node) > 90600 and self.get_version(node) < 100000: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION 
pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # >= 15 + else: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON 
DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + + if self.ptrack: + node.safe_psql( + "backupdb", + "GRANT USAGE ON SCHEMA ptrack TO backup") + + node.safe_psql( + "backupdb", + "GRANT EXECUTE ON FUNCTION ptrack.ptrack_get_pagemapset(pg_lsn) TO backup; " + "GRANT EXECUTE ON FUNCTION ptrack.ptrack_init_lsn() TO backup;") + + if ProbackupTest.enterprise: + node.safe_psql( + "backupdb", + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") + + # FULL backup + self.backup_node( + backup_dir, 'node', node, + datname='backupdb', options=['--stream', '-U', 'backup']) + self.backup_node( + backup_dir, 'node', node, + datname='backupdb', options=['-U', 'backup']) + + # PAGE + self.backup_node( + backup_dir, 'node', node, backup_type='page', + datname='backupdb', options=['-U', 'backup']) + self.backup_node( + backup_dir, 'node', node, backup_type='page', datname='backupdb', + options=['--stream', '-U', 'backup']) + + # DELTA + self.backup_node( + backup_dir, 'node', node, backup_type='delta', + datname='backupdb', options=['-U', 'backup']) + self.backup_node( + backup_dir, 'node', node, backup_type='delta', + datname='backupdb', options=['--stream', '-U', 'backup']) + + # PTRACK + if self.ptrack: + self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', + 
datname='backupdb', options=['-U', 'backup']) + self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', + datname='backupdb', options=['--stream', '-U', 'backup']) + + # @unittest.skip("skip") + def test_parent_choosing(self): + """ + PAGE3 <- RUNNING(parent should be FULL) + PAGE2 <- OK + PAGE1 <- CORRUPT + FULL + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + full_id = self.backup_node(backup_dir, 'node', node) + + # PAGE1 + page1_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGE2 + page2_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Change PAGE1 to ERROR + self.change_backup_status(backup_dir, 'node', page1_id, 'ERROR') + + # PAGE3 + page3_id = self.backup_node( + backup_dir, 'node', node, + backup_type='page', options=['--log-level-file=LOG']) + + log_file_path = os.path.join(backup_dir, 'log', 'pg_probackup.log') + with open(log_file_path) as f: + log_file_content = f.read() + + self.assertIn( + "WARNING: Backup {0} has invalid parent: {1}. " + "Cannot be a parent".format(page2_id, page1_id), + log_file_content) + + self.assertIn( + "WARNING: Backup {0} has status: ERROR. " + "Cannot be a parent".format(page1_id), + log_file_content) + + self.assertIn( + "Parent backup: {0}".format(full_id), + log_file_content) + + self.assertEqual( + self.show_pb( + backup_dir, 'node', backup_id=page3_id)['parent-backup-id'], + full_id) + + # @unittest.skip("skip") + def test_parent_choosing_1(self): + """ + PAGE3 <- RUNNING(parent should be FULL) + PAGE2 <- OK + PAGE1 <- (missing) + FULL + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + full_id = self.backup_node(backup_dir, 'node', node) + + # PAGE1 + page1_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGE2 + page2_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Delete PAGE1 + shutil.rmtree( + os.path.join(backup_dir, 'backups', 'node', page1_id)) + + # PAGE3 + page3_id = self.backup_node( + backup_dir, 'node', node, + backup_type='page', options=['--log-level-file=LOG']) + + log_file_path = os.path.join(backup_dir, 'log', 'pg_probackup.log') + with open(log_file_path) as f: + log_file_content = f.read() + + self.assertIn( + "WARNING: Backup {0} has missing parent: {1}. 
" + "Cannot be a parent".format(page2_id, page1_id), + log_file_content) + + self.assertIn( + "Parent backup: {0}".format(full_id), + log_file_content) + + self.assertEqual( + self.show_pb( + backup_dir, 'node', backup_id=page3_id)['parent-backup-id'], + full_id) + + # @unittest.skip("skip") + def test_parent_choosing_2(self): + """ + PAGE3 <- RUNNING(backup should fail) + PAGE2 <- OK + PAGE1 <- OK + FULL <- (missing) + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + full_id = self.backup_node(backup_dir, 'node', node) + + # PAGE1 + page1_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGE2 + page2_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Delete FULL + shutil.rmtree( + os.path.join(backup_dir, 'backups', 'node', full_id)) + + # PAGE3 + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='page', options=['--log-level-file=LOG']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because FULL backup is missing" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'WARNING: Valid full backup on current timeline 1 is not found' in e.message and + 'ERROR: Create new full backup before an incremental one' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + self.show_pb( + backup_dir, 'node')[2]['status'], + 'ERROR') + + # @unittest.skip("skip") + def test_backup_with_less_privileges_role(self): + """ + check permissions correctness from documentation: + https://github.com/postgrespro/pg_probackup/blob/master/Documentation.md#configuring-the-database-cluster + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums'], + pg_options={ + 'archive_timeout': '30s', + 'archive_mode': 'always', + 'checkpoint_timeout': '60s', + 'wal_level': 'logical'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_config(backup_dir, 'node', options=['--archive-timeout=60s']) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + 'postgres', + 'CREATE DATABASE backupdb') + + if self.ptrack: + node.safe_psql( + 'backupdb', + 'CREATE EXTENSION ptrack') + + # PG 9.5 + if self.get_version(node) < 90600: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " + "GRANT EXECUTE ON FUNCTION 
pg_catalog.txid_current() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + # PG 9.6 + elif self.get_version(node) > 90600 and self.get_version(node) < 100000: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " + "COMMIT;" + ) + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " + "COMMIT;" + ) + # >= 15 + else: + node.safe_psql( + 'backupdb', + "BEGIN; " + "CREATE ROLE backup WITH LOGIN; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " + "COMMIT;" + ) + + # enable STREAM backup + node.safe_psql( + 'backupdb', + 'ALTER ROLE backup WITH REPLICATION;') + + # FULL backup + self.backup_node( + backup_dir, 'node', node, + datname='backupdb', options=['--stream', '-U', 'backup']) + self.backup_node( + backup_dir, 'node', node, + datname='backupdb', options=['-U', 'backup']) + + # PAGE + self.backup_node( + backup_dir, 'node', node, 
backup_type='page', + datname='backupdb', options=['-U', 'backup']) + self.backup_node( + backup_dir, 'node', node, backup_type='page', datname='backupdb', + options=['--stream', '-U', 'backup']) + + # DELTA + self.backup_node( + backup_dir, 'node', node, backup_type='delta', + datname='backupdb', options=['-U', 'backup']) + self.backup_node( + backup_dir, 'node', node, backup_type='delta', + datname='backupdb', options=['--stream', '-U', 'backup']) + + # PTRACK + if self.ptrack: + self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', + datname='backupdb', options=['-U', 'backup']) + self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', + datname='backupdb', options=['--stream', '-U', 'backup']) + + if self.get_version(node) < 90600: + return + + # Restore as replica + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.restore_node(backup_dir, 'node', replica) + self.set_replica(node, replica) + self.add_instance(backup_dir, 'replica', replica) + self.set_config( + backup_dir, 'replica', + options=['--archive-timeout=120s', '--log-level-console=LOG']) + self.set_archiving(backup_dir, 'replica', replica, replica=True) + self.set_auto_conf(replica, {'hot_standby': 'on'}) + + # freeze bgwriter to get rid of RUNNING XACTS records + # bgwriter_pid = node.auxiliary_pids[ProcessType.BackgroundWriter][0] + # gdb_checkpointer = self.gdb_attach(bgwriter_pid) + + copy_tree( + os.path.join(backup_dir, 'wal', 'node'), + os.path.join(backup_dir, 'wal', 'replica')) + + replica.slow_start(replica=True) + + # self.switch_wal_segment(node) + # self.switch_wal_segment(node) + + self.backup_node( + backup_dir, 'replica', replica, + datname='backupdb', options=['-U', 'backup']) + + # stream full backup from replica + self.backup_node( + backup_dir, 'replica', replica, + datname='backupdb', options=['--stream', '-U', 'backup']) + +# self.switch_wal_segment(node) + + # PAGE backup from replica + self.switch_wal_segment(node) + self.backup_node( + backup_dir, 'replica', replica, backup_type='page', + datname='backupdb', options=['-U', 'backup', '--archive-timeout=30s']) + + self.backup_node( + backup_dir, 'replica', replica, backup_type='page', + datname='backupdb', options=['--stream', '-U', 'backup']) + + # DELTA backup from replica + self.switch_wal_segment(node) + self.backup_node( + backup_dir, 'replica', replica, backup_type='delta', + datname='backupdb', options=['-U', 'backup']) + self.backup_node( + backup_dir, 'replica', replica, backup_type='delta', + datname='backupdb', options=['--stream', '-U', 'backup']) + + # PTRACK backup from replica + if self.ptrack: + self.switch_wal_segment(node) + self.backup_node( + backup_dir, 'replica', replica, backup_type='ptrack', + datname='backupdb', options=['-U', 'backup']) + self.backup_node( + backup_dir, 'replica', replica, backup_type='ptrack', + datname='backupdb', options=['--stream', '-U', 'backup']) + + @unittest.skip("skip") + def test_issue_132(self): + """ + https://github.com/postgrespro/pg_probackup/issues/132 + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + with node.connect("postgres") as conn: + for i in range(50000): + conn.execute( 
+ "CREATE TABLE t_{0} as select 1".format(i)) + conn.commit() + + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + pgdata = self.pgdata_content(node.data_dir) + + node.cleanup() + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + exit(1) + + @unittest.skip("skip") + def test_issue_132_1(self): + """ + https://github.com/postgrespro/pg_probackup/issues/132 + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + # TODO: check version of old binary, it should be 2.1.4, 2.1.5 or 2.2.1 + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + with node.connect("postgres") as conn: + for i in range(30000): + conn.execute( + "CREATE TABLE t_{0} as select 1".format(i)) + conn.commit() + + full_id = self.backup_node( + backup_dir, 'node', node, options=['--stream'], old_binary=True) + + delta_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--stream'], old_binary=True) + + node.cleanup() + + # make sure that new binary can detect corruption + try: + self.validate_pb(backup_dir, 'node', backup_id=full_id) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because FULL backup is CORRUPT" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'WARNING: Backup {0} is a victim of metadata corruption'.format(full_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + try: + self.validate_pb(backup_dir, 'node', backup_id=delta_id) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because FULL backup is CORRUPT" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'WARNING: Backup {0} is a victim of metadata corruption'.format(full_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'CORRUPT', self.show_pb(backup_dir, 'node', full_id)['status'], + 'Backup STATUS should be "CORRUPT"') + + self.assertEqual( + 'ORPHAN', self.show_pb(backup_dir, 'node', delta_id)['status'], + 'Backup STATUS should be "ORPHAN"') + + # check that revalidation is working correctly + try: + self.restore_node( + backup_dir, 'node', node, backup_id=delta_id) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because FULL backup is CORRUPT" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'WARNING: Backup {0} is a victim of metadata corruption'.format(full_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'CORRUPT', self.show_pb(backup_dir, 'node', full_id)['status'], + 'Backup STATUS should be "CORRUPT"') + + self.assertEqual( + 'ORPHAN', self.show_pb(backup_dir, 'node', delta_id)['status'], + 'Backup STATUS should be "ORPHAN"') + + # check that '--no-validate' do not allow to restore ORPHAN backup +# try: +# 
self.restore_node( +# backup_dir, 'node', node, backup_id=delta_id, +# options=['--no-validate']) +# # we should die here because exception is what we expect to happen +# self.assertEqual( +# 1, 0, +# "Expecting Error because FULL backup is CORRUPT" +# "\n Output: {0} \n CMD: {1}".format( +# repr(self.output), self.cmd)) +# except ProbackupException as e: +# self.assertIn( +# 'Insert data', +# e.message, +# '\n Unexpected Error Message: {0}\n CMD: {1}'.format( +# repr(e.message), self.cmd)) + + node.cleanup() + + output = self.restore_node( + backup_dir, 'node', node, backup_id=full_id, options=['--force']) + + self.assertIn( + 'WARNING: Backup {0} has status: CORRUPT'.format(full_id), + output) + + self.assertIn( + 'WARNING: Backup {0} is corrupt.'.format(full_id), + output) + + self.assertIn( + 'WARNING: Backup {0} is not valid, restore is forced'.format(full_id), + output) + + self.assertIn( + 'INFO: Restore of backup {0} completed.'.format(full_id), + output) + + node.cleanup() + + output = self.restore_node( + backup_dir, 'node', node, backup_id=delta_id, options=['--force']) + + self.assertIn( + 'WARNING: Backup {0} is orphan.'.format(delta_id), + output) + + self.assertIn( + 'WARNING: Backup {0} is not valid, restore is forced'.format(full_id), + output) + + self.assertIn( + 'WARNING: Backup {0} is not valid, restore is forced'.format(delta_id), + output) + + self.assertIn( + 'INFO: Restore of backup {0} completed.'.format(delta_id), + output) + + def test_note_sanity(self): + """ + test that adding note to backup works as expected + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, + options=['--stream', '--log-level-file=LOG', '--note=test_note']) + + show_backups = self.show_pb(backup_dir, 'node') + + print(self.show_pb(backup_dir, as_text=True, as_json=True)) + + self.assertEqual(show_backups[0]['note'], "test_note") + + self.set_backup(backup_dir, 'node', backup_id, options=['--note=none']) + + backup_meta = self.show_pb(backup_dir, 'node', backup_id) + + self.assertNotIn( + 'note', + backup_meta) + + # @unittest.skip("skip") + def test_parent_backup_made_by_newer_version(self): + """incremental backup with parent made by newer version""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + backup_id = self.backup_node(backup_dir, 'node', node) + + control_file = os.path.join( + backup_dir, "backups", "node", backup_id, + "backup.control") + + version = self.probackup_version + fake_new_version = str(int(version.split('.')[0]) + 1) + '.0.0' + + with open(control_file, 'r') as f: + data = f.read(); + + data = data.replace(version, fake_new_version) + + with open(control_file, 'w') as f: + f.write(data); + + try: + self.backup_node(backup_dir, 'node', node, backup_type="page") + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 
0, + "Expecting Error because incremental backup should not be possible " + "if parent made by newer version.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "pg_probackup do not guarantee to be forward compatible. " + "Please upgrade pg_probackup binary.", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertEqual( + self.show_pb(backup_dir, 'node')[1]['status'], "ERROR") + + # @unittest.skip("skip") + def test_issue_289(self): + """ + https://github.com/postgrespro/pg_probackup/issues/289 + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + + node.slow_start() + + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='page', options=['--archive-timeout=10s']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because full backup is missing" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertNotIn( + "INFO: Wait for WAL segment", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertIn( + "ERROR: Create new full backup before an incremental one", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertEqual( + self.show_pb(backup_dir, 'node')[0]['status'], "ERROR") + + # @unittest.skip("skip") + def test_issue_290(self): + """ + https://github.com/postgrespro/pg_probackup/issues/290 + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + + os.rmdir( + os.path.join(backup_dir, "wal", "node")) + + node.slow_start() + + try: + self.backup_node( + backup_dir, 'node', node, + options=['--archive-timeout=10s']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because full backup is missing" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertNotIn( + "INFO: Wait for WAL segment", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertIn( + "WAL archive directory is not accessible", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertEqual( + self.show_pb(backup_dir, 'node')[0]['status'], "ERROR") + + @unittest.skip("skip") + def test_issue_203(self): + """ + https://github.com/postgrespro/pg_probackup/issues/203 + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + with 
node.connect("postgres") as conn: + for i in range(1000000): + conn.execute( + "CREATE TABLE t_{0} as select 1".format(i)) + conn.commit() + + full_id = self.backup_node( + backup_dir, 'node', node, options=['--stream', '-j2']) + + pgdata = self.pgdata_content(node.data_dir) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node(backup_dir, 'node', + node_restored, data_dir=node_restored.data_dir) + + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_issue_231(self): + """ + https://github.com/postgrespro/pg_probackup/issues/231 + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + datadir = os.path.join(node.data_dir, '123') + + try: + self.backup_node( + backup_dir, 'node', node, data_dir='{0}'.format(datadir)) + except: + pass + + out = self.backup_node(backup_dir, 'node', node, options=['--stream'], return_id=False) + + # it is a bit racy + self.assertIn("WARNING: Cannot create directory", out) + + def test_incr_backup_filenode_map(self): + """ + https://github.com/postgrespro/pg_probackup/issues/320 + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node1'), + initdb_params=['--data-checksums']) + node1.cleanup() + + node.pgbench_init(scale=5) + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1']) + + backup_id = self.backup_node(backup_dir, 'node', node, backup_type='delta') + + node.safe_psql( + 'postgres', + 'reindex index pg_type_oid_index') + + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + # incremental restore into node1 + node.cleanup() + + self.restore_node(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + 'postgres', + 'select 1') + + # @unittest.skip("skip") + def test_missing_wal_segment(self): + """""" + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums'], + pg_options={'archive_timeout': '30s'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=10) + + node.safe_psql( + 'postgres', + 'CREATE DATABASE backupdb') + + # get segments in pg_wal, sort then and remove all but the latest + pg_wal_dir = os.path.join(node.data_dir, 'pg_wal') + + if node.major_version >= 10: + pg_wal_dir = 
os.path.join(node.data_dir, 'pg_wal') + else: + pg_wal_dir = os.path.join(node.data_dir, 'pg_xlog') + + # Full backup in streaming mode + gdb = self.backup_node( + backup_dir, 'node', node, datname='backupdb', + options=['--stream', '--log-level-file=INFO'], gdb=True) + + # break at streaming start + gdb.set_breakpoint('start_WAL_streaming') + gdb.run_until_break() + + # generate some more data + node.pgbench_init(scale=3) + + # remove redundant WAL segments in pg_wal + files = os.listdir(pg_wal_dir) + files.sort(reverse=True) + + # leave first two files in list + del files[:2] + for filename in files: + os.remove(os.path.join(pg_wal_dir, filename)) + + gdb.continue_execution_until_exit() + + self.assertIn( + 'unexpected termination of replication stream: ERROR: requested WAL segment', + gdb.output) + + self.assertIn( + 'has already been removed', + gdb.output) + + self.assertIn( + 'ERROR: Interrupted during waiting for WAL streaming', + gdb.output) + + self.assertIn( + 'WARNING: backup in progress, stop backup', + gdb.output) + + # TODO: check the same for PAGE backup + + # @unittest.skip("skip") + def test_missing_replication_permission(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) +# self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + # Create replica + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + self.restore_node(backup_dir, 'node', replica) + + # Settings for Replica + self.set_replica(node, replica) + replica.slow_start(replica=True) + + node.safe_psql( + 'postgres', + 'CREATE DATABASE backupdb') + + # PG 9.5 + if self.get_version(node) < 90600: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + # PG 9.6 + elif self.get_version(node) > 90600 and self.get_version(node) < 100000: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT 
ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # >= 15 + else: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT 
EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + + if ProbackupTest.enterprise: + node.safe_psql( + "backupdb", + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") + + sleep(2) + replica.promote() + + # Delta backup + try: + self.backup_node( + backup_dir, 'node', replica, backup_type='delta', + data_dir=replica.data_dir, datname='backupdb', options=['--stream', '-U', 'backup']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because incremental backup should not be possible " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + # 9.5: ERROR: must be superuser or replication role to run a backup + # >=9.6: FATAL: must be superuser or replication role to start walsender + self.assertRegex( + e.message, + "ERROR: must be superuser or replication role to run a backup|FATAL: must be superuser or replication role to start walsender", + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_missing_replication_permission_1(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + # Create replica + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + self.restore_node(backup_dir, 'node', replica) + + # Settings for Replica + self.set_replica(node, replica) + replica.slow_start(replica=True) + + node.safe_psql( + 'postgres', + 'CREATE DATABASE backupdb') + + # PG 9.5 + if self.get_version(node) < 90600: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + # PG 9.6 + elif self.get_version(node) > 90600 and self.get_version(node) < 
100000: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # > 15 + else: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE 
ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + + if ProbackupTest.enterprise: + node.safe_psql( + "backupdb", + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") + + replica.promote() + + # PAGE + output = self.backup_node( + backup_dir, 'node', replica, backup_type='page', + data_dir=replica.data_dir, datname='backupdb', options=['-U', 'backup'], + return_id=False) + + self.assertIn( + 'WARNING: Valid full backup on current timeline 2 is not found, trying to look up on previous timelines', + output) + + # Messages before 14 + # 'WARNING: could not connect to database backupdb: FATAL: must be superuser or replication role to start walsender' + # Messages for >=14 + # 'WARNING: could not connect to database backupdb: connection to server on socket "/tmp/.s.PGSQL.30983" failed: FATAL: must be superuser or replication role to start walsender' + # 'WARNING: could not connect to database backupdb: connection to server at "localhost" (127.0.0.1), port 29732 failed: FATAL: must be superuser or replication role to start walsender' + self.assertRegex( + output, + r'WARNING: could not connect to database backupdb: (connection to server (on socket "/tmp/.s.PGSQL.\d+"|at "localhost" \(127.0.0.1\), port \d+) failed: ){0,1}' + 'FATAL: must be superuser or replication role to start walsender') + + # @unittest.skip("skip") + def test_basic_backup_default_transaction_read_only(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={'default_transaction_read_only': 'on'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + try: + node.safe_psql( + 'postgres', + 'create temp table t1()') + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because CREATE TABLE is not possible in a read-only transaction " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except QueryException as e: + self.assertIn( + "cannot execute CREATE TABLE in a read-only transaction", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + # FULL backup + self.backup_node( + backup_dir, 'node', node, + options=['--stream']) + + # DELTA backup + self.backup_node( + backup_dir, 'node', node, backup_type='delta', options=['--stream']) + + # PAGE backup + self.backup_node(backup_dir, 'node', node, backup_type='page') + + # @unittest.skip("skip") + def test_backup_atexit(self): + """""" + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, 
self.fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=5) + + # Full backup in streaming mode + gdb = self.backup_node( + backup_dir, 'node', node, + options=['--stream', '--log-level-file=VERBOSE'], gdb=True) + + # break at streaming start + gdb.set_breakpoint('backup_data_file') + gdb.run_until_break() + + gdb.remove_all_breakpoints() + gdb._execute('signal SIGINT') + sleep(1) + + self.assertEqual( + self.show_pb( + backup_dir, 'node')[0]['status'], 'ERROR') + + with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: + log_content = f.read() + #print(log_content) + self.assertIn( + 'WARNING: backup in progress, stop backup', + log_content) + + if self.get_version(node) < 150000: + self.assertIn( + 'FROM pg_catalog.pg_stop_backup', + log_content) + else: + self.assertIn( + 'FROM pg_catalog.pg_backup_stop', + log_content) + + self.assertIn( + 'setting its status to ERROR', + log_content) + + # @unittest.skip("skip") + def test_pg_stop_backup_missing_permissions(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=5) + + self.simple_bootstrap(node, 'backup') + + if self.get_version(node) < 90600: + node.safe_psql( + 'postgres', + 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() FROM backup') + elif self.get_version(node) > 90600 and self.get_version(node) < 100000: + node.safe_psql( + 'postgres', + 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) FROM backup') + elif self.get_version(node) < 150000: + node.safe_psql( + 'postgres', + 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) FROM backup') + else: + node.safe_psql( + 'postgres', + 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) FROM backup') + + + # Full backup in streaming mode + try: + self.backup_node( + backup_dir, 'node', node, + options=['--stream', '-U', 'backup']) + # we should die here because exception is what we expect to happen + if self.get_version(node) < 150000: + self.assertEqual( + 1, 0, + "Expecting Error because of missing permissions on pg_stop_backup " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + else: + self.assertEqual( + 1, 0, + "Expecting Error because of missing permissions on pg_backup_stop " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + if self.get_version(node) < 150000: + self.assertIn( + "ERROR: permission denied for function pg_stop_backup", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + else: + self.assertIn( + "ERROR: permission denied for function pg_backup_stop", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertIn( + "query was: SELECT pg_catalog.txid_snapshot_xmax", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def 
test_start_time(self): + """Test, that option --start-time allows to set backup_id and restore""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + startTime = int(time()) + self.backup_node( + backup_dir, 'node', node, backup_type='full', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore FULL backup by backup_id calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_full'), + backup_id=base36enc(startTime)) + + #FULL backup with incorrect start time + try: + startTime = str(int(time()-100000)) + self.backup_node( + backup_dir, 'node', node, backup_type='full', + options=['--stream', '--start-time={0}'.format(startTime)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + 'Expecting Error because start time for new backup must be newer ' + '\n Output: {0} \n CMD: {1}'.format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertRegex( + e.message, + r"ERROR: Can't assign backup_id from requested start_time \(\w*\), this time must be later that backup \w*\n", + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + # DELTA backup + startTime = int(time()) + self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore DELTA backup by backup_id calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_delta'), + backup_id=base36enc(startTime)) + + # PAGE backup + startTime = int(time()) + self.backup_node( + backup_dir, 'node', node, backup_type='page', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore PAGE backup by backup_id calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_page'), + backup_id=base36enc(startTime)) + + # PTRACK backup + if self.ptrack: + node.safe_psql( + 'postgres', + 'create extension ptrack') + + startTime = int(time()) + self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore PTRACK backup by backup_id calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_ptrack'), + backup_id=base36enc(startTime)) + + # @unittest.skip("skip") + def test_start_time_few_nodes(self): + """Test, that we can synchronize backup_id's for different DBs""" + node1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node1'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + backup_dir1 = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup1') + self.init_pb(backup_dir1) + self.add_instance(backup_dir1, 'node1', node1) + self.set_archiving(backup_dir1, 'node1', node1) + node1.slow_start() + + node2 = 
self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node2'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + backup_dir2 = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup2') + self.init_pb(backup_dir2) + self.add_instance(backup_dir2, 'node2', node2) + self.set_archiving(backup_dir2, 'node2', node2) + node2.slow_start() + + # FULL backup + startTime = str(int(time())) + self.backup_node( + backup_dir1, 'node1', node1, backup_type='full', + options=['--stream', '--start-time={0}'.format(startTime)]) + self.backup_node( + backup_dir2, 'node2', node2, backup_type='full', + options=['--stream', '--start-time={0}'.format(startTime)]) + show_backup1 = self.show_pb(backup_dir1, 'node1')[0] + show_backup2 = self.show_pb(backup_dir2, 'node2')[0] + self.assertEqual(show_backup1['id'], show_backup2['id']) + + # DELTA backup + startTime = str(int(time())) + self.backup_node( + backup_dir1, 'node1', node1, backup_type='delta', + options=['--stream', '--start-time={0}'.format(startTime)]) + self.backup_node( + backup_dir2, 'node2', node2, backup_type='delta', + options=['--stream', '--start-time={0}'.format(startTime)]) + show_backup1 = self.show_pb(backup_dir1, 'node1')[1] + show_backup2 = self.show_pb(backup_dir2, 'node2')[1] + self.assertEqual(show_backup1['id'], show_backup2['id']) + + # PAGE backup + startTime = str(int(time())) + self.backup_node( + backup_dir1, 'node1', node1, backup_type='page', + options=['--stream', '--start-time={0}'.format(startTime)]) + self.backup_node( + backup_dir2, 'node2', node2, backup_type='page', + options=['--stream', '--start-time={0}'.format(startTime)]) + show_backup1 = self.show_pb(backup_dir1, 'node1')[2] + show_backup2 = self.show_pb(backup_dir2, 'node2')[2] + self.assertEqual(show_backup1['id'], show_backup2['id']) + + # PTRACK backup + if self.ptrack: + node1.safe_psql( + 'postgres', + 'create extension ptrack') + node2.safe_psql( + 'postgres', + 'create extension ptrack') + + startTime = str(int(time())) + self.backup_node( + backup_dir1, 'node1', node1, backup_type='ptrack', + options=['--stream', '--start-time={0}'.format(startTime)]) + self.backup_node( + backup_dir2, 'node2', node2, backup_type='ptrack', + options=['--stream', '--start-time={0}'.format(startTime)]) + show_backup1 = self.show_pb(backup_dir1, 'node1')[3] + show_backup2 = self.show_pb(backup_dir2, 'node2')[3] + self.assertEqual(show_backup1['id'], show_backup2['id']) + diff --git a/tests/cfs_backup_test.py b/tests/cfs_backup_test.py new file mode 100644 index 000000000..28ef275df --- /dev/null +++ b/tests/cfs_backup_test.py @@ -0,0 +1,1235 @@ +import os +import unittest +import random +import shutil + +from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException + +tblspace_name = 'cfs_tblspace' + + +class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): + # --- Begin --- # + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def setUp(self): + self.backup_dir = os.path.join( + self.tmp_path, self.module_name, self.fname, 'backup') + self.node = self.make_simple_node( + base_dir="{0}/{1}/node".format(self.module_name, self.fname), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums'], + pg_options={ + 'cfs_encryption': 'off', + 'max_wal_senders': '2', + 'shared_buffers': '200MB' + } + ) + + self.init_pb(self.backup_dir) + 
self.add_instance(self.backup_dir, 'node', self.node) + self.set_archiving(self.backup_dir, 'node', self.node) + + self.node.slow_start() + + self.node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.create_tblspace_in_node(self.node, tblspace_name, cfs=True) + + tblspace = self.node.safe_psql( + "postgres", + "SELECT * FROM pg_tablespace WHERE spcname='{0}'".format( + tblspace_name)) + + self.assertIn( + tblspace_name, str(tblspace), + "ERROR: The tablespace was not created " + "or was created without compression") + + self.assertIn( + "compression=true", str(tblspace), + "ERROR: The tablespace was not created " + "or was created without compression") + + self.assertTrue( + find_by_name( + [self.get_tblspace_path(self.node, tblspace_name)], + ['pg_compression']), + "ERROR: File pg_compression not found" + ) + + # --- Section: Full --- # + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_fullbackup_empty_tablespace(self): + """Case: Check fullbackup empty compressed tablespace""" + + backup_id = None + try: + backup_id = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') + except ProbackupException as e: + self.fail( + "ERROR: Full backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + show_backup = self.show_pb(self.backup_dir, 'node', backup_id) + self.assertEqual( + "OK", + show_backup["status"], + "ERROR: Full backup status is not valid. \n " + "Current backup status={0}".format(show_backup["status"]) + ) + self.assertTrue( + find_by_name( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['pg_compression']), + "ERROR: File pg_compression not found in backup dir" + ) + + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_fullbackup_empty_tablespace_stream(self): + """Case: Check fullbackup empty compressed tablespace with options stream""" + + backup_id = None + try: + backup_id = self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='full', options=['--stream']) + except ProbackupException as e: + self.fail( + "ERROR: Full backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + show_backup = self.show_pb(self.backup_dir, 'node', backup_id) + self.assertEqual( + "OK", + show_backup["status"], + "ERROR: Full backup status is not valid. 
\n " + "Current backup status={0}".format(show_backup["status"]) + ) + self.assertTrue( + find_by_name( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['pg_compression']), + "ERROR: File pg_compression not found in backup dir" + ) + + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + # PGPRO-1018 invalid file size + def test_fullbackup_after_create_table(self): + """Case: Make full backup after created table in the tablespace""" + if not self.enterprise: + return + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) + ) + + backup_id = None + try: + backup_id = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') + except ProbackupException as e: + self.fail( + "\n ERROR: {0}\n CMD: {1}".format( + repr(e.message), + repr(self.cmd) + ) + ) + return False + show_backup = self.show_pb(self.backup_dir, 'node', backup_id) + self.assertEqual( + "OK", + show_backup["status"], + "ERROR: Full backup status is not valid. \n " + "Current backup status={0}".format(show_backup["status"]) + ) + self.assertTrue( + find_by_name( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['pg_compression']), + "ERROR: File pg_compression not found in {0}".format( + os.path.join(self.backup_dir, 'node', backup_id)) + ) + + # check cfm size + cfms = find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['.cfm']) + self.assertTrue(cfms, "ERROR: .cfm files not found in backup dir") + for cfm in cfms: + size = os.stat(cfm).st_size + self.assertLessEqual(size, 4096, + "ERROR: {0} is not truncated (has size {1} > 4096)".format( + cfm, size + )) + + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + # PGPRO-1018 invalid file size + def test_fullbackup_after_create_table_stream(self): + """ + Case: Make full backup after created table in the tablespace with option --stream + """ + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) + ) + + backup_id = None + try: + backup_id = self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='full', options=['--stream']) + except ProbackupException as e: + self.fail( + "ERROR: Full backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + show_backup = self.show_pb(self.backup_dir, 'node', backup_id) + self.assertEqual( + "OK", + show_backup["status"], + "ERROR: Full backup status is not valid. 
\n " + "Current backup status={0}".format(show_backup["status"]) + ) + self.assertTrue( + find_by_name( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['pg_compression']), + "ERROR: File pg_compression not found in backup dir" + ) + self.assertTrue( + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['.cfm']), + "ERROR: .cfm files not found in backup dir" + ) + + # --- Section: Incremental from empty tablespace --- # + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_fullbackup_empty_tablespace_ptrack_after_create_table(self): + """ + Case: Make full backup before created table in the tablespace. + Make ptrack backup after create table + """ + + try: + self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') + except ProbackupException as e: + self.fail( + "ERROR: Full backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) + ) + + backup_id = None + try: + backup_id = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='ptrack') + except ProbackupException as e: + self.fail( + "ERROR: Incremental backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + show_backup = self.show_pb(self.backup_dir, 'node', backup_id) + self.assertEqual( + "OK", + show_backup["status"], + "ERROR: Incremental backup status is not valid. \n " + "Current backup status={0}".format(show_backup["status"]) + ) + self.assertTrue( + find_by_name( + [self.get_tblspace_path(self.node, tblspace_name)], + ['pg_compression']), + "ERROR: File pg_compression not found" + ) + self.assertTrue( + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['.cfm']), + "ERROR: .cfm files not found in backup dir" + ) + + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_fullbackup_empty_tablespace_ptrack_after_create_table_stream(self): + """ + Case: Make full backup before created table in the tablespace. + Make ptrack backup after create table + """ + + try: + self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='full', options=['--stream']) + except ProbackupException as e: + self.fail( + "ERROR: Full backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) + ) + + backup_id = None + try: + backup_id = self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='ptrack', options=['--stream']) + except ProbackupException as e: + self.fail( + "ERROR: Incremental backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + show_backup = self.show_pb(self.backup_dir, 'node', backup_id) + self.assertEqual( + "OK", + show_backup["status"], + "ERROR: Incremental backup status is not valid. 
\n " + "Current backup status={0}".format(show_backup["status"]) + ) + self.assertTrue( + find_by_name( + [self.get_tblspace_path(self.node, tblspace_name)], + ['pg_compression']), + "ERROR: File pg_compression not found" + ) + self.assertTrue( + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['.cfm']), + "ERROR: .cfm files not found in backup dir" + ) + self.assertFalse( + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['_ptrack']), + "ERROR: _ptrack files was found in backup dir" + ) + + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_fullbackup_empty_tablespace_page_after_create_table(self): + """ + Case: Make full backup before created table in the tablespace. + Make page backup after create table + """ + + try: + self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') + except ProbackupException as e: + self.fail( + "ERROR: Full backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) + ) + + backup_id = None + try: + backup_id = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='page') + except ProbackupException as e: + self.fail( + "ERROR: Incremental backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + show_backup = self.show_pb(self.backup_dir, 'node', backup_id) + self.assertEqual( + "OK", + show_backup["status"], + "ERROR: Incremental backup status is not valid. \n " + "Current backup status={0}".format(show_backup["status"]) + ) + self.assertTrue( + find_by_name( + [self.get_tblspace_path(self.node, tblspace_name)], + ['pg_compression']), + "ERROR: File pg_compression not found" + ) + self.assertTrue( + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['.cfm']), + "ERROR: .cfm files not found in backup dir" + ) + + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_page_doesnt_store_unchanged_cfm(self): + """ + Case: Test page backup doesn't store cfm file if table were not modified + """ + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) + ) + + try: + backup_id_full = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') + except ProbackupException as e: + self.fail( + "ERROR: Full backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + self.assertTrue( + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id_full)], + ['.cfm']), + "ERROR: .cfm files not found in backup dir" + ) + + try: + backup_id = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='page') + except ProbackupException as e: + self.fail( + "ERROR: Incremental backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + show_backup = self.show_pb(self.backup_dir, 'node', backup_id) + self.assertEqual( + "OK", + show_backup["status"], + "ERROR: Incremental backup status is not valid. 
\n " + "Current backup status={0}".format(show_backup["status"]) + ) + self.assertTrue( + find_by_name( + [self.get_tblspace_path(self.node, tblspace_name)], + ['pg_compression']), + "ERROR: File pg_compression not found" + ) + self.assertFalse( + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['.cfm']), + "ERROR: .cfm files is found in backup dir" + ) + + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_fullbackup_empty_tablespace_page_after_create_table_stream(self): + """ + Case: Make full backup before created table in the tablespace. + Make page backup after create table + """ + + try: + self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='full', options=['--stream']) + except ProbackupException as e: + self.fail( + "ERROR: Full backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) + ) + + backup_id = None + try: + backup_id = self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='page', options=['--stream']) + except ProbackupException as e: + self.fail( + "ERROR: Incremental backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + show_backup = self.show_pb(self.backup_dir, 'node', backup_id) + self.assertEqual( + "OK", + show_backup["status"], + "ERROR: Incremental backup status is not valid. \n " + "Current backup status={0}".format(show_backup["status"]) + ) + self.assertTrue( + find_by_name( + [self.get_tblspace_path(self.node, tblspace_name)], + ['pg_compression']), + "ERROR: File pg_compression not found" + ) + self.assertTrue( + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['.cfm']), + "ERROR: .cfm files not found in backup dir" + ) + self.assertFalse( + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['_ptrack']), + "ERROR: _ptrack files was found in backup dir" + ) + + # --- Section: Incremental from fill tablespace --- # + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_fullbackup_after_create_table_ptrack_after_create_table(self): + """ + Case: Make full backup before created table in the tablespace. + Make ptrack backup after create table. 
+ Check: incremental backup size should not be greater than full + """ + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,1005000) i".format('t1', tblspace_name) + ) + + backup_id_full = None + try: + backup_id_full = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') + except ProbackupException as e: + self.fail( + "ERROR: Full backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,10) i".format('t2', tblspace_name) + ) + + backup_id_ptrack = None + try: + backup_id_ptrack = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='ptrack') + except ProbackupException as e: + self.fail( + "ERROR: Incremental backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + show_backup_full = self.show_pb( + self.backup_dir, 'node', backup_id_full) + show_backup_ptrack = self.show_pb( + self.backup_dir, 'node', backup_id_ptrack) + self.assertGreater( + show_backup_full["data-bytes"], + show_backup_ptrack["data-bytes"], + "ERROR: Size of incremental backup greater than full. \n " + "INFO: {0} >{1}".format( + show_backup_ptrack["data-bytes"], + show_backup_full["data-bytes"] + ) + ) + + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_fullbackup_after_create_table_ptrack_after_create_table_stream(self): + """ + Case: Make full backup before created table in the tablespace(--stream). + Make ptrack backup after create table(--stream). + Check: incremental backup size should not be greater than full + """ + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,1005000) i".format('t1', tblspace_name) + ) + + backup_id_full = None + try: + backup_id_full = self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='full', options=['--stream']) + except ProbackupException as e: + self.fail( + "ERROR: Full backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,25) i".format('t2', tblspace_name) + ) + + backup_id_ptrack = None + try: + backup_id_ptrack = self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='ptrack', options=['--stream']) + except ProbackupException as e: + self.fail( + "ERROR: Incremental backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + show_backup_full = self.show_pb( + self.backup_dir, 'node', backup_id_full) + show_backup_ptrack = self.show_pb( + self.backup_dir, 'node', backup_id_ptrack) + self.assertGreater( + show_backup_full["data-bytes"], + show_backup_ptrack["data-bytes"], + "ERROR: Size of incremental backup greater than full. 
\n " + "INFO: {0} >{1}".format( + show_backup_ptrack["data-bytes"], + show_backup_full["data-bytes"] + ) + ) + + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_fullbackup_after_create_table_page_after_create_table(self): + """ + Case: Make full backup before created table in the tablespace. + Make ptrack backup after create table. + Check: incremental backup size should not be greater than full + """ + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,1005000) i".format('t1', tblspace_name) + ) + + backup_id_full = None + try: + backup_id_full = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') + except ProbackupException as e: + self.fail( + "ERROR: Full backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,10) i".format('t2', tblspace_name) + ) + + backup_id_page = None + try: + backup_id_page = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='page') + except ProbackupException as e: + self.fail( + "ERROR: Incremental backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + show_backup_full = self.show_pb( + self.backup_dir, 'node', backup_id_full) + show_backup_page = self.show_pb( + self.backup_dir, 'node', backup_id_page) + self.assertGreater( + show_backup_full["data-bytes"], + show_backup_page["data-bytes"], + "ERROR: Size of incremental backup greater than full. \n " + "INFO: {0} >{1}".format( + show_backup_page["data-bytes"], + show_backup_full["data-bytes"] + ) + ) + + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_multiple_segments(self): + """ + Case: Make full backup before created table in the tablespace. + Make ptrack backup after create table. 
+ Check: incremental backup size should not be greater than full + """ + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,1005000) i".format( + 't_heap', tblspace_name) + ) + + full_result = self.node.safe_psql("postgres", "SELECT * FROM t_heap") + + try: + backup_id_full = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') + except ProbackupException as e: + self.fail( + "ERROR: Full backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + self.node.safe_psql( + "postgres", + "INSERT INTO {0} " + "SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,10) i".format( + 't_heap') + ) + + page_result = self.node.safe_psql("postgres", "SELECT * FROM t_heap") + + try: + backup_id_page = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='page') + except ProbackupException as e: + self.fail( + "ERROR: Incremental backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + show_backup_full = self.show_pb( + self.backup_dir, 'node', backup_id_full) + show_backup_page = self.show_pb( + self.backup_dir, 'node', backup_id_page) + self.assertGreater( + show_backup_full["data-bytes"], + show_backup_page["data-bytes"], + "ERROR: Size of incremental backup greater than full. \n " + "INFO: {0} >{1}".format( + show_backup_page["data-bytes"], + show_backup_full["data-bytes"] + ) + ) + + # CHECK FULL BACKUP + self.node.stop() + self.node.cleanup() + shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) + self.restore_node( + self.backup_dir, 'node', self.node, backup_id=backup_id_full, + options=[ + "-j", "4", + "--recovery-target=immediate", + "--recovery-target-action=promote"]) + + self.node.slow_start() + self.assertEqual( + full_result, + self.node.safe_psql("postgres", "SELECT * FROM t_heap"), + 'Lost data after restore') + + # CHECK PAGE BACKUP + self.node.stop() + self.node.cleanup() + shutil.rmtree( + self.get_tblspace_path(self.node, tblspace_name), + ignore_errors=True) + self.restore_node( + self.backup_dir, 'node', self.node, backup_id=backup_id_page, + options=[ + "-j", "4", + "--recovery-target=immediate", + "--recovery-target-action=promote"]) + + self.node.slow_start() + self.assertEqual( + page_result, + self.node.safe_psql("postgres", "SELECT * FROM t_heap"), + 'Lost data after restore') + + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_multiple_segments_in_multiple_tablespaces(self): + """ + Case: Make full backup before created table in the tablespace. + Make page backup after create table. 
+ Check: incremental backup size should not be greater than full + """ + tblspace_name_1 = 'tblspace_name_1' + tblspace_name_2 = 'tblspace_name_2' + + self.create_tblspace_in_node(self.node, tblspace_name_1, cfs=True) + self.create_tblspace_in_node(self.node, tblspace_name_2, cfs=True) + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,1005000) i".format( + 't_heap_1', tblspace_name_1)) + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,1005000) i".format( + 't_heap_2', tblspace_name_2)) + + full_result_1 = self.node.safe_psql( + "postgres", "SELECT * FROM t_heap_1") + full_result_2 = self.node.safe_psql( + "postgres", "SELECT * FROM t_heap_2") + + try: + backup_id_full = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') + except ProbackupException as e: + self.fail( + "ERROR: Full backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + self.node.safe_psql( + "postgres", + "INSERT INTO {0} " + "SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,10) i".format( + 't_heap_1') + ) + + self.node.safe_psql( + "postgres", + "INSERT INTO {0} " + "SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,10) i".format( + 't_heap_2') + ) + + page_result_1 = self.node.safe_psql( + "postgres", "SELECT * FROM t_heap_1") + page_result_2 = self.node.safe_psql( + "postgres", "SELECT * FROM t_heap_2") + + try: + backup_id_page = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='page') + except ProbackupException as e: + self.fail( + "ERROR: Incremental backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + show_backup_full = self.show_pb( + self.backup_dir, 'node', backup_id_full) + show_backup_page = self.show_pb( + self.backup_dir, 'node', backup_id_page) + self.assertGreater( + show_backup_full["data-bytes"], + show_backup_page["data-bytes"], + "ERROR: Size of incremental backup greater than full. 
\n " + "INFO: {0} >{1}".format( + show_backup_page["data-bytes"], + show_backup_full["data-bytes"] + ) + ) + + # CHECK FULL BACKUP + self.node.stop() + + self.restore_node( + self.backup_dir, 'node', self.node, + backup_id=backup_id_full, + options=[ + "-j", "4", "--incremental-mode=checksum", + "--recovery-target=immediate", + "--recovery-target-action=promote"]) + self.node.slow_start() + + self.assertEqual( + full_result_1, + self.node.safe_psql("postgres", "SELECT * FROM t_heap_1"), + 'Lost data after restore') + self.assertEqual( + full_result_2, + self.node.safe_psql("postgres", "SELECT * FROM t_heap_2"), + 'Lost data after restore') + + # CHECK PAGE BACKUP + self.node.stop() + + self.restore_node( + self.backup_dir, 'node', self.node, + backup_id=backup_id_page, + options=[ + "-j", "4", "--incremental-mode=checksum", + "--recovery-target=immediate", + "--recovery-target-action=promote"]) + self.node.slow_start() + + self.assertEqual( + page_result_1, + self.node.safe_psql("postgres", "SELECT * FROM t_heap_1"), + 'Lost data after restore') + self.assertEqual( + page_result_2, + self.node.safe_psql("postgres", "SELECT * FROM t_heap_2"), + 'Lost data after restore') + + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_fullbackup_after_create_table_page_after_create_table_stream(self): + """ + Case: Make full backup before created table in the tablespace(--stream). + Make ptrack backup after create table(--stream). + Check: incremental backup will not greater as full + """ + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,1005000) i".format('t1', tblspace_name) + ) + + backup_id_full = None + try: + backup_id_full = self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='full', options=['--stream']) + except ProbackupException as e: + self.fail( + "ERROR: Full backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,10) i".format('t2', tblspace_name) + ) + + backup_id_page = None + try: + backup_id_page = self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='page', options=['--stream']) + except ProbackupException as e: + self.fail( + "ERROR: Incremental backup failed.\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + show_backup_full = self.show_pb( + self.backup_dir, 'node', backup_id_full) + show_backup_page = self.show_pb( + self.backup_dir, 'node', backup_id_page) + self.assertGreater( + show_backup_full["data-bytes"], + show_backup_page["data-bytes"], + "ERROR: Size of incremental backup greater than full. 
\n " + "INFO: {0} >{1}".format( + show_backup_page["data-bytes"], + show_backup_full["data-bytes"] + ) + ) + + # --- Make backup with not valid data(broken .cfm) --- # + @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_delete_random_cfm_file_from_tablespace_dir(self): + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) + ) + + self.node.safe_psql( + "postgres", + "CHECKPOINT" + ) + + list_cmf = find_by_extensions( + [self.get_tblspace_path(self.node, tblspace_name)], + ['.cfm']) + self.assertTrue( + list_cmf, + "ERROR: .cfm-files not found into tablespace dir" + ) + + os.remove(random.choice(list_cmf)) + + self.assertRaises( + ProbackupException, + self.backup_node, + self.backup_dir, + 'node', + self.node, + backup_type='full' + ) + + @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_delete_file_pg_compression_from_tablespace_dir(self): + os.remove( + find_by_name( + [self.get_tblspace_path(self.node, tblspace_name)], + ['pg_compression'])[0]) + + self.assertRaises( + ProbackupException, + self.backup_node, + self.backup_dir, + 'node', + self.node, + backup_type='full' + ) + + @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_delete_random_data_file_from_tablespace_dir(self): + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) + ) + + self.node.safe_psql( + "postgres", + "CHECKPOINT" + ) + + list_data_files = find_by_pattern( + [self.get_tblspace_path(self.node, tblspace_name)], + '^.*/\d+$') + self.assertTrue( + list_data_files, + "ERROR: Files of data not found into tablespace dir" + ) + + os.remove(random.choice(list_data_files)) + + self.assertRaises( + ProbackupException, + self.backup_node, + self.backup_dir, + 'node', + self.node, + backup_type='full' + ) + + @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_broken_random_cfm_file_into_tablespace_dir(self): + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) + ) + + list_cmf = find_by_extensions( + [self.get_tblspace_path(self.node, tblspace_name)], + ['.cfm']) + self.assertTrue( + list_cmf, + "ERROR: .cfm-files not found into tablespace dir" + ) + + corrupt_file(random.choice(list_cmf)) + + self.assertRaises( + ProbackupException, + self.backup_node, + self.backup_dir, + 'node', + self.node, + backup_type='full' + ) + + @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_broken_random_data_file_into_tablespace_dir(self): + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) + ) + + list_data_files = find_by_pattern( + [self.get_tblspace_path(self.node, tblspace_name)], + 
'^.*/\d+$') + self.assertTrue( + list_data_files, + "ERROR: Files of data not found into tablespace dir" + ) + + corrupt_file(random.choice(list_data_files)) + + self.assertRaises( + ProbackupException, + self.backup_node, + self.backup_dir, + 'node', + self.node, + backup_type='full' + ) + + @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_broken_file_pg_compression_into_tablespace_dir(self): + + corrupted_file = find_by_name( + [self.get_tblspace_path(self.node, tblspace_name)], + ['pg_compression'])[0] + + self.assertTrue( + corrupt_file(corrupted_file), + "ERROR: File is not corrupted or it missing" + ) + + self.assertRaises( + ProbackupException, + self.backup_node, + self.backup_dir, + 'node', + self.node, + backup_type='full' + ) + +# # --- End ---# + + +#class CfsBackupEncTest(CfsBackupNoEncTest): +# # --- Begin --- # +# def setUp(self): +# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key" +# super(CfsBackupEncTest, self).setUp() diff --git a/tests/cfs_catchup_test.py b/tests/cfs_catchup_test.py new file mode 100644 index 000000000..43c3f18f1 --- /dev/null +++ b/tests/cfs_catchup_test.py @@ -0,0 +1,117 @@ +import os +import unittest +import random +import shutil + +from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException + + +class CfsCatchupNoEncTest(ProbackupTest, unittest.TestCase): + + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_full_catchup_with_tablespace(self): + """ + Test tablespace transfers + """ + # preparation + src_pg = self.make_simple_node( + base_dir = os.path.join(self.module_name, self.fname, 'src'), + set_replication = True + ) + src_pg.slow_start() + tblspace1_old_path = self.get_tblspace_path(src_pg, 'tblspace1_old') + self.create_tblspace_in_node(src_pg, 'tblspace1', tblspc_path = tblspace1_old_path, cfs=True) + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question TABLESPACE tblspace1 AS SELECT 42 AS answer") + src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_pg.safe_psql( + "postgres", + "CHECKPOINT") + + # do full catchup with tablespace mapping + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) + tblspace1_new_path = self.get_tblspace_path(dst_pg, 'tblspace1_new') + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', + '-p', str(src_pg.port), + '--stream', + '-T', '{0}={1}'.format(tblspace1_old_path, tblspace1_new_path) + ] + ) + + # 1st check: compare data directories + self.compare_pgdata( + self.pgdata_content(src_pg.data_dir), + self.pgdata_content(dst_pg.data_dir) + ) + + # check cfm size + cfms = find_by_extensions([os.path.join(dst_pg.data_dir)], ['.cfm']) + self.assertTrue(cfms, "ERROR: .cfm files not found in backup dir") + for cfm in cfms: + size = os.stat(cfm).st_size + self.assertLessEqual(size, 4096, + "ERROR: {0} is not truncated (has size {1} > 4096)".format( + cfm, size + )) + + # make changes in master tablespace + src_pg.safe_psql( + "postgres", + "UPDATE ultimate_question SET answer = -1") + src_pg.safe_psql( + "postgres", + "CHECKPOINT") + + # run&recover catchup'ed instance + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + + # 2nd check: run verification query + dst_query_result = 
dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') + + # and now delta backup + dst_pg.stop() + + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', + '-p', str(src_pg.port), + '--stream', + '-T', '{0}={1}'.format(tblspace1_old_path, tblspace1_new_path) + ] + ) + + # check cfm size again + cfms = find_by_extensions([os.path.join(dst_pg.data_dir)], ['.cfm']) + self.assertTrue(cfms, "ERROR: .cfm files not found in backup dir") + for cfm in cfms: + size = os.stat(cfm).st_size + self.assertLessEqual(size, 4096, + "ERROR: {0} is not truncated (has size {1} > 4096)".format( + cfm, size + )) + + # run&recover catchup'ed instance + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + + + # 3rd check: run verification query + src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') diff --git a/tests/cfs_restore_test.py b/tests/cfs_restore_test.py new file mode 100644 index 000000000..6b69b4ffe --- /dev/null +++ b/tests/cfs_restore_test.py @@ -0,0 +1,450 @@ +""" +restore + Syntax: + + pg_probackup restore -B backupdir --instance instance_name + [-D datadir] + [ -i backup_id | [{--time=time | --xid=xid | --lsn=lsn } [--inclusive=boolean]]][--timeline=timeline] [-T OLDDIR=NEWDIR] + [-j num_threads] [--progress] [-q] [-v] + +""" +import os +import unittest +import shutil + +from .helpers.cfs_helpers import find_by_name +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException + +tblspace_name = 'cfs_tblspace' +tblspace_name_new = 'cfs_tblspace_new' + + +class CfsRestoreBase(ProbackupTest, unittest.TestCase): + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def setUp(self): + self.backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + self.node = self.make_simple_node( + base_dir="{0}/{1}/node".format(self.module_name, self.fname), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ +# 'ptrack_enable': 'on', + 'cfs_encryption': 'off', + } + ) + + self.init_pb(self.backup_dir) + self.add_instance(self.backup_dir, 'node', self.node) + self.set_archiving(self.backup_dir, 'node', self.node) + + self.node.slow_start() + self.create_tblspace_in_node(self.node, tblspace_name, cfs=True) + + self.add_data_in_cluster() + + self.backup_id = None + try: + self.backup_id = self.backup_node(self.backup_dir, 'node', self.node, backup_type='full') + except ProbackupException as e: + self.fail( + "ERROR: Full backup failed \n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + def add_data_in_cluster(self): + pass + + +class CfsRestoreNoencEmptyTablespaceTest(CfsRestoreBase): + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_restore_empty_tablespace_from_fullbackup(self): + """ + Case: Restore empty tablespace from valid full backup. 
+ """ + self.node.stop(["-m", "immediate"]) + self.node.cleanup() + shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) + + try: + self.restore_node(self.backup_dir, 'node', self.node, backup_id=self.backup_id) + except ProbackupException as e: + self.fail( + "ERROR: Restore failed. \n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + self.assertTrue( + find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ["pg_compression"]), + "ERROR: Restored data is not valid. pg_compression not found in tablespace dir." + ) + + try: + self.node.slow_start() + except ProbackupException as e: + self.fail( + "ERROR: Instance not started after restore. \n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + tblspace = self.node.safe_psql( + "postgres", + "SELECT * FROM pg_tablespace WHERE spcname='{0}'".format(tblspace_name) + ).decode("UTF-8") + self.assertTrue( + tblspace_name in tblspace and "compression=true" in tblspace, + "ERROR: The tablespace not restored or it restored without compressions" + ) + + +class CfsRestoreNoencTest(CfsRestoreBase): + def add_data_in_cluster(self): + self.node.safe_psql( + "postgres", + 'CREATE TABLE {0} TABLESPACE {1} \ + AS SELECT i AS id, MD5(i::text) AS text, \ + MD5(repeat(i::text,10))::tsvector AS tsvector \ + FROM generate_series(0,1e5) i'.format('t1', tblspace_name) + ) + self.table_t1 = self.node.safe_psql( + "postgres", + "SELECT * FROM t1" + ) + + # --- Restore from full backup ---# + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_restore_from_fullbackup_to_old_location(self): + """ + Case: Restore instance from valid full backup to old location. + """ + self.node.stop() + self.node.cleanup() + shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) + + try: + self.restore_node(self.backup_dir, 'node', self.node, backup_id=self.backup_id) + except ProbackupException as e: + self.fail( + "ERROR: Restore from full backup failed. \n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + self.assertTrue( + find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), + "ERROR: File pg_compression not found in tablespace dir" + ) + try: + self.node.slow_start() + except ProbackupException as e: + self.fail( + "ERROR: Instance not started after restore. \n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + self.assertEqual( + repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')), + repr(self.table_t1) + ) + + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_restore_from_fullbackup_to_old_location_3_jobs(self): + """ + Case: Restore instance from valid full backup to old location. + """ + self.node.stop() + self.node.cleanup() + shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) + + try: + self.restore_node(self.backup_dir, 'node', self.node, backup_id=self.backup_id, options=['-j', '3']) + except ProbackupException as e: + self.fail( + "ERROR: Restore from full backup failed. \n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + self.assertTrue( + find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), + "ERROR: File pg_compression not found in backup dir" + ) + try: + self.node.slow_start() + except ProbackupException as e: + self.fail( + "ERROR: Instance not started after restore. 
\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + self.assertEqual( + repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')), + repr(self.table_t1) + ) + + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_restore_from_fullbackup_to_new_location(self): + """ + Case: Restore instance from valid full backup to new location. + """ + self.node.stop() + self.node.cleanup() + shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) + + node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(self.module_name, self.fname)) + node_new.cleanup() + + try: + self.restore_node(self.backup_dir, 'node', node_new, backup_id=self.backup_id) + self.set_auto_conf(node_new, {'port': node_new.port}) + except ProbackupException as e: + self.fail( + "ERROR: Restore from full backup failed. \n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + self.assertTrue( + find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), + "ERROR: File pg_compression not found in backup dir" + ) + try: + node_new.slow_start() + except ProbackupException as e: + self.fail( + "ERROR: Instance not started after restore. \n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + self.assertEqual( + repr(node_new.safe_psql("postgres", "SELECT * FROM %s" % 't1')), + repr(self.table_t1) + ) + node_new.cleanup() + + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_restore_from_fullbackup_to_new_location_5_jobs(self): + """ + Case: Restore instance from valid full backup to new location. + """ + self.node.stop() + self.node.cleanup() + shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) + + node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(self.module_name, self.fname)) + node_new.cleanup() + + try: + self.restore_node(self.backup_dir, 'node', node_new, backup_id=self.backup_id, options=['-j', '5']) + self.set_auto_conf(node_new, {'port': node_new.port}) + except ProbackupException as e: + self.fail( + "ERROR: Restore from full backup failed. \n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + self.assertTrue( + find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), + "ERROR: File pg_compression not found in backup dir" + ) + try: + node_new.slow_start() + except ProbackupException as e: + self.fail( + "ERROR: Instance not started after restore. \n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + self.assertEqual( + repr(node_new.safe_psql("postgres", "SELECT * FROM %s" % 't1')), + repr(self.table_t1) + ) + node_new.cleanup() + + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_restore_from_fullbackup_to_old_location_tablespace_new_location(self): + self.node.stop() + self.node.cleanup() + shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) + + os.mkdir(self.get_tblspace_path(self.node, tblspace_name_new)) + + try: + self.restore_node( + self.backup_dir, + 'node', self.node, + backup_id=self.backup_id, + options=["-T", "{0}={1}".format( + self.get_tblspace_path(self.node, tblspace_name), + self.get_tblspace_path(self.node, tblspace_name_new) + ) + ] + ) + except ProbackupException as e: + self.fail( + "ERROR: Restore from full backup failed. 
\n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + self.assertTrue( + find_by_name([self.get_tblspace_path(self.node, tblspace_name_new)], ['pg_compression']), + "ERROR: File pg_compression not found in new tablespace location" + ) + try: + self.node.slow_start() + except ProbackupException as e: + self.fail( + "ERROR: Instance not started after restore. \n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + self.assertEqual( + repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')), + repr(self.table_t1) + ) + + # @unittest.expectedFailure + # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_restore_from_fullbackup_to_old_location_tablespace_new_location_3_jobs(self): + self.node.stop() + self.node.cleanup() + shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) + + os.mkdir(self.get_tblspace_path(self.node, tblspace_name_new)) + + try: + self.restore_node( + self.backup_dir, + 'node', self.node, + backup_id=self.backup_id, + options=["-j", "3", "-T", "{0}={1}".format( + self.get_tblspace_path(self.node, tblspace_name), + self.get_tblspace_path(self.node, tblspace_name_new) + ) + ] + ) + except ProbackupException as e: + self.fail( + "ERROR: Restore from full backup failed. \n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + self.assertTrue( + find_by_name([self.get_tblspace_path(self.node, tblspace_name_new)], ['pg_compression']), + "ERROR: File pg_compression not found in new tablespace location" + ) + try: + self.node.slow_start() + except ProbackupException as e: + self.fail( + "ERROR: Instance not started after restore. \n {0} \n {1}".format( + repr(self.cmd), + repr(e.message) + ) + ) + + self.assertEqual( + repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')), + repr(self.table_t1) + ) + + # @unittest.expectedFailure + @unittest.skip("skip") + def test_restore_from_fullbackup_to_new_location_tablespace_new_location(self): + pass + + # @unittest.expectedFailure + @unittest.skip("skip") + def test_restore_from_fullbackup_to_new_location_tablespace_new_location_5_jobs(self): + pass + + # @unittest.expectedFailure + @unittest.skip("skip") + def test_restore_from_ptrack(self): + """ + Case: Restore from backup to old location + """ + pass + + # @unittest.expectedFailure + @unittest.skip("skip") + def test_restore_from_ptrack_jobs(self): + """ + Case: Restore from backup to old location, four jobs + """ + pass + + # @unittest.expectedFailure + @unittest.skip("skip") + def test_restore_from_ptrack_new_jobs(self): + pass + +# --------------------------------------------------------- # + # @unittest.expectedFailure + @unittest.skip("skip") + def test_restore_from_page(self): + """ + Case: Restore from backup to old location + """ + pass + + # @unittest.expectedFailure + @unittest.skip("skip") + def test_restore_from_page_jobs(self): + """ + Case: Restore from backup to old location, four jobs + """ + pass + + # @unittest.expectedFailure + @unittest.skip("skip") + def test_restore_from_page_new_jobs(self): + """ + Case: Restore from backup to new location, four jobs + """ + pass + + +#class CfsRestoreEncEmptyTablespaceTest(CfsRestoreNoencEmptyTablespaceTest): +# # --- Begin --- # +# def setUp(self): +# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key" +# super(CfsRestoreNoencEmptyTablespaceTest, self).setUp() +# +# +#class CfsRestoreEncTest(CfsRestoreNoencTest): +# # --- Begin --- # +# def setUp(self): +# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key" +# 
super(CfsRestoreNoencTest, self).setUp() diff --git a/tests/cfs_validate_backup_test.py b/tests/cfs_validate_backup_test.py new file mode 100644 index 000000000..343020dfc --- /dev/null +++ b/tests/cfs_validate_backup_test.py @@ -0,0 +1,24 @@ +import os +import unittest +import random + +from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException + +tblspace_name = 'cfs_tblspace' + + +class CfsValidateBackupNoenc(ProbackupTest,unittest.TestCase): + def setUp(self): + pass + + def test_validate_fullbackup_empty_tablespace_after_delete_pg_compression(self): + pass + + def tearDown(self): + pass + + +#class CfsValidateBackupNoenc(CfsValidateBackupNoenc): +# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key" +# super(CfsValidateBackupNoenc).setUp() diff --git a/tests/checkdb_test.py b/tests/checkdb_test.py new file mode 100644 index 000000000..2caf4fcb2 --- /dev/null +++ b/tests/checkdb_test.py @@ -0,0 +1,849 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from datetime import datetime, timedelta +import subprocess +from testgres import QueryException +import shutil +import sys +import time + + +class CheckdbTest(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + def test_checkdb_amcheck_only_sanity(self): + """""" + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir="{0}/{1}/node".format(self.module_name, self.fname), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "create table t_heap as select i" + " as id from generate_series(0,100) i") + + node.safe_psql( + "postgres", + "create index on t_heap(id)") + + node.safe_psql( + "postgres", + "create table idxpart (a int) " + "partition by range (a)") + + node.safe_psql( + "postgres", + "create index on idxpart(a)") + + try: + node.safe_psql( + "postgres", + "create extension amcheck") + except QueryException as e: + node.safe_psql( + "postgres", + "create extension amcheck_next") + + log_file_path = os.path.join( + backup_dir, 'log', 'pg_probackup.log') + + # simple sanity + try: + self.checkdb_node( + options=['--skip-block-validation']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because --amcheck option is missing\n" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Option '--skip-block-validation' must be " + "used with '--amcheck' option", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + # simple sanity + output = self.checkdb_node( + options=[ + '--amcheck', + '--skip-block-validation', + '-d', 'postgres', '-p', str(node.port)]) + + self.assertIn( + 'INFO: checkdb --amcheck finished successfully', + output) + self.assertIn( + 'All checked indexes are valid', + output) + + # logging to file sanity + try: + self.checkdb_node( + options=[ + '--amcheck', + '--skip-block-validation', + '--log-level-file=verbose', + '-d', 'postgres', '-p', str(node.port)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because log_directory missing\n" + " 
Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Cannot save checkdb logs to a file. " + "You must specify --log-directory option when " + "running checkdb with --log-level-file option enabled", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + # If backup_dir provided, then instance name must be + # provided too + try: + self.checkdb_node( + backup_dir, + options=[ + '--amcheck', + '--skip-block-validation', + '--log-level-file=verbose', + '-d', 'postgres', '-p', str(node.port)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because log_directory missing\n" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: required parameter not specified: --instance", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + # checkdb can use default or set in config values, + # if backup_dir and instance name are provided + self.checkdb_node( + backup_dir, + 'node', + options=[ + '--amcheck', + '--skip-block-validation', + '--log-level-file=verbose', + '-d', 'postgres', '-p', str(node.port)]) + + # check that file present and full of messages + os.path.isfile(log_file_path) + with open(log_file_path) as f: + log_file_content = f.read() + self.assertIn( + 'INFO: checkdb --amcheck finished successfully', + log_file_content) + self.assertIn( + 'VERBOSE: (query)', + log_file_content) + os.unlink(log_file_path) + + # log-level-file and log-directory are provided + self.checkdb_node( + backup_dir, + 'node', + options=[ + '--amcheck', + '--skip-block-validation', + '--log-level-file=verbose', + '--log-directory={0}'.format( + os.path.join(backup_dir, 'log')), + '-d', 'postgres', '-p', str(node.port)]) + + # check that file present and full of messages + os.path.isfile(log_file_path) + with open(log_file_path) as f: + log_file_content = f.read() + self.assertIn( + 'INFO: checkdb --amcheck finished successfully', + log_file_content) + self.assertIn( + 'VERBOSE: (query)', + log_file_content) + os.unlink(log_file_path) + + gdb = self.checkdb_node( + gdb=True, + options=[ + '--amcheck', + '--skip-block-validation', + '--log-level-file=verbose', + '--log-directory={0}'.format( + os.path.join(backup_dir, 'log')), + '-d', 'postgres', '-p', str(node.port)]) + + gdb.set_breakpoint('amcheck_one_index') + gdb.run_until_break() + + node.safe_psql( + "postgres", + "drop table t_heap") + + gdb.remove_all_breakpoints() + + gdb.continue_execution_until_exit() + + # check that message about missing index is present + with open(log_file_path) as f: + log_file_content = f.read() + self.assertIn( + 'ERROR: checkdb --amcheck finished with failure', + log_file_content) + self.assertIn( + "WARNING: Thread [1]. 
Amcheck failed in database 'postgres' " + "for index: 'public.t_heap_id_idx':", + log_file_content) + self.assertIn( + 'ERROR: could not open relation with OID', + log_file_content) + + # Clean after yourself + gdb.kill() + node.stop() + + # @unittest.skip("skip") + def test_basic_checkdb_amcheck_only_sanity(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir="{0}/{1}/node".format(self.module_name, self.fname), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # create two databases + node.safe_psql("postgres", "create database db1") + try: + node.safe_psql( + "db1", + "create extension amcheck") + except QueryException as e: + node.safe_psql( + "db1", + "create extension amcheck_next") + + node.safe_psql("postgres", "create database db2") + try: + node.safe_psql( + "db2", + "create extension amcheck") + except QueryException as e: + node.safe_psql( + "db2", + "create extension amcheck_next") + + # init pgbench in two databases and corrupt both indexes + node.pgbench_init(scale=5, dbname='db1') + node.pgbench_init(scale=5, dbname='db2') + + node.safe_psql( + "db2", + "alter index pgbench_accounts_pkey rename to some_index") + + index_path_1 = os.path.join( + node.data_dir, + node.safe_psql( + "db1", + "select pg_relation_filepath('pgbench_accounts_pkey')").decode('utf-8').rstrip()) + + index_path_2 = os.path.join( + node.data_dir, + node.safe_psql( + "db2", + "select pg_relation_filepath('some_index')").decode('utf-8').rstrip()) + + try: + self.checkdb_node( + options=[ + '--amcheck', + '--skip-block-validation', + '-d', 'postgres', '-p', str(node.port)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because some db was not amchecked" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Some databases were not amchecked", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + node.stop() + + # Let`s do index corruption + with open(index_path_1, "rb+", 0) as f: + f.seek(42000) + f.write(b"blablahblahs") + f.flush() + f.close + + with open(index_path_2, "rb+", 0) as f: + f.seek(42000) + f.write(b"blablahblahs") + f.flush() + f.close + + node.slow_start() + + log_file_path = os.path.join( + backup_dir, 'log', 'pg_probackup.log') + + try: + self.checkdb_node( + options=[ + '--amcheck', + '--skip-block-validation', + '--log-level-file=verbose', + '--log-directory={0}'.format( + os.path.join(backup_dir, 'log')), + '-d', 'postgres', '-p', str(node.port)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because some db was not amchecked" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: checkdb --amcheck finished with failure", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + # corruption of both indexes in db1 and db2 must be detected + # also the that amcheck is not installed in 'postgres' + # should be logged + with open(log_file_path) as f: + log_file_content = f.read() + self.assertIn( + "WARNING: Thread [1]. 
Amcheck failed in database 'db1' " + "for index: 'public.pgbench_accounts_pkey':", + log_file_content) + + self.assertIn( + "WARNING: Thread [1]. Amcheck failed in database 'db2' " + "for index: 'public.some_index':", + log_file_content) + + self.assertIn( + "ERROR: checkdb --amcheck finished with failure", + log_file_content) + + # Clean after yourself + node.stop() + + # @unittest.skip("skip") + def test_checkdb_block_validation_sanity(self): + """make node, corrupt some pages, check that checkdb failed""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "create table t_heap as select 1 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1000) i") + node.safe_psql( + "postgres", + "CHECKPOINT;") + + heap_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + + # sanity + try: + self.checkdb_node() + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because pgdata must be specified\n" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: required parameter not specified: PGDATA (-D, --pgdata)", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.checkdb_node( + data_dir=node.data_dir, + options=['-d', 'postgres', '-p', str(node.port)]) + + self.checkdb_node( + backup_dir, 'node', + options=['-d', 'postgres', '-p', str(node.port)]) + + heap_full_path = os.path.join(node.data_dir, heap_path) + + with open(heap_full_path, "rb+", 0) as f: + f.seek(9000) + f.write(b"bla") + f.flush() + f.close + + with open(heap_full_path, "rb+", 0) as f: + f.seek(42000) + f.write(b"bla") + f.flush() + f.close + + try: + self.checkdb_node( + backup_dir, 'node', + options=['-d', 'postgres', '-p', str(node.port)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of data corruption\n" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Checkdb failed", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertIn( + 'WARNING: Corruption detected in file "{0}", block 1'.format( + os.path.normpath(heap_full_path)), + e.message) + + self.assertIn( + 'WARNING: Corruption detected in file "{0}", block 5'.format( + os.path.normpath(heap_full_path)), + e.message) + + # Clean after yourself + node.stop() + + def test_checkdb_checkunique(self): + """Test checkunique parameter of amcheck.bt_index_check function""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + node.slow_start() + + try: + node.safe_psql( + "postgres", + "create extension amcheck") + except QueryException as e: + node.safe_psql( + "postgres", + "create extension amcheck_next") + + # Part of https://commitfest.postgresql.org/32/2976/ patch test + 
node.safe_psql( + "postgres", + "CREATE TABLE bttest_unique(a varchar(50), b varchar(1500), c bytea, d varchar(50)); " + "ALTER TABLE bttest_unique SET (autovacuum_enabled = false); " + "CREATE UNIQUE INDEX bttest_unique_idx ON bttest_unique(a,b); " + "UPDATE pg_catalog.pg_index SET indisunique = false " + "WHERE indrelid = (SELECT oid FROM pg_catalog.pg_class WHERE relname = 'bttest_unique'); " + "INSERT INTO bttest_unique " + " SELECT i::text::varchar, " + " array_to_string(array( " + " SELECT substr('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789', ((random()*(36-1)+1)::integer), 1) " + " FROM generate_series(1,1300)),'')::varchar, " + " i::text::bytea, i::text::varchar " + " FROM generate_series(0,1) AS i, generate_series(0,30) AS x; " + "UPDATE pg_catalog.pg_index SET indisunique = true " + "WHERE indrelid = (SELECT oid FROM pg_catalog.pg_class WHERE relname = 'bttest_unique'); " + "DELETE FROM bttest_unique WHERE ctid::text='(0,2)'; " + "DELETE FROM bttest_unique WHERE ctid::text='(4,2)'; " + "DELETE FROM bttest_unique WHERE ctid::text='(4,3)'; " + "DELETE FROM bttest_unique WHERE ctid::text='(9,3)';") + + # run without checkunique option (error will not detected) + output = self.checkdb_node( + options=[ + '--amcheck', + '--skip-block-validation', + '-d', 'postgres', '-p', str(node.port)]) + + self.assertIn( + 'INFO: checkdb --amcheck finished successfully', + output) + self.assertIn( + 'All checked indexes are valid', + output) + + # run with checkunique option + try: + self.checkdb_node( + options=[ + '--amcheck', + '--skip-block-validation', + '--checkunique', + '-d', 'postgres', '-p', str(node.port)]) + if (ProbackupTest.enterprise and + (self.get_version(node) >= 111300 and self.get_version(node) < 120000 + or self.get_version(node) >= 120800 and self.get_version(node) < 130000 + or self.get_version(node) >= 130400)): + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of index corruption\n" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + else: + self.assertRegex( + self.output, + r"WARNING: Extension 'amcheck(|_next)' version [\d.]* in schema 'public' do not support 'checkunique' parameter") + except ProbackupException as e: + self.assertIn( + "ERROR: checkdb --amcheck finished with failure. Not all checked indexes are valid. All databases were amchecked.", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertIn( + "Amcheck failed in database 'postgres' for index: 'public.bttest_unique_idx': ERROR: index \"bttest_unique_idx\" is corrupted. 
There are tuples violating UNIQUE constraint", + e.message) + + # Clean after yourself + node.stop() + + # @unittest.skip("skip") + def test_checkdb_sigint_handling(self): + """""" + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + try: + node.safe_psql( + "postgres", + "create extension amcheck") + except QueryException as e: + node.safe_psql( + "postgres", + "create extension amcheck_next") + + # FULL backup + gdb = self.checkdb_node( + backup_dir, 'node', gdb=True, + options=[ + '-d', 'postgres', '-j', '2', + '--skip-block-validation', + '--progress', + '--amcheck', '-p', str(node.port)]) + + gdb.set_breakpoint('amcheck_one_index') + gdb.run_until_break() + + gdb.continue_execution_until_break(20) + gdb.remove_all_breakpoints() + + gdb._execute('signal SIGINT') + gdb.continue_execution_until_error() + + with open(node.pg_log_file, 'r') as f: + output = f.read() + + self.assertNotIn('could not receive data from client', output) + self.assertNotIn('could not send data to client', output) + self.assertNotIn('connection to client lost', output) + + # Clean after yourself + gdb.kill() + node.stop() + + # @unittest.skip("skip") + def test_checkdb_with_least_privileges(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + 'postgres', + 'CREATE DATABASE backupdb') + + try: + node.safe_psql( + "backupdb", + "create extension amcheck") + except QueryException as e: + node.safe_psql( + "backupdb", + "create extension amcheck_next") + + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC;") + + # PG 9.5 + if self.get_version(node) < 90600: + node.safe_psql( + 'backupdb', + 'CREATE ROLE backup WITH LOGIN; ' + 'GRANT CONNECT ON DATABASE backupdb to backup; ' + 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' + 'GRANT USAGE ON SCHEMA public TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' + 'GRANT SELECT ON TABLE 
pg_catalog.pg_namespace TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;') # amcheck-next function + + # PG 9.6 + elif self.get_version(node) > 90600 and self.get_version(node) < 100000: + node.safe_psql( + 'backupdb', + 'CREATE ROLE backup WITH LOGIN; ' + 'GRANT CONNECT ON DATABASE backupdb to backup; ' + 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' + 'GRANT USAGE ON SCHEMA public TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' +# 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;') + + # PG 10 + elif self.get_version(node) > 100000 and self.get_version(node) < 110000: + node.safe_psql( + 'backupdb', + 'CREATE ROLE backup WITH LOGIN; ' + 'GRANT CONNECT ON DATABASE backupdb to backup; ' + 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' + 'GRANT USAGE ON SCHEMA public TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION 
pg_catalog.set_config(text, text, boolean) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup;' + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup;') + + if ProbackupTest.enterprise: + # amcheck-1.1 + node.safe_psql( + 'backupdb', + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup') + else: + # amcheck-1.0 + node.safe_psql( + 'backupdb', + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup') + # >= 11 < 14 + elif self.get_version(node) > 110000 and self.get_version(node) < 140000: + node.safe_psql( + 'backupdb', + 'CREATE ROLE backup WITH LOGIN; ' + 'GRANT CONNECT ON DATABASE backupdb to backup; ' + 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' + 'GRANT USAGE ON SCHEMA public TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;') + + # checkunique parameter + if ProbackupTest.enterprise: + if (self.get_version(node) >= 111300 and self.get_version(node) < 120000 + or self.get_version(node) >= 120800 and self.get_version(node) < 130000 + or self.get_version(node) >= 130400): + node.safe_psql( + "backupdb", + "GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool, bool) TO backup") + # >= 14 + else: + node.safe_psql( + 'backupdb', + 'CREATE ROLE backup WITH LOGIN; ' + 'GRANT CONNECT ON DATABASE backupdb to backup; ' + 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' + 'GRANT USAGE ON SCHEMA public TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' + 
'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' + 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anycompatiblearray, anycompatible) TO backup; ' + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;') + + # checkunique parameter + if ProbackupTest.enterprise: + node.safe_psql( + "backupdb", + "GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool, bool) TO backup") + + if ProbackupTest.enterprise: + node.safe_psql( + 'backupdb', + 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;') + + # checkdb + try: + self.checkdb_node( + backup_dir, 'node', + options=[ + '--amcheck', '-U', 'backup', + '-d', 'backupdb', '-p', str(node.port)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because permissions are missing\n" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "INFO: Amcheck succeeded for database 'backupdb'", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertIn( + "WARNING: Extension 'amcheck' or 'amcheck_next' are " + "not installed in database postgres", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertIn( + "ERROR: Some databases were not amchecked", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + # Clean after yourself + node.stop() diff --git a/tests/compatibility_test.py b/tests/compatibility_test.py new file mode 100644 index 000000000..591afb069 --- /dev/null +++ b/tests/compatibility_test.py @@ -0,0 +1,1500 @@ +import unittest +import subprocess +import os +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from sys import exit +import shutil + + +def check_manual_tests_enabled(): + return 'PGPROBACKUP_MANUAL' in os.environ and os.environ['PGPROBACKUP_MANUAL'] == 'ON' + + +def check_ssh_agent_path_exists(): + return 'PGPROBACKUP_SSH_AGENT_PATH' in os.environ + + +class CompatibilityTest(ProbackupTest, unittest.TestCase): + + def setUp(self): + self.fname = self.id().split('.')[3] + + # @unittest.expectedFailure + @unittest.skipUnless(check_manual_tests_enabled(), 'skip 
manual test') + @unittest.skipUnless(check_ssh_agent_path_exists(), 'skip no ssh agent path exist') + # @unittest.skip("skip") + def test_catchup_with_different_remote_major_pg(self): + """ + Decription in jira issue PBCKP-236 + This test exposures ticket error using pg_probackup builds for both PGPROEE11 and PGPROEE9_6 + + Prerequisites: + - pg_probackup git tag for PBCKP 2.5.1 + - master pg_probackup build should be made for PGPROEE11 + - agent pg_probackup build should be made for PGPROEE9_6 + + Calling probackup PGPROEE9_6 pg_probackup agent from PGPROEE11 pg_probackup master for DELTA backup causes + the PBCKP-236 problem + + Please give env variables PROBACKUP_MANUAL=ON;PGPROBACKUP_SSH_AGENT_PATH= + for the test + + Please make path for agent's pgprobackup_ssh_agent_path = '/home/avaness/postgres/postgres.build.ee.9.6/bin/' + without pg_probackup executable + """ + + self.verbose = True + self.remote = True + # please use your own local path like + # pgprobackup_ssh_agent_path = '/home/avaness/postgres/postgres.build.clean/bin/' + pgprobackup_ssh_agent_path = os.environ['PGPROBACKUP_SSH_AGENT_PATH'] + + src_pg = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'src'), + set_replication=True, + ) + src_pg.slow_start() + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question AS SELECT 42 AS answer") + + # do full catchup + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode='FULL', + source_pgdata=src_pg.data_dir, + destination_node=dst_pg, + options=['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + + dst_options = {'port': str(dst_pg.port)} + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + dst_pg.stop() + + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question2 AS SELECT 42 AS answer") + + # do delta catchup with remote pg_probackup agent with another postgres major version + # this DELTA backup should fail without PBCKP-236 patch. 
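+        # Note: the '--remote-path' option below points the master binary at the
+        # agent directory from the other major-version build (see the docstring
+        # above); this substitution is what reproduces the PBCKP-236 failure on
+        # builds without the patch.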
+ self.catchup_node( + backup_mode='DELTA', + source_pgdata=src_pg.data_dir, + destination_node=dst_pg, + # here's substitution of --remoge-path pg_probackup agent compiled with another postgres version + options=['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--remote-path=' + pgprobackup_ssh_agent_path] + ) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_backward_compatibility_page(self): + """Description in jira issue PGPRO-434""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir, old_binary=True) + self.show_pb(backup_dir) + + self.add_instance(backup_dir, 'node', node, old_binary=True) + self.show_pb(backup_dir) + + self.set_archiving(backup_dir, 'node', node, old_binary=True) + node.slow_start() + + node.pgbench_init(scale=10) + + # FULL backup with old binary + self.backup_node( + backup_dir, 'node', node, old_binary=True) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + self.show_pb(backup_dir) + + self.validate_pb(backup_dir) + + # RESTORE old FULL with new binary + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, options=["-j", "4"]) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # Page BACKUP with old binary + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "4", "-T", "20"] + ) + pgbench.wait() + pgbench.stdout.close() + + self.backup_node( + backup_dir, 'node', node, backup_type='page', + old_binary=True) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + node_restored.cleanup() + self.restore_node( + backup_dir, 'node', node_restored, options=["-j", "4"]) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # Page BACKUP with new binary + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "4", "-T", "20"]) + + pgbench.wait() + pgbench.stdout.close() + + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, options=["-j", "4"]) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + node.safe_psql( + 'postgres', + 'create table tmp as select * from pgbench_accounts where aid < 1000') + + node.safe_psql( + 'postgres', + 'delete from pgbench_accounts') + + node.safe_psql( + 'postgres', + 'VACUUM') + + self.backup_node(backup_dir, 'node', node, backup_type='page') + + pgdata = self.pgdata_content(node.data_dir) + + node_restored.cleanup() + self.restore_node( + backup_dir, 'node', node_restored, options=["-j", "4"]) + + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + node.safe_psql( + 'postgres', + 'insert into pgbench_accounts select * from pgbench_accounts') + + self.backup_node(backup_dir, 'node', node, backup_type='page') + + pgdata = self.pgdata_content(node.data_dir) + + 
node_restored.cleanup() + self.restore_node( + backup_dir, 'node', node_restored, options=["-j", "4"]) + + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_backward_compatibility_delta(self): + """Description in jira issue PGPRO-434""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir, old_binary=True) + self.show_pb(backup_dir) + + self.add_instance(backup_dir, 'node', node, old_binary=True) + self.show_pb(backup_dir) + + self.set_archiving(backup_dir, 'node', node, old_binary=True) + node.slow_start() + + node.pgbench_init(scale=10) + + # FULL backup with old binary + self.backup_node( + backup_dir, 'node', node, old_binary=True) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + self.show_pb(backup_dir) + + self.validate_pb(backup_dir) + + # RESTORE old FULL with new binary + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, options=["-j", "4"]) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # Delta BACKUP with old binary + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "4", "-T", "20"] + ) + pgbench.wait() + pgbench.stdout.close() + + self.backup_node( + backup_dir, 'node', node, backup_type='delta', + old_binary=True) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + node_restored.cleanup() + self.restore_node( + backup_dir, 'node', node_restored, options=["-j", "4"]) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # Delta BACKUP with new binary + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "4", "-T", "20"] + ) + pgbench.wait() + pgbench.stdout.close() + + self.backup_node(backup_dir, 'node', node, backup_type='delta') + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, options=["-j", "4"]) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + node.safe_psql( + 'postgres', + 'create table tmp as select * from pgbench_accounts where aid < 1000') + + node.safe_psql( + 'postgres', + 'delete from pgbench_accounts') + + node.safe_psql( + 'postgres', + 'VACUUM') + + self.backup_node(backup_dir, 'node', node, backup_type='delta') + + pgdata = self.pgdata_content(node.data_dir) + + node_restored.cleanup() + self.restore_node( + backup_dir, 'node', node_restored, options=["-j", "4"]) + + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + node.safe_psql( + 'postgres', + 'insert into pgbench_accounts select * from pgbench_accounts') + + self.backup_node(backup_dir, 'node', node, backup_type='delta') + + pgdata = self.pgdata_content(node.data_dir) + + node_restored.cleanup() + self.restore_node( + backup_dir, 'node', node_restored, options=["-j", 
"4"]) + + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_backward_compatibility_ptrack(self): + """Description in jira issue PGPRO-434""" + + if not self.ptrack: + self.skipTest('Skipped because ptrack support is disabled') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir, old_binary=True) + self.show_pb(backup_dir) + + self.add_instance(backup_dir, 'node', node, old_binary=True) + self.show_pb(backup_dir) + + self.set_archiving(backup_dir, 'node', node, old_binary=True) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + node.pgbench_init(scale=10) + + # FULL backup with old binary + self.backup_node( + backup_dir, 'node', node, old_binary=True) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + self.show_pb(backup_dir) + + self.validate_pb(backup_dir) + + # RESTORE old FULL with new binary + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, options=["-j", "4"]) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # ptrack BACKUP with old binary + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "4", "-T", "20"] + ) + pgbench.wait() + pgbench.stdout.close() + + self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', + old_binary=True) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + node_restored.cleanup() + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "--recovery-target=latest", + "--recovery-target-action=promote"]) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # Ptrack BACKUP with new binary + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "4", "-T", "20"] + ) + pgbench.wait() + pgbench.stdout.close() + + self.backup_node( + backup_dir, 'node', node, backup_type='ptrack') + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "--recovery-target=latest", + "--recovery-target-action=promote"]) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_backward_compatibility_compression(self): + """Description in jira issue PGPRO-434""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir, old_binary=True) + self.add_instance(backup_dir, 'node', node, old_binary=True) + + self.set_archiving(backup_dir, 'node', node, old_binary=True) + node.slow_start() + + node.pgbench_init(scale=10) + + # FULL backup with 
OLD binary + backup_id = self.backup_node( + backup_dir, 'node', node, + old_binary=True, + options=['--compress']) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + # restore OLD FULL with new binary + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, + options=["-j", "4"]) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # PAGE backup with OLD binary + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "4", "-T", "10"]) + pgbench.wait() + pgbench.stdout.close() + + self.backup_node( + backup_dir, 'node', node, + backup_type='page', + old_binary=True, + options=['--compress']) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + node_restored.cleanup() + self.restore_node( + backup_dir, 'node', node_restored, + options=["-j", "4"]) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # PAGE backup with new binary + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "4", "-T", "10"]) + pgbench.wait() + pgbench.stdout.close() + + self.backup_node( + backup_dir, 'node', node, + backup_type='page', + options=['--compress']) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, + options=["-j", "4"]) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # Delta backup with old binary + self.delete_pb(backup_dir, 'node', backup_id) + + self.backup_node( + backup_dir, 'node', node, + old_binary=True, + options=['--compress']) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "4", "-T", "10"]) + + pgbench.wait() + pgbench.stdout.close() + + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', + options=['--compress'], + old_binary=True) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, + options=["-j", "4"]) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # Delta backup with new binary + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "4", "-T", "10"]) + + pgbench.wait() + pgbench.stdout.close() + + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', + options=['--compress']) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, + options=["-j", "4"]) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_backward_compatibility_merge(self): + """ + Create node, take FULL and PAGE backups with old binary, + merge them with new binary + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 
'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir, old_binary=True) + self.add_instance(backup_dir, 'node', node, old_binary=True) + + self.set_archiving(backup_dir, 'node', node, old_binary=True) + node.slow_start() + + # FULL backup with OLD binary + self.backup_node( + backup_dir, 'node', node, + old_binary=True) + + node.pgbench_init(scale=1) + + # PAGE backup with OLD binary + backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type='page', old_binary=True) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + self.merge_backup(backup_dir, "node", backup_id) + + self.show_pb(backup_dir, as_text=True, as_json=False) + + # restore OLD FULL with new binary + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, options=["-j", "4"]) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_backward_compatibility_merge_1(self): + """ + Create node, take FULL and PAGE backups with old binary, + merge them with new binary. + old binary version =< 2.2.7 + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir, old_binary=True) + self.add_instance(backup_dir, 'node', node, old_binary=True) + + self.set_archiving(backup_dir, 'node', node, old_binary=True) + node.slow_start() + + node.pgbench_init(scale=20) + + # FULL backup with OLD binary + self.backup_node(backup_dir, 'node', node, old_binary=True) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "1", "-T", "10", "--no-vacuum"]) + pgbench.wait() + pgbench.stdout.close() + + # PAGE1 backup with OLD binary + self.backup_node( + backup_dir, 'node', node, backup_type='page', old_binary=True) + + node.safe_psql( + 'postgres', + 'DELETE from pgbench_accounts') + + node.safe_psql( + 'postgres', + 'VACUUM pgbench_accounts') + + # PAGE2 backup with OLD binary + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='page', old_binary=True) + + pgdata = self.pgdata_content(node.data_dir) + + # merge chain created by old binary with new binary + output = self.merge_backup(backup_dir, "node", backup_id) + + # check that in-place is disabled + self.assertIn( + "WARNING: In-place merge is disabled " + "because of storage format incompatibility", output) + + # restore merged backup + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node(backup_dir, 'node', node_restored) + + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_backward_compatibility_merge_2(self): + """ + Create node, take FULL and PAGE backups with old binary, + merge them with new binary. 
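+        Each incremental backup is then merged one by one with the new binary
+        and restored, comparing the restored data directory against the snapshot
+        taken right after the corresponding backup.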
+ old binary version =< 2.2.7 + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir, old_binary=True) + self.add_instance(backup_dir, 'node', node, old_binary=True) + + self.set_archiving(backup_dir, 'node', node, old_binary=True) + node.slow_start() + + node.pgbench_init(scale=50) + + node.safe_psql( + 'postgres', + 'VACUUM pgbench_accounts') + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + # FULL backup with OLD binary + self.backup_node(backup_dir, 'node', node, old_binary=True) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "1", "-T", "10", "--no-vacuum"]) + pgbench.wait() + pgbench.stdout.close() + + # PAGE1 backup with OLD binary + page1 = self.backup_node( + backup_dir, 'node', node, + backup_type='page', old_binary=True) + + pgdata1 = self.pgdata_content(node.data_dir) + + node.safe_psql( + 'postgres', + "DELETE from pgbench_accounts where ctid > '(10,1)'") + + # PAGE2 backup with OLD binary + page2 = self.backup_node( + backup_dir, 'node', node, + backup_type='page', old_binary=True) + + pgdata2 = self.pgdata_content(node.data_dir) + + # PAGE3 backup with OLD binary + page3 = self.backup_node( + backup_dir, 'node', node, + backup_type='page', old_binary=True) + + pgdata3 = self.pgdata_content(node.data_dir) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "1", "-T", "10", "--no-vacuum"]) + pgbench.wait() + pgbench.stdout.close() + + # PAGE4 backup with NEW binary + page4 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + pgdata4 = self.pgdata_content(node.data_dir) + + # merge backups one by one and check data correctness + # merge PAGE1 + self.merge_backup( + backup_dir, "node", page1, options=['--log-level-file=VERBOSE']) + + # check data correctness for PAGE1 + node_restored.cleanup() + self.restore_node( + backup_dir, 'node', node_restored, backup_id=page1, + options=['--log-level-file=VERBOSE']) + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata1, pgdata_restored) + + # merge PAGE2 + self.merge_backup(backup_dir, "node", page2) + + # check data correctness for PAGE2 + node_restored.cleanup() + self.restore_node(backup_dir, 'node', node_restored, backup_id=page2) + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata2, pgdata_restored) + + # merge PAGE3 + self.show_pb(backup_dir, 'node', page3) + self.merge_backup(backup_dir, "node", page3) + self.show_pb(backup_dir, 'node', page3) + + # check data correctness for PAGE3 + node_restored.cleanup() + self.restore_node(backup_dir, 'node', node_restored, backup_id=page3) + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata3, pgdata_restored) + + # merge PAGE4 + self.merge_backup(backup_dir, "node", page4) + + # check data correctness for PAGE4 + node_restored.cleanup() + self.restore_node(backup_dir, 'node', node_restored, backup_id=page4) + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata4, pgdata_restored) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_backward_compatibility_merge_3(self): + """ + Create node, take FULL and PAGE backups with old 
binary, + merge them with new binary. + old binary version =< 2.2.7 + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir, old_binary=True) + self.add_instance(backup_dir, 'node', node, old_binary=True) + + self.set_archiving(backup_dir, 'node', node, old_binary=True) + node.slow_start() + + node.pgbench_init(scale=50) + + node.safe_psql( + 'postgres', + 'VACUUM pgbench_accounts') + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + # FULL backup with OLD binary + self.backup_node( + backup_dir, 'node', node, old_binary=True, options=['--compress']) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "1", "-T", "10", "--no-vacuum"]) + pgbench.wait() + pgbench.stdout.close() + + # PAGE1 backup with OLD binary + page1 = self.backup_node( + backup_dir, 'node', node, + backup_type='page', old_binary=True, options=['--compress']) + + pgdata1 = self.pgdata_content(node.data_dir) + + node.safe_psql( + 'postgres', + "DELETE from pgbench_accounts where ctid > '(10,1)'") + + # PAGE2 backup with OLD binary + page2 = self.backup_node( + backup_dir, 'node', node, + backup_type='page', old_binary=True, options=['--compress']) + + pgdata2 = self.pgdata_content(node.data_dir) + + # PAGE3 backup with OLD binary + page3 = self.backup_node( + backup_dir, 'node', node, + backup_type='page', old_binary=True, options=['--compress']) + + pgdata3 = self.pgdata_content(node.data_dir) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "1", "-T", "10", "--no-vacuum"]) + pgbench.wait() + pgbench.stdout.close() + + # PAGE4 backup with NEW binary + page4 = self.backup_node( + backup_dir, 'node', node, backup_type='page', options=['--compress']) + pgdata4 = self.pgdata_content(node.data_dir) + + # merge backups one by one and check data correctness + # merge PAGE1 + self.merge_backup( + backup_dir, "node", page1, options=['--log-level-file=VERBOSE']) + + # check data correctness for PAGE1 + node_restored.cleanup() + self.restore_node( + backup_dir, 'node', node_restored, backup_id=page1, + options=['--log-level-file=VERBOSE']) + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata1, pgdata_restored) + + # merge PAGE2 + self.merge_backup(backup_dir, "node", page2) + + # check data correctness for PAGE2 + node_restored.cleanup() + self.restore_node(backup_dir, 'node', node_restored, backup_id=page2) + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata2, pgdata_restored) + + # merge PAGE3 + self.show_pb(backup_dir, 'node', page3) + self.merge_backup(backup_dir, "node", page3) + self.show_pb(backup_dir, 'node', page3) + + # check data correctness for PAGE3 + node_restored.cleanup() + self.restore_node(backup_dir, 'node', node_restored, backup_id=page3) + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata3, pgdata_restored) + + # merge PAGE4 + self.merge_backup(backup_dir, "node", page4) + + # check data correctness for PAGE4 + node_restored.cleanup() + self.restore_node(backup_dir, 'node', node_restored, backup_id=page4) + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata4, pgdata_restored) + + 
# @unittest.expectedFailure + # @unittest.skip("skip") + def test_backward_compatibility_merge_4(self): + """ + Start merge between minor version, crash and retry it. + old binary version =< 2.4.0 + """ + if self.version_to_num(self.old_probackup_version) > self.version_to_num('2.4.0'): + self.assertTrue( + False, 'You need pg_probackup old_binary =< 2.4.0 for this test') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir, old_binary=True) + self.add_instance(backup_dir, 'node', node, old_binary=True) + + self.set_archiving(backup_dir, 'node', node, old_binary=True) + node.slow_start() + + node.pgbench_init(scale=20) + + node.safe_psql( + 'postgres', + 'VACUUM pgbench_accounts') + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + # FULL backup with OLD binary + self.backup_node( + backup_dir, 'node', node, old_binary=True, options=['--compress']) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "1", "-T", "20", "--no-vacuum"]) + pgbench.wait() + pgbench.stdout.close() + + # PAGE backup with NEW binary + page_id = self.backup_node( + backup_dir, 'node', node, backup_type='page', options=['--compress']) + pgdata = self.pgdata_content(node.data_dir) + + # merge PAGE4 + gdb = self.merge_backup(backup_dir, "node", page_id, gdb=True) + + gdb.set_breakpoint('rename') + gdb.run_until_break() + gdb.continue_execution_until_break(500) + gdb._execute('signal SIGKILL') + + try: + self.merge_backup(backup_dir, "node", page_id) + self.assertEqual( + 1, 0, + "Expecting Error because of format changes.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Retry of failed merge for backups with different " + "between minor versions is forbidden to avoid data corruption " + "because of storage format changes introduced in 2.4.0 version, " + "please take a new full backup", + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_backward_compatibility_merge_5(self): + """ + Create node, take FULL and PAGE backups with old binary, + merge them with new binary. 
+ old binary version >= STORAGE_FORMAT_VERSION (2.4.4) + """ + if self.version_to_num(self.old_probackup_version) < self.version_to_num('2.4.4'): + self.assertTrue( + False, 'OLD pg_probackup binary must be >= 2.4.4 for this test') + + self.assertNotEqual( + self.version_to_num(self.old_probackup_version), + self.version_to_num(self.probackup_version)) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir, old_binary=True) + self.add_instance(backup_dir, 'node', node, old_binary=True) + + self.set_archiving(backup_dir, 'node', node, old_binary=True) + node.slow_start() + + node.pgbench_init(scale=20) + + # FULL backup with OLD binary + self.backup_node(backup_dir, 'node', node, old_binary=True) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "1", "-T", "10", "--no-vacuum"]) + pgbench.wait() + pgbench.stdout.close() + + # PAGE1 backup with OLD binary + self.backup_node( + backup_dir, 'node', node, backup_type='page', old_binary=True) + + node.safe_psql( + 'postgres', + 'DELETE from pgbench_accounts') + + node.safe_psql( + 'postgres', + 'VACUUM pgbench_accounts') + + # PAGE2 backup with OLD binary + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='page', old_binary=True) + + pgdata = self.pgdata_content(node.data_dir) + + # merge chain created by old binary with new binary + output = self.merge_backup(backup_dir, "node", backup_id) + + # check that in-place is disabled + self.assertNotIn( + "WARNING: In-place merge is disabled " + "because of storage format incompatibility", output) + + # restore merged backup + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node(backup_dir, 'node', node_restored) + + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_page_vacuum_truncate(self): + """ + make node, create table, take full backup, + delete all data, vacuum relation, + take page backup, insert some data, + take second page backup, + restore latest page backup using new binary + and check data correctness + old binary should be 2.2.x version + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir, old_binary=True) + self.add_instance(backup_dir, 'node', node, old_binary=True) + self.set_archiving(backup_dir, 'node', node, old_binary=True) + node.slow_start() + + node.safe_psql( + "postgres", + "create sequence t_seq; " + "create table t_heap as select i as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1024) i") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + id1 = self.backup_node(backup_dir, 'node', node, old_binary=True) + pgdata1 = self.pgdata_content(node.data_dir) + + node.safe_psql( + "postgres", + "delete from t_heap") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + id2 = self.backup_node( + backup_dir, 'node', node, backup_type='page', old_binary=True) + pgdata2 = self.pgdata_content(node.data_dir) + + 
node.safe_psql( + "postgres", + "insert into t_heap select i as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1) i") + + id3 = self.backup_node( + backup_dir, 'node', node, backup_type='page', old_binary=True) + pgdata3 = self.pgdata_content(node.data_dir) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, + data_dir=node_restored.data_dir, backup_id=id1) + + # Physical comparison + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata1, pgdata_restored) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, + data_dir=node_restored.data_dir, backup_id=id2) + + # Physical comparison + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata2, pgdata_restored) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, + data_dir=node_restored.data_dir, backup_id=id3) + + # Physical comparison + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata3, pgdata_restored) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + node_restored.cleanup() + + # @unittest.skip("skip") + def test_page_vacuum_truncate_compression(self): + """ + make node, create table, take full backup, + delete all data, vacuum relation, + take page backup, insert some data, + take second page backup, + restore latest page backup using new binary + and check data correctness + old binary should be 2.2.x version + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir, old_binary=True) + self.add_instance(backup_dir, 'node', node, old_binary=True) + self.set_archiving(backup_dir, 'node', node, old_binary=True) + node.slow_start() + + node.safe_psql( + "postgres", + "create sequence t_seq; " + "create table t_heap as select i as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1024) i") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + self.backup_node( + backup_dir, 'node',node, old_binary=True, options=['--compress']) + + node.safe_psql( + "postgres", + "delete from t_heap") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + self.backup_node( + backup_dir, 'node', node, backup_type='page', + old_binary=True, options=['--compress']) + + node.safe_psql( + "postgres", + "insert into t_heap select i as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1) i") + + self.backup_node( + backup_dir, 'node', node, backup_type='page', + old_binary=True, options=['--compress']) + + pgdata = self.pgdata_content(node.data_dir) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node(backup_dir, 'node', node_restored) + + # Physical comparison + pgdata_restored = 
self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + # @unittest.skip("skip") + def test_page_vacuum_truncate_compressed_1(self): + """ + make node, create table, take full backup, + delete all data, vacuum relation, + take page backup, insert some data, + take second page backup, + restore latest page backup using new binary + and check data correctness + old binary should be 2.2.x version + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir, old_binary=True) + self.add_instance(backup_dir, 'node', node, old_binary=True) + self.set_archiving(backup_dir, 'node', node, old_binary=True) + node.slow_start() + + node.safe_psql( + "postgres", + "create sequence t_seq; " + "create table t_heap as select i as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1024) i") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + id1 = self.backup_node( + backup_dir, 'node', node, + old_binary=True, options=['--compress']) + pgdata1 = self.pgdata_content(node.data_dir) + + node.safe_psql( + "postgres", + "delete from t_heap") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + id2 = self.backup_node( + backup_dir, 'node', node, backup_type='page', + old_binary=True, options=['--compress']) + pgdata2 = self.pgdata_content(node.data_dir) + + node.safe_psql( + "postgres", + "insert into t_heap select i as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1) i") + + id3 = self.backup_node( + backup_dir, 'node', node, backup_type='page', + old_binary=True, options=['--compress']) + pgdata3 = self.pgdata_content(node.data_dir) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, + data_dir=node_restored.data_dir, backup_id=id1) + + # Physical comparison + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata1, pgdata_restored) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, + data_dir=node_restored.data_dir, backup_id=id2) + + # Physical comparison + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata2, pgdata_restored) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, + data_dir=node_restored.data_dir, backup_id=id3) + + # Physical comparison + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata3, pgdata_restored) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + node_restored.cleanup() + + # @unittest.skip("skip") + def test_hidden_files(self): + """ + old_version should be < 2.3.0 + Create hidden file in pgdata, take backup + with old binary, then try to delete backup + with new binary + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, 
self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir, old_binary=True) + self.add_instance(backup_dir, 'node', node, old_binary=True) + node.slow_start() + + open(os.path.join(node.data_dir, ".hidden_stuff"), 'a').close() + + backup_id = self.backup_node( + backup_dir, 'node',node, old_binary=True, options=['--stream']) + + self.delete_pb(backup_dir, 'node', backup_id) + + # @unittest.skip("skip") + def test_compatibility_tablespace(self): + """ + https://github.com/postgrespro/pg_probackup/issues/348 + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node, old_binary=True) + node.slow_start() + + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=["-j", "4", "--stream"], old_binary=True) + + tblspace_old_path = self.get_tblspace_path(node, 'tblspace_old') + + self.create_tblspace_in_node( + node, 'tblspace', + tblspc_path=tblspace_old_path) + + node.safe_psql( + "postgres", + "create table t_heap_lame tablespace tblspace " + "as select 1 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1000) i") + + tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new') + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + try: + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + tblspace_old_path, tblspace_new_path)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because tablespace mapping is incorrect" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Backup {0} has no tablespaceses, ' + 'nothing to remap'.format(backup_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=["-j", "4", "--stream"], old_binary=True) + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + tblspace_old_path, tblspace_new_path)]) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) diff --git a/tests/compression_test.py b/tests/compression_test.py new file mode 100644 index 000000000..94f2dffff --- /dev/null +++ b/tests/compression_test.py @@ -0,0 +1,495 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack +from datetime import datetime, timedelta +import subprocess + + +class CompressionTest(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_basic_compression_stream_zlib(self): + """ + make archive node, make full and page stream backups, + check data correctness in restored instance + """ + self.maxDiff = None + backup_dir = 
os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL BACKUP + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,256) i") + full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='full', + options=[ + '--stream', + '--compress-algorithm=zlib']) + + # PAGE BACKUP + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(256,512) i") + page_result = node.execute("postgres", "SELECT * FROM t_heap") + page_backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='page', + options=[ + '--stream', '--compress-algorithm=zlib']) + + # DELTA BACKUP + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(512,768) i") + delta_result = node.execute("postgres", "SELECT * FROM t_heap") + delta_backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--stream', '--compress-algorithm=zlib']) + + # Drop Node + node.cleanup() + + # Check full backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(full_backup_id), + self.restore_node( + backup_dir, 'node', node, backup_id=full_backup_id, + options=[ + "-j", "4", "--immediate", + "--recovery-target-action=promote"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + + full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(full_result, full_result_new) + node.cleanup() + + # Check page backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(page_backup_id), + self.restore_node( + backup_dir, 'node', node, backup_id=page_backup_id, + options=[ + "-j", "4", "--immediate", + "--recovery-target-action=promote"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + + page_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(page_result, page_result_new) + node.cleanup() + + # Check delta backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(delta_backup_id), + self.restore_node( + backup_dir, 'node', node, backup_id=delta_backup_id, + options=[ + "-j", "4", "--immediate", + "--recovery-target-action=promote"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + + delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(delta_result, delta_result_new) + + def test_compression_archive_zlib(self): + """ + make archive node, make full and page backups, + check data correctness in restored instance + """ + self.maxDiff = None + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + 
self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL BACKUP + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector from generate_series(0,1) i") + full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='full', + options=["--compress-algorithm=zlib"]) + + # PAGE BACKUP + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector " + "from generate_series(0,2) i") + page_result = node.execute("postgres", "SELECT * FROM t_heap") + page_backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='page', + options=["--compress-algorithm=zlib"]) + + # DELTA BACKUP + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector from generate_series(0,3) i") + delta_result = node.execute("postgres", "SELECT * FROM t_heap") + delta_backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--compress-algorithm=zlib']) + + # Drop Node + node.cleanup() + + # Check full backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(full_backup_id), + self.restore_node( + backup_dir, 'node', node, backup_id=full_backup_id, + options=[ + "-j", "4", "--immediate", + "--recovery-target-action=promote"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + + full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(full_result, full_result_new) + node.cleanup() + + # Check page backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(page_backup_id), + self.restore_node( + backup_dir, 'node', node, backup_id=page_backup_id, + options=[ + "-j", "4", "--immediate", + "--recovery-target-action=promote"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + + page_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(page_result, page_result_new) + node.cleanup() + + # Check delta backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(delta_backup_id), + self.restore_node( + backup_dir, 'node', node, backup_id=delta_backup_id, + options=[ + "-j", "4", "--immediate", + "--recovery-target-action=promote"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + + delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(delta_result, delta_result_new) + node.cleanup() + + def test_compression_stream_pglz(self): + """ + make archive node, make full and page stream backups, + check data correctness in restored instance + """ + self.maxDiff = None + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL BACKUP + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector 
" + "from generate_series(0,256) i") + full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='full', + options=['--stream', '--compress-algorithm=pglz']) + + # PAGE BACKUP + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(256,512) i") + page_result = node.execute("postgres", "SELECT * FROM t_heap") + page_backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='page', + options=['--stream', '--compress-algorithm=pglz']) + + # DELTA BACKUP + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(512,768) i") + delta_result = node.execute("postgres", "SELECT * FROM t_heap") + delta_backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--stream', '--compress-algorithm=pglz']) + + # Drop Node + node.cleanup() + + # Check full backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(full_backup_id), + self.restore_node( + backup_dir, 'node', node, backup_id=full_backup_id, + options=[ + "-j", "4", "--immediate", + "--recovery-target-action=promote"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + + full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(full_result, full_result_new) + node.cleanup() + + # Check page backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(page_backup_id), + self.restore_node( + backup_dir, 'node', node, backup_id=page_backup_id, + options=[ + "-j", "4", "--immediate", + "--recovery-target-action=promote"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + + page_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(page_result, page_result_new) + node.cleanup() + + # Check delta backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(delta_backup_id), + self.restore_node( + backup_dir, 'node', node, backup_id=delta_backup_id, + options=[ + "-j", "4", "--immediate", + "--recovery-target-action=promote"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + + delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(delta_result, delta_result_new) + node.cleanup() + + def test_compression_archive_pglz(self): + """ + make archive node, make full and page backups, + check data correctness in restored instance + """ + self.maxDiff = None + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL BACKUP + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector " + "from generate_series(0,100) i") + full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='full', + options=['--compress-algorithm=pglz']) + + 
# PAGE BACKUP + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector " + "from generate_series(100,200) i") + page_result = node.execute("postgres", "SELECT * FROM t_heap") + page_backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='page', + options=['--compress-algorithm=pglz']) + + # DELTA BACKUP + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector " + "from generate_series(200,300) i") + delta_result = node.execute("postgres", "SELECT * FROM t_heap") + delta_backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--compress-algorithm=pglz']) + + # Drop Node + node.cleanup() + + # Check full backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(full_backup_id), + self.restore_node( + backup_dir, 'node', node, backup_id=full_backup_id, + options=[ + "-j", "4", "--immediate", + "--recovery-target-action=promote"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + + full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(full_result, full_result_new) + node.cleanup() + + # Check page backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(page_backup_id), + self.restore_node( + backup_dir, 'node', node, backup_id=page_backup_id, + options=[ + "-j", "4", "--immediate", + "--recovery-target-action=promote"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + + page_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(page_result, page_result_new) + node.cleanup() + + # Check delta backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(delta_backup_id), + self.restore_node( + backup_dir, 'node', node, backup_id=delta_backup_id, + options=[ + "-j", "4", "--immediate", + "--recovery-target-action=promote"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + + delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(delta_result, delta_result_new) + node.cleanup() + + def test_compression_wrong_algorithm(self): + """ + make archive node, make full and page backups, + check data correctness in restored instance + """ + self.maxDiff = None + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='full', options=['--compress-algorithm=bla-blah']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because compress-algorithm is invalid.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertEqual( + e.message, + 'ERROR: invalid compress algorithm value "bla-blah"\n', + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_incompressible_pages(self): + """ + make archive node, create table with 
incompressible toast pages, + take backup with compression, make sure that page was not compressed, + restore backup and check data correctness + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Full + self.backup_node( + backup_dir, 'node', node, + options=[ + '--compress-algorithm=zlib', + '--compress-level=0']) + + node.pgbench_init(scale=3) + + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', + options=[ + '--compress-algorithm=zlib', + '--compress-level=0']) + + pgdata = self.pgdata_content(node.data_dir) + + node.cleanup() + + self.restore_node(backup_dir, 'node', node) + + # Physical comparison + if self.paranoia: + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + node.slow_start() diff --git a/tests/config_test.py b/tests/config_test.py new file mode 100644 index 000000000..b1a0f9295 --- /dev/null +++ b/tests/config_test.py @@ -0,0 +1,113 @@ +import unittest +import subprocess +import os +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from sys import exit +from shutil import copyfile + + +class ConfigTest(ProbackupTest, unittest.TestCase): + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_remove_instance_config(self): + """remove pg_probackup.conf""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.show_pb(backup_dir) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node) + + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + conf_file = os.path.join( + backup_dir, 'backups','node', 'pg_probackup.conf') + + os.unlink(conf_file) + + try: + self.backup_node( + backup_dir, 'node', node, backup_type='page') + self.assertEqual( + 1, 0, + "Expecting Error because pg_probackup.conf is missing. 
" + ".\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: could not open file "{0}": ' + 'No such file or directory'.format(conf_file), + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_corrupt_backup_content(self): + """corrupt backup_content.control""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + full1_id = self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + 'postgres', + 'create table t1()') + + full2_id = self.backup_node(backup_dir, 'node', node) + + full1_conf_file = os.path.join( + backup_dir, 'backups','node', full1_id, 'backup_content.control') + + full2_conf_file = os.path.join( + backup_dir, 'backups','node', full2_id, 'backup_content.control') + + copyfile(full2_conf_file, full1_conf_file) + + try: + self.validate_pb(backup_dir, 'node') + self.assertEqual( + 1, 0, + "Expecting Error because backup_content.control is corrupted." + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "WARNING: Invalid CRC of backup control file '{0}':".format(full1_conf_file), + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertIn( + "WARNING: Failed to get file list for backup {0}".format(full1_id), + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertIn( + "WARNING: Backup {0} file list is corrupted".format(full1_id), + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertEqual(self.show_pb(backup_dir, 'node', full1_id)['status'], 'CORRUPT') + + self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], 'CORRUPT') + self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], 'OK') diff --git a/tests/delete_test.py b/tests/delete_test.py new file mode 100644 index 000000000..10100887d --- /dev/null +++ b/tests/delete_test.py @@ -0,0 +1,822 @@ +import unittest +import os +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +import subprocess + + +class DeleteTest(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_delete_full_backups(self): + """delete full backups""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # full backup + self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + + self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + + self.backup_node(backup_dir, 'node', node) + + show_backups = self.show_pb(backup_dir, 
'node') + id_1 = show_backups[0]['id'] + id_2 = show_backups[1]['id'] + id_3 = show_backups[2]['id'] + self.delete_pb(backup_dir, 'node', id_2) + show_backups = self.show_pb(backup_dir, 'node') + self.assertEqual(show_backups[0]['id'], id_1) + self.assertEqual(show_backups[1]['id'], id_3) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_del_instance_archive(self): + """take two full backups, restore, then delete the instance from the catalog""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # full backup + self.backup_node(backup_dir, 'node', node) + + # full backup + self.backup_node(backup_dir, 'node', node) + + # restore + node.cleanup() + self.restore_node(backup_dir, 'node', node) + node.slow_start() + + # Delete instance + self.del_instance(backup_dir, 'node') + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_delete_archive_mix_compress_and_non_compressed_segments(self): + """delete expired backups and WAL from an archive with a mix of compressed and non-compressed segments""" + node = self.make_simple_node( + base_dir="{0}/{1}/node".format(self.module_name, self.fname), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving( + backup_dir, 'node', node, compress=False) + node.slow_start() + + # full backup + self.backup_node(backup_dir, 'node', node) + + node.pgbench_init(scale=10) + + # Restart archiving with compression + self.set_archiving(backup_dir, 'node', node, compress=True) + + node.restart() + + # full backup + self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + pgbench.wait() + + self.backup_node( + backup_dir, 'node', node, + options=[ + '--retention-redundancy=3', + '--delete-expired']) + + pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + pgbench.wait() + + self.backup_node( + backup_dir, 'node', node, + options=[ + '--retention-redundancy=3', + '--delete-expired']) + + pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + pgbench.wait() + + self.backup_node( + backup_dir, 'node', node, + options=[ + '--retention-redundancy=3', + '--delete-expired']) + + # @unittest.skip("skip") + def test_delete_increment_page(self): + """delete increment and all backups after it""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # full backup mode + self.backup_node(backup_dir, 'node', node) + # page backup mode + self.backup_node(backup_dir, 'node', node, backup_type="page") + # page backup mode + self.backup_node(backup_dir, 'node', node, backup_type="page") + # full backup mode + self.backup_node(backup_dir, 'node', node) + + show_backups = self.show_pb(backup_dir, 'node') + self.assertEqual(len(show_backups), 4) + + # delete first page backup + self.delete_pb(backup_dir, 'node', show_backups[1]['id']) + + show_backups = self.show_pb(backup_dir, 'node') + self.assertEqual(len(show_backups), 2) + + 
self.assertEqual(show_backups[0]['backup-mode'], "FULL") + self.assertEqual(show_backups[0]['status'], "OK") + self.assertEqual(show_backups[1]['backup-mode'], "FULL") + self.assertEqual(show_backups[1]['status'], "OK") + + # @unittest.skip("skip") + def test_delete_increment_ptrack(self): + """delete increment and all after him""" + if not self.ptrack: + self.skipTest('Skipped because ptrack support is disabled') + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + 'postgres', + 'CREATE EXTENSION ptrack') + + # full backup mode + self.backup_node(backup_dir, 'node', node) + # ptrack backup mode + self.backup_node(backup_dir, 'node', node, backup_type="ptrack") + # ptrack backup mode + self.backup_node(backup_dir, 'node', node, backup_type="ptrack") + # full backup mode + self.backup_node(backup_dir, 'node', node) + + show_backups = self.show_pb(backup_dir, 'node') + self.assertEqual(len(show_backups), 4) + + # delete first page backup + self.delete_pb(backup_dir, 'node', show_backups[1]['id']) + + show_backups = self.show_pb(backup_dir, 'node') + self.assertEqual(len(show_backups), 2) + + self.assertEqual(show_backups[0]['backup-mode'], "FULL") + self.assertEqual(show_backups[0]['status'], "OK") + self.assertEqual(show_backups[1]['backup-mode'], "FULL") + self.assertEqual(show_backups[1]['status'], "OK") + + # @unittest.skip("skip") + def test_delete_orphaned_wal_segments(self): + """ + make archive node, make three full backups, + delete second backup without --wal option, + then delete orphaned wals via --wal option + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "create table t_heap as select 1 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,10000) i") + # first full backup + backup_1_id = self.backup_node(backup_dir, 'node', node) + # second full backup + backup_2_id = self.backup_node(backup_dir, 'node', node) + # third full backup + backup_3_id = self.backup_node(backup_dir, 'node', node) + node.stop() + + # Check wals + wals_dir = os.path.join(backup_dir, 'wal', 'node') + wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f))] + original_wal_quantity = len(wals) + + # delete second full backup + self.delete_pb(backup_dir, 'node', backup_2_id) + # check wal quantity + self.validate_pb(backup_dir) + self.assertEqual(self.show_pb(backup_dir, 'node', backup_1_id)['status'], "OK") + self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK") + # try to delete wals for second backup + self.delete_pb(backup_dir, 'node', options=['--wal']) + # check wal quantity + self.validate_pb(backup_dir) + self.assertEqual(self.show_pb(backup_dir, 'node', backup_1_id)['status'], "OK") + self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK") + + # delete first full backup + 
self.delete_pb(backup_dir, 'node', backup_1_id) + self.validate_pb(backup_dir) + self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK") + + result = self.delete_pb(backup_dir, 'node', options=['--wal']) + # delete useless wals + self.assertTrue('On timeline 1 WAL segments between ' in result + and 'will be removed' in result) + + self.validate_pb(backup_dir) + self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK") + + # Check quantity, it should be lower than original + wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f))] + self.assertTrue(original_wal_quantity > len(wals), "Number of wals not changed after 'delete --wal' which is illegal") + + # Delete last backup + self.delete_pb(backup_dir, 'node', backup_3_id, options=['--wal']) + wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f))] + self.assertEqual (0, len(wals), "Number of wals should be equal to 0") + + # @unittest.skip("skip") + def test_delete_wal_between_multiple_timelines(self): + """ + /-------B1-- + A1----------------A2---- + + delete A1 backup, check that WAL segments on [A1, A2) and + [A1, B1) are deleted and backups B1 and A2 keep + their WAL + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + A1 = self.backup_node(backup_dir, 'node', node) + + # load some data to node + node.pgbench_init(scale=3) + + node2 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node2')) + node2.cleanup() + + self.restore_node(backup_dir, 'node', node2) + self.set_auto_conf(node2, {'port': node2.port}) + node2.slow_start() + + # load some more data to node + node.pgbench_init(scale=3) + + # take A2 + A2 = self.backup_node(backup_dir, 'node', node) + + # load some more data to node2 + node2.pgbench_init(scale=2) + + B1 = self.backup_node( + backup_dir, 'node', + node2, data_dir=node2.data_dir) + + self.delete_pb(backup_dir, 'node', backup_id=A1, options=['--wal']) + + self.validate_pb(backup_dir) + + # @unittest.skip("skip") + def test_delete_backup_with_empty_control_file(self): + """ + take backup, truncate its control file, + try to delete it via 'delete' command + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + set_replication=True) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # full backup mode + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + # page backup mode + self.backup_node( + backup_dir, 'node', node, backup_type="delta", options=['--stream']) + # page backup mode + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="delta", options=['--stream']) + + with open( + os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.control'), + 'wt') as f: + f.flush() + f.close() + + show_backups = self.show_pb(backup_dir, 'node') + self.assertEqual(len(show_backups), 3) + + self.delete_pb(backup_dir, 'node', backup_id=backup_id) + + # @unittest.skip("skip") + def test_delete_interleaved_incremental_chains(self): + 
"""complicated case of interleaved backup chains""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Take FULL BACKUPs + backup_id_a = self.backup_node(backup_dir, 'node', node) + backup_id_b = self.backup_node(backup_dir, 'node', node) + + # Change FULLb to ERROR + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + # FULLb ERROR + # FULLa OK + + # Take PAGEa1 backup + page_id_a1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + # Change FULLb to OK + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + # Change PAGEa1 to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR') + + # PAGEa1 ERROR + # FULLb OK + # FULLa OK + + page_id_b1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEb1 OK + # PAGEa1 ERROR + # FULLb OK + # FULLa OK + + # Now we start to play with first generation of PAGE backups + # Change PAGEb1 and FULLb status to ERROR + self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + # Change PAGEa1 status to OK + self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK') + + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + page_id_a2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEa2 OK + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + # Change PAGEa2 and FULla to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') + + # Change PAGEb1 and FULlb to OK + self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + # PAGEa2 ERROR + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa ERROR + + page_id_b2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Change PAGEa2 and FULLa status to OK + self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + # PAGEb2 OK + # PAGEa2 OK + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa OK + + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + + # PAGEc1 OK + # FULLc OK + # PAGEb2 OK + # PAGEa2 OK + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa OK + + # Delete FULLb + self.delete_pb( + backup_dir, 'node', backup_id_b) + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 5) + + print(self.show_pb( + backup_dir, 'node', as_json=False, as_text=True)) + + # @unittest.skip("skip") + def test_delete_multiple_descendants(self): + """ + PAGEb3 + | PAGEa3 + PAGEb2 / + | PAGEa2 / + PAGEb1 \ / + | PAGEa1 + FULLb | + FULLa should be deleted + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Take FULL BACKUPs + 
backup_id_a = self.backup_node(backup_dir, 'node', node) + backup_id_b = self.backup_node(backup_dir, 'node', node) + + # Change FULLb to ERROR + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + page_id_a1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Change FULLb to OK + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + # Change PAGEa1 backup status to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR') + + # PAGEa1 ERROR + # FULLb OK + # FULLa OK + + page_id_b1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEb1 OK + # PAGEa1 ERROR + # FULLb OK + # FULLa OK + + # Change PAGEa1 to OK + self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK') + + # Change PAGEb1 and FULLb backup status to ERROR + self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + page_id_a2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEa2 OK + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + # Change PAGEb1 and FULLb to OK + self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + # Change PAGEa2 and FULLa to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') + + # PAGEa2 ERROR + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa ERROR + + page_id_b2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEb2 OK + # PAGEa2 ERROR + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa ERROR + + # Change PAGEb2, PAGEb1 and FULLb to ERROR + self.change_backup_status(backup_dir, 'node', page_id_b2, 'ERROR') + self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + # Change FULLa to OK + self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') + + # PAGEb2 ERROR + # PAGEa2 ERROR + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + page_id_a3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEa3 OK + # PAGEb2 ERROR + # PAGEa2 ERROR + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + # Change PAGEa3 status to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a3, 'ERROR') + + # Change PAGEb2 and FULLb to OK + self.change_backup_status(backup_dir, 'node', page_id_b2, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + page_id_b3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEb3 OK + # PAGEa3 ERROR + # PAGEb2 OK + # PAGEa2 ERROR + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb OK + # FULLa OK + + # Change PAGEa3, PAGEa2 and PAGEb1 to OK + self.change_backup_status(backup_dir, 'node', page_id_a3, 'OK') + self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') + self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') + + # PAGEb3 OK + # PAGEa3 OK + # PAGEb2 OK + # PAGEa2 OK + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa OK + + # Check that page_id_a3 and page_id_a2 are both direct descendants of page_id_a1 + self.assertEqual( + self.show_pb(backup_dir, 'node', backup_id=page_id_a3)['parent-backup-id'], + page_id_a1) + + self.assertEqual( + self.show_pb(backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'], 
+ page_id_a1) + + # Delete FULLa + self.delete_pb(backup_dir, 'node', backup_id_a) + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) + + # @unittest.skip("skip") + def test_delete_multiple_descendants_dry_run(self): + """ + PAGEa3 + PAGEa2 / + \ / + PAGEa1 (delete target) + | + FULLa + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Take FULL BACKUP + node.pgbench_init(scale=1) + backup_id_a = self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + pgbench.wait() + page_id_a1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + pgbench.wait() + page_id_a2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + + # Change PAGEa2 to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR') + + pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + pgbench.wait() + page_id_a3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Change PAGEa2 to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') + + # Delete PAGEa1 + output = self.delete_pb( + backup_dir, 'node', page_id_a1, + options=['--dry-run', '--log-level-console=LOG', '--delete-wal']) + + print(output) + self.assertIn( + 'LOG: Backup {0} can be deleted'.format(page_id_a3), + output) + self.assertIn( + 'LOG: Backup {0} can be deleted'.format(page_id_a2), + output) + self.assertIn( + 'LOG: Backup {0} can be deleted'.format(page_id_a1), + output) + + self.assertIn( + 'INFO: Resident data size to free by ' + 'delete of backup {0} :'.format(page_id_a1), + output) + + self.assertIn( + 'On timeline 1 WAL segments between 000000010000000000000001 ' + 'and 000000010000000000000003 can be removed', + output) + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) + + output = self.delete_pb( + backup_dir, 'node', page_id_a1, + options=['--log-level-console=LOG', '--delete-wal']) + + self.assertIn( + 'LOG: Backup {0} will be deleted'.format(page_id_a3), + output) + self.assertIn( + 'LOG: Backup {0} will be deleted'.format(page_id_a2), + output) + self.assertIn( + 'LOG: Backup {0} will be deleted'.format(page_id_a1), + output) + self.assertIn( + 'INFO: Resident data size to free by ' + 'delete of backup {0} :'.format(page_id_a1), + output) + + self.assertIn( + 'On timeline 1 WAL segments between 000000010000000000000001 ' + 'and 000000010000000000000003 will be removed', + output) + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 1) + + self.validate_pb(backup_dir, 'node') + + def test_delete_error_backups(self): + """delete increment and all after him""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # full backup mode + self.backup_node(backup_dir, 'node', node) + # page backup mode + self.backup_node(backup_dir, 'node', node, backup_type="page") + + # Take FULL BACKUP + backup_id_a = 
self.backup_node(backup_dir, 'node', node) + # Take PAGE BACKUP + backup_id_b = self.backup_node(backup_dir, 'node', node, backup_type="page") + + backup_id_c = self.backup_node(backup_dir, 'node', node, backup_type="page") + + backup_id_d = self.backup_node(backup_dir, 'node', node, backup_type="page") + + # full backup mode + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type="page") + backup_id_e = self.backup_node(backup_dir, 'node', node, backup_type="page") + self.backup_node(backup_dir, 'node', node, backup_type="page") + + # Change status to ERROR + self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_c, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_e, 'ERROR') + + print(self.show_pb(backup_dir, as_text=True, as_json=False)) + + show_backups = self.show_pb(backup_dir, 'node') + self.assertEqual(len(show_backups), 10) + + # delete error backups + output = self.delete_pb(backup_dir, 'node', options=['--status=ERROR', '--dry-run']) + show_backups = self.show_pb(backup_dir, 'node') + self.assertEqual(len(show_backups), 10) + + self.assertIn( + "Deleting all backups with status 'ERROR' in dry run mode", + output) + + self.assertIn( + "INFO: Backup {0} with status OK can be deleted".format(backup_id_d), + output) + + print(self.show_pb(backup_dir, as_text=True, as_json=False)) + + show_backups = self.show_pb(backup_dir, 'node') + output = self.delete_pb(backup_dir, 'node', options=['--status=ERROR']) + print(output) + show_backups = self.show_pb(backup_dir, 'node') + self.assertEqual(len(show_backups), 4) + + self.assertEqual(show_backups[0]['status'], "OK") + self.assertEqual(show_backups[1]['status'], "OK") + self.assertEqual(show_backups[2]['status'], "OK") + self.assertEqual(show_backups[3]['status'], "OK") diff --git a/tests/delta_test.py b/tests/delta_test.py new file mode 100644 index 000000000..23583fd93 --- /dev/null +++ b/tests/delta_test.py @@ -0,0 +1,1201 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from datetime import datetime, timedelta +from testgres import QueryException +import subprocess +import time +from threading import Thread + + +class DeltaTest(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + def test_basic_delta_vacuum_truncate(self): + """ + make node, create table, take full backup, + delete last 3 pages, vacuum relation, + take delta backup, take second delta backup, + restore latest delta backup and check data correctness + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node_restored.cleanup() + node.slow_start() + + node.safe_psql( + "postgres", + "create sequence t_seq; " + "create table t_heap as select i as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1024) i;") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + node.safe_psql( + "postgres", + "delete from t_heap where ctid 
>= '(11,0)'") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + pgdata = self.pgdata_content(node.data_dir) + + self.restore_node( + backup_dir, 'node', node_restored) + + # Physical comparison + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + # @unittest.skip("skip") + def test_delta_vacuum_truncate_1(self): + """ + make node, create table, take full backup, + delete last 3 pages, vacuum relation, + take delta backup, take second delta backup, + restore latest delta backup and check data correctness + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + ) + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node_restored.cleanup() + node.slow_start() + self.create_tblspace_in_node(node, 'somedata') + + node.safe_psql( + "postgres", + "create sequence t_seq; " + "create table t_heap tablespace somedata as select i as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1024) i;" + ) + + node.safe_psql( + "postgres", + "vacuum t_heap" + ) + + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "delete from t_heap where ctid >= '(11,0)'" + ) + + node.safe_psql( + "postgres", + "vacuum t_heap" + ) + + self.backup_node( + backup_dir, 'node', node, backup_type='delta' + ) + + self.backup_node( + backup_dir, 'node', node, backup_type='delta' + ) + + pgdata = self.pgdata_content(node.data_dir) + + old_tablespace = self.get_tblspace_path(node, 'somedata') + new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') + + self.restore_node( + backup_dir, + 'node', + node_restored, + options=[ + "-T", "{0}={1}".format( + old_tablespace, new_tablespace)] + ) + + # Physical comparison + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + # @unittest.skip("skip") + def test_delta_vacuum_truncate_2(self): + """ + make node, create table, take full backup, + delete last 3 pages, vacuum relation, + take delta backup, take second delta backup, + restore latest delta backup and check data correctness + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + ) + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node_restored.cleanup() + node.slow_start() + + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from 
generate_series(0,10100000) i;" + ) + filepath = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')" + ).decode('utf-8').rstrip() + + self.backup_node(backup_dir, 'node', node) + + print(os.path.join(node.data_dir, filepath + '.1')) + os.unlink(os.path.join(node.data_dir, filepath + '.1')) + + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + pgdata = self.pgdata_content(node.data_dir) + + self.restore_node( + backup_dir, 'node', node_restored) + + # Physical comparison + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + # @unittest.skip("skip") + def test_delta_stream(self): + """ + make archive node, take full and delta stream backups, + restore them and check data correctness + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s' + } + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL BACKUP + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector " + "from generate_series(0,100) i") + + full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type='full', options=['--stream']) + + # delta BACKUP + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector " + "from generate_series(100,200) i") + delta_result = node.execute("postgres", "SELECT * FROM t_heap") + delta_backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['--stream']) + + # Drop Node + node.cleanup() + + # Check full backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(full_backup_id), + self.restore_node( + backup_dir, 'node', node, + backup_id=full_backup_id, + options=[ + "-j", "4", "--immediate", + "--recovery-target-action=promote"]), + '\n Unexpected Error Message: {0}\n' + ' CMD: {1}'.format(repr(self.output), self.cmd)) + node.slow_start() + full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(full_result, full_result_new) + node.cleanup() + + # Check delta backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(delta_backup_id), + self.restore_node( + backup_dir, 'node', node, + backup_id=delta_backup_id, + options=[ + "-j", "4", "--immediate", + "--recovery-target-action=promote"]), + '\n Unexpected Error Message: {0}\n' + ' CMD: {1}'.format(repr(self.output), self.cmd)) + node.slow_start() + delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(delta_result, delta_result_new) + node.cleanup() + + # @unittest.skip("skip") + def test_delta_archive(self): + """ + make archive node, take full and delta archive backups, + restore them and check data correctness + """ + self.maxDiff = None + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, 
self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL BACKUP + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector from generate_series(0,1) i") + full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='full') + + # delta BACKUP + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector from generate_series(0,2) i") + delta_result = node.execute("postgres", "SELECT * FROM t_heap") + delta_backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + # Drop Node + node.cleanup() + + # Restore and check full backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(full_backup_id), + self.restore_node( + backup_dir, 'node', node, + backup_id=full_backup_id, + options=[ + "-j", "4", "--immediate", + "--recovery-target-action=promote"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(full_result, full_result_new) + node.cleanup() + + # Restore and check delta backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(delta_backup_id), + self.restore_node( + backup_dir, 'node', node, + backup_id=delta_backup_id, + options=[ + "-j", "4", "--immediate", + "--recovery-target-action=promote"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(delta_result, delta_result_new) + node.cleanup() + + # @unittest.skip("skip") + def test_delta_multiple_segments(self): + """ + Make node, create table with multiple segments, + write some data to it, check delta and data correctness + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'fsync': 'off', + 'shared_buffers': '1GB', + 'maintenance_work_mem': '1GB', + 'full_page_writes': 'off' + } + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + # self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'somedata') + + # CREATE TABLE + node.pgbench_init( + scale=100, + options=['--tablespace=somedata', '--no-vacuum']) + # FULL BACKUP + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + # PGBENCH STUFF + pgbench = node.pgbench(options=['-T', '50', '-c', '1', '--no-vacuum']) + pgbench.wait() + node.safe_psql("postgres", "checkpoint") + + # GET LOGICAL CONTENT FROM NODE + result = node.safe_psql("postgres", "select count(*) from pgbench_accounts") + # delta BACKUP + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['--stream']) + # GET PHYSICAL CONTENT FROM NODE + pgdata = self.pgdata_content(node.data_dir) + + # RESTORE NODE + restored_node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'restored_node')) + restored_node.cleanup() + tblspc_path = 
self.get_tblspace_path(node, 'somedata') + tblspc_path_new = self.get_tblspace_path( + restored_node, 'somedata_restored') + + self.restore_node( + backup_dir, 'node', restored_node, + options=[ + "-j", "4", "-T", "{0}={1}".format( + tblspc_path, tblspc_path_new)]) + + # GET PHYSICAL CONTENT FROM NODE_RESTORED + pgdata_restored = self.pgdata_content(restored_node.data_dir) + + # START RESTORED NODE + self.set_auto_conf(restored_node, {'port': restored_node.port}) + restored_node.slow_start() + + result_new = restored_node.safe_psql( + "postgres", + "select count(*) from pgbench_accounts") + + # COMPARE RESTORED FILES + self.assertEqual(result, result_new, 'data is lost') + + if self.paranoia: + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_delta_vacuum_full(self): + """ + make node, make full and delta stream backups, + restore them and check data correctness + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + self.create_tblspace_in_node(node, 'somedata') + + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + node.safe_psql( + "postgres", + "create table t_heap tablespace somedata as select i" + " as id from generate_series(0,1000000) i" + ) + + pg_connect = node.connect("postgres", autocommit=True) + + gdb = self.gdb_attach(pg_connect.pid) + gdb.set_breakpoint('reform_and_rewrite_tuple') + + gdb.continue_execution_until_running() + + process = Thread( + target=pg_connect.execute, args=["VACUUM FULL t_heap"]) + process.start() + + while not gdb.stopped_in_breakpoint: + time.sleep(1) + + gdb.continue_execution_until_break(20) + + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['--stream']) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + gdb.remove_all_breakpoints() + gdb._execute('detach') + process.join() + + old_tablespace = self.get_tblspace_path(node, 'somedata') + new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') + + self.restore_node( + backup_dir, 'node', node_restored, + options=["-j", "4", "-T", "{0}={1}".format( + old_tablespace, new_tablespace)]) + + # Physical comparison + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + + node_restored.slow_start() + + # @unittest.skip("skip") + def test_create_db(self): + """ + Make node, take full backup, create database db1, take delta backup, + restore database and check it presense + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_size': '10GB', + } + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # FULL BACKUP + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + 
"md5(i::text)::tsvector as tsvector from generate_series(0,100) i") + + node.safe_psql("postgres", "SELECT * FROM t_heap") + self.backup_node( + backup_dir, 'node', node, + options=["--stream"]) + + # CREATE DATABASE DB1 + node.safe_psql("postgres", "create database db1") + node.safe_psql( + "db1", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") + + # DELTA BACKUP + backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type='delta', + options=["--stream"] + ) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + # RESTORE + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored') + ) + + node_restored.cleanup() + self.restore_node( + backup_dir, + 'node', + node_restored, + backup_id=backup_id, + options=[ + "-j", "4", + "--immediate", + "--recovery-target-action=promote"]) + + # COMPARE PHYSICAL CONTENT + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # START RESTORED NODE + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + # DROP DATABASE DB1 + node.safe_psql( + "postgres", "drop database db1") + # SECOND DELTA BACKUP + backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=["--stream"] + ) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + # RESTORE SECOND DELTA BACKUP + node_restored.cleanup() + self.restore_node( + backup_dir, + 'node', + node_restored, + backup_id=backup_id, + options=[ + "-j", "4", + "--immediate", + "--recovery-target-action=promote"] + ) + + # COMPARE PHYSICAL CONTENT + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # START RESTORED NODE + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + try: + node_restored.safe_psql('db1', 'select 1') + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because we are connecting to deleted database" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except QueryException as e: + self.assertTrue( + 'FATAL: database "db1" does not exist' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_exists_in_previous_backup(self): + """ + Make node, take full backup, create table, take page backup, + take delta backup, check that file is no fully copied to delta backup + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_size': '10GB', + 'checkpoint_timeout': '5min', + } + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL BACKUP + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") + + node.safe_psql("postgres", "SELECT * FROM t_heap") + filepath = node.safe_psql( + "postgres", + "SELECT pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + 
self.backup_node( + backup_dir, + 'node', + node, + options=["--stream"]) + + # PAGE BACKUP + backup_id = self.backup_node( + backup_dir, + 'node', + node, + backup_type='page' + ) + + fullpath = os.path.join( + backup_dir, 'backups', 'node', backup_id, 'database', filepath) + self.assertFalse(os.path.exists(fullpath)) + +# if self.paranoia: +# pgdata_page = self.pgdata_content( +# os.path.join( +# backup_dir, 'backups', +# 'node', backup_id, 'database')) + + # DELTA BACKUP + backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type='delta', + options=["--stream"] + ) +# if self.paranoia: +# pgdata_delta = self.pgdata_content( +# os.path.join( +# backup_dir, 'backups', +# 'node', backup_id, 'database')) +# self.compare_pgdata( +# pgdata_page, pgdata_delta) + + fullpath = os.path.join( + backup_dir, 'backups', 'node', backup_id, 'database', filepath) + self.assertFalse(os.path.exists(fullpath)) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + # RESTORE + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored') + ) + + node_restored.cleanup() + self.restore_node( + backup_dir, + 'node', + node_restored, + backup_id=backup_id, + options=[ + "-j", "4", + "--immediate", + "--recovery-target-action=promote"]) + + # COMPARE PHYSICAL CONTENT + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # START RESTORED NODE + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + # @unittest.skip("skip") + def test_alter_table_set_tablespace_delta(self): + """ + Make node, create tablespace with table, take full backup, + alter tablespace location, take delta backup, restore database. 
+ """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s', + } + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # FULL BACKUP + self.create_tblspace_in_node(node, 'somedata') + node.safe_psql( + "postgres", + "create table t_heap tablespace somedata as select i as id," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(0,100) i") + + # FULL backup + self.backup_node(backup_dir, 'node', node, options=["--stream"]) + + # ALTER TABLESPACE + self.create_tblspace_in_node(node, 'somedata_new') + node.safe_psql( + "postgres", + "alter table t_heap set tablespace somedata_new") + + # DELTA BACKUP + result = node.safe_psql( + "postgres", "select * from t_heap") + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', + options=["--stream"]) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + # RESTORE + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + self.get_tblspace_path(node, 'somedata'), + self.get_tblspace_path(node_restored, 'somedata') + ), + "-T", "{0}={1}".format( + self.get_tblspace_path(node, 'somedata_new'), + self.get_tblspace_path(node_restored, 'somedata_new') + ) + ] + ) + + # GET RESTORED PGDATA AND COMPARE + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # START RESTORED NODE + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + result_new = node_restored.safe_psql( + "postgres", "select * from t_heap") + + self.assertEqual(result, result_new, 'lost some data after restore') + + # @unittest.skip("skip") + def test_alter_database_set_tablespace_delta(self): + """ + Make node, take full backup, create database, + take delta backup, alter database tablespace location, + take delta backup restore last delta backup. 
+ """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + self.create_tblspace_in_node(node, 'somedata') + + # FULL backup + self.backup_node(backup_dir, 'node', node, options=["--stream"]) + + # CREATE DATABASE DB1 + node.safe_psql( + "postgres", + "create database db1 tablespace = 'somedata'") + node.safe_psql( + "db1", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") + + # DELTA BACKUP + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', + options=["--stream"] + ) + + # ALTER TABLESPACE + self.create_tblspace_in_node(node, 'somedata_new') + node.safe_psql( + "postgres", + "alter database db1 set tablespace somedata_new" + ) + + # DELTA BACKUP + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', + options=["--stream"] + ) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + # RESTORE + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored') + ) + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + self.get_tblspace_path(node, 'somedata'), + self.get_tblspace_path(node_restored, 'somedata') + ), + "-T", "{0}={1}".format( + self.get_tblspace_path(node, 'somedata_new'), + self.get_tblspace_path(node_restored, 'somedata_new') + ) + ] + ) + + # GET RESTORED PGDATA AND COMPARE + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # START RESTORED NODE + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + # @unittest.skip("skip") + def test_delta_delete(self): + """ + Make node, create tablespace with table, take full backup, + alter tablespace location, take delta backup, restore database. 
+ """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s', + } + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'somedata') + + # FULL backup + self.backup_node(backup_dir, 'node', node, options=["--stream"]) + + node.safe_psql( + "postgres", + "create table t_heap tablespace somedata as select i as id," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(0,100) i" + ) + + node.safe_psql( + "postgres", + "delete from t_heap" + ) + + node.safe_psql( + "postgres", + "vacuum t_heap" + ) + + # DELTA BACKUP + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', + options=["--stream"] + ) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + # RESTORE + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored') + ) + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + self.get_tblspace_path(node, 'somedata'), + self.get_tblspace_path(node_restored, 'somedata') + ) + ] + ) + + # GET RESTORED PGDATA AND COMPARE + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # START RESTORED NODE + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + def test_delta_nullified_heap_page_backup(self): + """ + make node, take full backup, nullify some heap block, + take delta backup, restore, physically compare pgdata`s + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=1) + + file_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('pgbench_accounts')").decode('utf-8').rstrip() + + node.safe_psql( + "postgres", + "CHECKPOINT") + + self.backup_node( + backup_dir, 'node', node) + + # Nullify some block in PostgreSQL + file = os.path.join(node.data_dir, file_path).replace("\\", "/") + if os.name == 'nt': + file = file.replace("\\", "/") + + with open(file, 'r+b', 0) as f: + f.seek(8192) + f.write(b"\x00"*8192) + f.flush() + f.close + + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=["--log-level-file=verbose"]) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + if not self.remote: + log_file_path = os.path.join(backup_dir, "log", "pg_probackup.log") + with open(log_file_path) as f: + content = f.read() + + self.assertIn( + 'VERBOSE: File: "{0}" blknum 1, empty page'.format(file), + content) + self.assertNotIn( + "Skipping blknum 1 in file: {0}".format(file), + content) + + # Restore DELTA backup + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored) + + if self.paranoia: + pgdata_restored 
= self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + def test_delta_backup_from_past(self): + """ + make node, take FULL stream backup, take DELTA stream backup, + restore FULL backup, try to take second DELTA stream backup + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + backup_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + node.pgbench_init(scale=3) + + # First DELTA + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['--stream']) + + # Restore FULL backup + node.cleanup() + self.restore_node(backup_dir, 'node', node, backup_id=backup_id) + node.slow_start() + + # Second DELTA backup + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['--stream']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because we are backing up an instance from the past" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'ERROR: Current START LSN ' in e.message and + 'is lower than START LSN ' in e.message and + 'of previous backup ' in e.message and + 'It may indicate that we are trying ' + 'to backup PostgreSQL instance from the past' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + @unittest.skip("skip") + # @unittest.expectedFailure + def test_delta_pg_resetxlog(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'shared_buffers': '512MB', + 'max_wal_size': '3GB'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # Create table + node.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap " + "as select nextval('t_seq')::int as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " +# "from generate_series(0,25600) i") + "from generate_series(0,2560) i") + + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + node.safe_psql( + 'postgres', + "update t_heap set id = nextval('t_seq'), text = md5(text), " + "tsvector = md5(repeat(tsvector::text, 10))::tsvector") + + # kill the bastard + if self.verbose: + print('Killing postmaster. Losing Ptrack changes') + node.stop(['-m', 'immediate', '-D', node.data_dir]) + + # now smack it with sledgehammer + if node.major_version >= 10: + pg_resetxlog_path = self.get_bin_path('pg_resetwal') + wal_dir = 'pg_wal' + else: + pg_resetxlog_path = self.get_bin_path('pg_resetxlog') + wal_dir = 'pg_xlog' + + self.run_binary( + [ + pg_resetxlog_path, + '-D', + node.data_dir, + '-o 42', + '-f' + ], + asynchronous=False) + + if not node.status(): + node.slow_start() + else: + print("Die! Die! Why won't you die?... 
Why won't you die?") + exit(1) + + # take ptrack backup +# self.backup_node( +# backup_dir, 'node', node, +# backup_type='delta', options=['--stream']) + + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['--stream']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because instance was brutalized by pg_resetxlog" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd) + ) + except ProbackupException as e: + self.assertIn( + 'Insert error message', + e.message, + '\n Unexpected Error Message: {0}\n' + ' CMD: {1}'.format(repr(e.message), self.cmd)) + +# pgdata = self.pgdata_content(node.data_dir) +# +# node_restored = self.make_simple_node( +# base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) +# node_restored.cleanup() +# +# self.restore_node( +# backup_dir, 'node', node_restored) +# +# pgdata_restored = self.pgdata_content(node_restored.data_dir) +# self.compare_pgdata(pgdata, pgdata_restored) diff --git a/tests/exclude_test.py b/tests/exclude_test.py new file mode 100644 index 000000000..cb3530cd5 --- /dev/null +++ b/tests/exclude_test.py @@ -0,0 +1,338 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException + + +class ExcludeTest(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + def test_exclude_temp_files(self): + """ + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'logging_collector': 'on', + 'log_filename': 'postgresql.log'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + oid = node.safe_psql( + 'postgres', + "select oid from pg_database where datname = 'postgres'").rstrip() + + pgsql_tmp_dir = os.path.join(node.data_dir, 'base', 'pgsql_tmp') + + os.mkdir(pgsql_tmp_dir) + + file = os.path.join(pgsql_tmp_dir, 'pgsql_tmp7351.16') + with open(file, 'w') as f: + f.write("HELLO") + f.flush() + f.close + + full_id = self.backup_node( + backup_dir, 'node', node, backup_type='full', options=['--stream']) + + file = os.path.join( + backup_dir, 'backups', 'node', full_id, + 'database', 'base', 'pgsql_tmp', 'pgsql_tmp7351.16') + + self.assertFalse( + os.path.exists(file), + "File must be excluded: {0}".format(file)) + + # TODO check temporary tablespaces + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_exclude_temp_tables(self): + """ + make node without archiving, create temp table, take full backup, + check that temp table not present in backup catalogue + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + with node.connect("postgres") as conn: + + conn.execute( + "create temp table test as " + "select generate_series(0,50050000)::text") + conn.commit() + + temp_schema_name = conn.execute( + "SELECT nspname FROM pg_namespace " + "WHERE oid = pg_my_temp_schema()")[0][0] + conn.commit() + + temp_toast_schema_name = "pg_toast_" + temp_schema_name.replace( + "pg_", "") + conn.commit() + + conn.execute("create index test_idx on test 
(generate_series)") + conn.commit() + + heap_path = conn.execute( + "select pg_relation_filepath('test')")[0][0] + conn.commit() + + index_path = conn.execute( + "select pg_relation_filepath('test_idx')")[0][0] + conn.commit() + + heap_oid = conn.execute("select 'test'::regclass::oid")[0][0] + conn.commit() + + toast_path = conn.execute( + "select pg_relation_filepath('{0}.{1}')".format( + temp_toast_schema_name, "pg_toast_" + str(heap_oid)))[0][0] + conn.commit() + + toast_idx_path = conn.execute( + "select pg_relation_filepath('{0}.{1}')".format( + temp_toast_schema_name, + "pg_toast_" + str(heap_oid) + "_index"))[0][0] + conn.commit() + + temp_table_filename = os.path.basename(heap_path) + temp_idx_filename = os.path.basename(index_path) + temp_toast_filename = os.path.basename(toast_path) + temp_idx_toast_filename = os.path.basename(toast_idx_path) + + self.backup_node( + backup_dir, 'node', node, backup_type='full', options=['--stream']) + + for root, dirs, files in os.walk(backup_dir): + for file in files: + if file in [ + temp_table_filename, temp_table_filename + ".1", + temp_idx_filename, + temp_idx_filename + ".1", + temp_toast_filename, + temp_toast_filename + ".1", + temp_idx_toast_filename, + temp_idx_toast_filename + ".1" + ]: + self.assertEqual( + 1, 0, + "Found temp table file in backup catalogue.\n " + "Filepath: {0}".format(file)) + + # @unittest.skip("skip") + def test_exclude_unlogged_tables_1(self): + """ + make node without archiving, create unlogged table, take full backup, + alter table to unlogged, take delta backup, restore delta backup, + check that PGDATA`s are physically the same + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + "shared_buffers": "10MB"}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + conn = node.connect() + with node.connect("postgres") as conn: + + conn.execute( + "create unlogged table test as " + "select generate_series(0,5005000)::text") + conn.commit() + + conn.execute("create index test_idx on test (generate_series)") + conn.commit() + + self.backup_node( + backup_dir, 'node', node, + backup_type='full', options=['--stream']) + + node.safe_psql('postgres', "alter table test set logged") + + self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--stream']) + + pgdata = self.pgdata_content(node.data_dir) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, options=["-j", "4"]) + + # Physical comparison + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_exclude_unlogged_tables_2(self): + """ + 1. make node, create unlogged, take FULL, DELTA, PAGE, + check that unlogged table files was not backed up + 2. 
restore FULL, DELTA, PAGE to empty db, + ensure unlogged table exist and is epmty + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + "shared_buffers": "10MB"}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + backup_ids = [] + + for backup_type in ['full', 'delta', 'page']: + + if backup_type == 'full': + node.safe_psql( + 'postgres', + 'create unlogged table test as select generate_series(0,20050000)::text') + else: + node.safe_psql( + 'postgres', + 'insert into test select generate_series(0,20050000)::text') + + rel_path = node.execute( + 'postgres', + "select pg_relation_filepath('test')")[0][0] + + backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type=backup_type, options=['--stream']) + + backup_ids.append(backup_id) + + filelist = self.get_backup_filelist( + backup_dir, 'node', backup_id) + + self.assertNotIn( + rel_path, filelist, + "Unlogged table was not excluded") + + self.assertNotIn( + rel_path + '.1', filelist, + "Unlogged table was not excluded") + + self.assertNotIn( + rel_path + '.2', filelist, + "Unlogged table was not excluded") + + self.assertNotIn( + rel_path + '.3', filelist, + "Unlogged table was not excluded") + + # ensure restoring retrieves back only empty unlogged table + for backup_id in backup_ids: + node.stop() + node.cleanup() + + self.restore_node(backup_dir, 'node', node, backup_id=backup_id) + + node.slow_start() + + self.assertEqual( + node.execute( + 'postgres', + 'select count(*) from test')[0][0], + 0) + + # @unittest.skip("skip") + def test_exclude_log_dir(self): + """ + check that by default 'log' and 'pg_log' directories are not backed up + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'logging_collector': 'on', + 'log_filename': 'postgresql.log'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.backup_node( + backup_dir, 'node', node, + backup_type='full', options=['--stream']) + + log_dir = node.safe_psql( + 'postgres', + 'show log_directory').decode('utf-8').rstrip() + + node.cleanup() + + self.restore_node( + backup_dir, 'node', node, options=["-j", "4"]) + + # check that PGDATA/log or PGDATA/pg_log do not exists + path = os.path.join(node.data_dir, log_dir) + log_file = os.path.join(path, 'postgresql.log') + self.assertTrue(os.path.exists(path)) + self.assertFalse(os.path.exists(log_file)) + + # @unittest.skip("skip") + def test_exclude_log_dir_1(self): + """ + check that "--backup-pg-log" works correctly + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'logging_collector': 'on', + 'log_filename': 'postgresql.log'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + log_dir = node.safe_psql( + 'postgres', + 'show log_directory').decode('utf-8').rstrip() + + self.backup_node( + backup_dir, 'node', node, 
+ backup_type='full', options=['--stream', '--backup-pg-log']) + + node.cleanup() + + self.restore_node( + backup_dir, 'node', node, options=["-j", "4"]) + + # check that PGDATA/log or PGDATA/pg_log do not exists + path = os.path.join(node.data_dir, log_dir) + log_file = os.path.join(path, 'postgresql.log') + self.assertTrue(os.path.exists(path)) + self.assertTrue(os.path.exists(log_file)) diff --git a/tests/external_test.py b/tests/external_test.py new file mode 100644 index 000000000..53f3c5449 --- /dev/null +++ b/tests/external_test.py @@ -0,0 +1,2405 @@ +import unittest +import os +from time import sleep +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.cfs_helpers import find_by_name +import shutil + + +# TODO: add some ptrack tests +class ExternalTest(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_basic_external(self): + """ + make node, create external directory, take backup + with external directory, restore backup, check that + external directory was successfully copied + """ + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) + shutil.rmtree(core_dir, ignore_errors=True) + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + set_replication=True) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + external_dir = self.get_tblspace_path(node, 'somedirectory') + + # create directory in external_directory + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # take FULL backup with external directory pointing to a file + file_path = os.path.join(core_dir, 'file') + with open(file_path, "w+") as f: + pass + + try: + self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=[ + '--external-dirs={0}'.format(file_path)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because external dir point to a file" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'ERROR: --external-dirs option' in e.message and + 'directory or symbolic link expected' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + sleep(1) + + # FULL backup + self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=["-j", "4", "--stream"]) + + # Fill external directories + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir, options=["-j", "4"]) + + # Full backup with external dir + self.backup_node( + backup_dir, 'node', node, + options=[ + '--external-dirs={0}'.format(external_dir)]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + self.restore_node( + backup_dir, 'node', node, options=["-j", "4"]) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_external_none(self): + """ + make node, create external directory, take backup + with external directory, take delta backup with --external-dirs=none, + restore delta backup, check that + external directory was not copied + """ + node = self.make_simple_node( + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + set_replication=True) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + external_dir = self.get_tblspace_path(node, 'somedirectory') + + # create directory in external_directory + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=["-j", "4", "--stream"]) + + # Fill external directories with data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir, options=["-j", "4"]) + + # Full backup with external dir + self.backup_node( + backup_dir, 'node', node, + options=[ + '--stream', + '--external-dirs={0}'.format(external_dir)]) + + # Delta backup without external directory + self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=['--external-dirs=none', '--stream']) + + shutil.rmtree(external_dir, ignore_errors=True) + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + self.restore_node( + backup_dir, 'node', node, options=["-j", "4"]) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_external_dirs_overlapping(self): + """ + make node, create directory, + take backup with two external directories pointing to + the same directory, backup should fail + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + set_replication=True) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + external_dir1 = self.get_tblspace_path(node, 'external_dir1') + external_dir2 = self.get_tblspace_path(node, 'external_dir2') + + # create directory in external_directory + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + os.mkdir(external_dir1) + os.mkdir(external_dir2) + + # Full backup with external dirs + try: + self.backup_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}{1}{0}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir1)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because tablespace mapping is incorrect" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'ERROR: External directory path (-E option)' in e.message and + 'contain another external directory' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_external_dir_mapping(self): + """ + make node, take full backup, check that restore with + external-dir mapping will end with error, take page backup, + check that restore with external-dir mapping will end with + success + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + 
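+        # WAL archiving is not configured for this test: the backups below
+        # are taken in STREAM mode (--stream).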
node.slow_start() + + self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=["-j", "4", "--stream"]) + + external_dir1 = self.get_tblspace_path(node, 'external_dir1') + external_dir2 = self.get_tblspace_path(node, 'external_dir2') + + # Fill external directories with data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2, options=["-j", "4"]) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + external_dir1_new = self.get_tblspace_path(node_restored, 'external_dir1') + external_dir2_new = self.get_tblspace_path(node_restored, 'external_dir2') + + try: + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "--external-mapping={0}={1}".format( + external_dir1, external_dir1_new), + "--external-mapping={0}={1}".format( + external_dir2, external_dir2_new)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because tablespace mapping is incorrect" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'ERROR: --external-mapping option' in e.message and + 'have an entry in list of external directories' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2)]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "--external-mapping={0}={1}".format( + external_dir1, external_dir1_new), + "--external-mapping={0}={1}".format( + external_dir2, external_dir2_new)]) + + pgdata_restored = self.pgdata_content( + node_restored.base_dir, exclude_dirs=['logs']) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_backup_multiple_external(self): + """check that cmdline has priority over config""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + external_dir1 = self.get_tblspace_path(node, 'external_dir1') + external_dir2 = self.get_tblspace_path(node, 'external_dir2') + + # FULL backup + self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=["-j", "4", "--stream"]) + + # fill external directories with data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2, options=["-j", "4"]) + + self.set_config( + backup_dir, 'node', + options=['-E', external_dir1]) + + # cmdline option MUST override options in config + self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", external_dir2]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs', 'external_dir1']) + + node.cleanup() + shutil.rmtree(external_dir1, 
ignore_errors=True) + shutil.rmtree(external_dir2, ignore_errors=True) + + self.restore_node( + backup_dir, 'node', node, + options=["-j", "4"]) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_external_backward_compatibility(self): + """ + take backup with old binary without external dirs support + take delta backup with new binary and 2 external directories + restore delta backup, check that incremental chain + restored correctly + """ + if not self.probackup_old_path: + self.skipTest("You must specify PGPROBACKUPBIN_OLD" + " for run this test") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir, old_binary=True) + self.show_pb(backup_dir) + + self.add_instance(backup_dir, 'node', node, old_binary=True) + self.show_pb(backup_dir) + + node.slow_start() + + node.pgbench_init(scale=3) + + # FULL backup with old binary without external dirs support + self.backup_node( + backup_dir, 'node', node, + old_binary=True, options=["-j", "4", "--stream"]) + + external_dir1 = self.get_tblspace_path(node, 'external_dir1') + external_dir2 = self.get_tblspace_path(node, 'external_dir2') + + # fill external directories with data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2, options=["-j", "4"]) + + pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, + old_binary=True, options=["-j", "4", "--stream"]) + + # fill external directories with changed data + shutil.rmtree(external_dir1, ignore_errors=True) + shutil.rmtree(external_dir2, ignore_errors=True) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2, options=["-j", "4"]) + + self.delete_pb(backup_dir, 'node', backup_id=backup_id) + + # delta backup with external directories using new binary + self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2)]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + # RESTORE chain with new binary + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + node_restored.cleanup() + + external_dir1_new = self.get_tblspace_path(node_restored, 'external_dir1') + external_dir2_new = self.get_tblspace_path(node_restored, 'external_dir2') + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "--external-mapping={0}={1}".format(external_dir1, external_dir1_new), + "--external-mapping={0}={1}".format(external_dir2, external_dir2_new)]) + + pgdata_restored = self.pgdata_content( + node_restored.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_external_backward_compatibility_merge_1(self): + """ + take backup with old binary without external dirs support 
+        take delta backup with new binary and 2 external directories
+        merge delta backup and restore it
+        """
+        if not self.probackup_old_path:
+            self.skipTest("You must specify PGPROBACKUPBIN_OLD"
+                          " to run this test")
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'])
+
+        self.init_pb(backup_dir, old_binary=True)
+        self.show_pb(backup_dir)
+
+        self.add_instance(backup_dir, 'node', node, old_binary=True)
+        self.show_pb(backup_dir)
+
+        node.slow_start()
+
+        node.pgbench_init(scale=3)
+
+        # tmp FULL backup with old binary
+        tmp_id = self.backup_node(
+            backup_dir, 'node', node,
+            old_binary=True, options=["-j", "4", "--stream"])
+
+        external_dir1 = self.get_tblspace_path(node, 'external_dir1')
+        external_dir2 = self.get_tblspace_path(node, 'external_dir2')
+
+        # fill external directories with data
+        self.restore_node(
+            backup_dir, 'node', node,
+            data_dir=external_dir1, options=["-j", "4"])
+
+        self.restore_node(
+            backup_dir, 'node', node,
+            data_dir=external_dir2, options=["-j", "4"])
+
+        self.delete_pb(backup_dir, 'node', backup_id=tmp_id)
+
+        # FULL backup with old binary without external dirs support
+        self.backup_node(
+            backup_dir, 'node', node,
+            old_binary=True, options=["-j", "4", "--stream"])
+
+        pgbench = node.pgbench(options=['-T', '30', '-c', '1'])
+        pgbench.wait()
+
+        # delta backup with external directories using new binary
+        backup_id = self.backup_node(
+            backup_dir, 'node', node, backup_type="delta",
+            options=[
+                "-j", "4", "--stream",
+                "-E", "{0}{1}{2}".format(
+                    external_dir1,
+                    self.EXTERNAL_DIRECTORY_DELIMITER,
+                    external_dir2)])
+
+        pgdata = self.pgdata_content(
+            node.base_dir, exclude_dirs=['logs'])
+
+        # Merge chain with new binary
+        self.merge_backup(backup_dir, 'node', backup_id=backup_id)
+
+        # Restore merged backup
+        node_restored = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
+
+        node_restored.cleanup()
+
+        external_dir1_new = self.get_tblspace_path(node_restored, 'external_dir1')
+        external_dir2_new = self.get_tblspace_path(node_restored, 'external_dir2')
+
+        self.restore_node(
+            backup_dir, 'node', node_restored,
+            options=[
+                "-j", "4",
+                "--external-mapping={0}={1}".format(external_dir1, external_dir1_new),
+                "--external-mapping={0}={1}".format(external_dir2, external_dir2_new)])
+
+        pgdata_restored = self.pgdata_content(
+            node_restored.base_dir, exclude_dirs=['logs'])
+
+        self.compare_pgdata(pgdata, pgdata_restored)
+
+    # @unittest.expectedFailure
+    # @unittest.skip("skip")
+    def test_external_backward_compatibility_merge_2(self):
+        """
+        take backup with old binary without external dirs support
+        take delta backup with new binary and 2 external directories
+        merge delta backup and restore it
+        """
+        if not self.probackup_old_path:
+            self.skipTest("You must specify PGPROBACKUPBIN_OLD"
+                          " to run this test")
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'])
+
+        self.init_pb(backup_dir, old_binary=True)
+        self.show_pb(backup_dir)
+
+        self.add_instance(backup_dir, 'node', node, old_binary=True)
+        self.show_pb(backup_dir)
+
+        node.slow_start()
+
+        node.pgbench_init(scale=3)
+
+        # tmp FULL backup with old binary
+        tmp_id = 
self.backup_node( + backup_dir, 'node', node, + old_binary=True, options=["-j", "4", "--stream"]) + + external_dir1 = self.get_tblspace_path(node, 'external_dir1') + external_dir2 = self.get_tblspace_path(node, 'external_dir2') + + # fill external directories with data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2, options=["-j", "4"]) + + self.delete_pb(backup_dir, 'node', backup_id=tmp_id) + + # FULL backup with old binary without external dirs support + self.backup_node( + backup_dir, 'node', node, + old_binary=True, options=["-j", "4", "--stream"]) + + pgbench = node.pgbench(options=['-T', '30', '-c', '1']) + pgbench.wait() + + # delta backup with external directories using new binary + self.backup_node( + backup_dir, 'node', node, + backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2)]) + + pgbench = node.pgbench(options=['-T', '30', '-c', '1']) + pgbench.wait() + + # Fill external dirs with changed data + shutil.rmtree(external_dir1, ignore_errors=True) + shutil.rmtree(external_dir2, ignore_errors=True) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1, + options=['-j', '4', '--skip-external-dirs']) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2, + options=['-j', '4', '--skip-external-dirs']) + + # delta backup without external directories using old binary + backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2)]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + # Merge chain using new binary + self.merge_backup(backup_dir, 'node', backup_id=backup_id) + + # Restore merged backup + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + node_restored.cleanup() + + external_dir1_new = self.get_tblspace_path( + node_restored, 'external_dir1') + external_dir2_new = self.get_tblspace_path( + node_restored, 'external_dir2') + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "--external-mapping={0}={1}".format( + external_dir1, external_dir1_new), + "--external-mapping={0}={1}".format( + external_dir2, external_dir2_new)]) + + pgdata_restored = self.pgdata_content( + node_restored.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_external_merge(self): + """""" + if not self.probackup_old_path: + self.skipTest("You must specify PGPROBACKUPBIN_OLD" + " for run this test") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node, old_binary=True) + node.slow_start() + + node.pgbench_init(scale=3) + + # take temp FULL backup + tmp_id = self.backup_node( + backup_dir, 'node', node, options=["-j", "4", "--stream"]) + + external_dir1 = self.get_tblspace_path(node, 'external_dir1') + external_dir2 = self.get_tblspace_path(node, 'external_dir2') + + # fill external directories with data 
+ self.restore_node( + backup_dir, 'node', node, backup_id=tmp_id, + data_dir=external_dir1, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, backup_id=tmp_id, + data_dir=external_dir2, options=["-j", "4"]) + + self.delete_pb(backup_dir, 'node', backup_id=tmp_id) + + # FULL backup with old binary without external dirs support + self.backup_node( + backup_dir, 'node', node, + old_binary=True, options=["-j", "4", "--stream"]) + + # change data a bit + pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # delta backup with external directories using new binary + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2)]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + print(self.show_pb(backup_dir, 'node', as_json=False, as_text=True)) + + # Merge + print(self.merge_backup(backup_dir, 'node', backup_id=backup_id, + options=['--log-level-file=VERBOSE'])) + + # RESTORE + node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + external_dir1_new = self.get_tblspace_path(node, 'external_dir1') + external_dir2_new = self.get_tblspace_path(node, 'external_dir2') + + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", + "--external-mapping={0}={1}".format( + external_dir1, external_dir1_new), + "--external-mapping={0}={1}".format( + external_dir2, external_dir2_new)]) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_external_merge_skip_external_dirs(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=3) + + # FULL backup with old data + tmp_id = self.backup_node( + backup_dir, 'node', node, options=["-j", "4", "--stream"]) + + external_dir1 = self.get_tblspace_path(node, 'external_dir1') + external_dir2 = self.get_tblspace_path(node, 'external_dir2') + + # fill external directories with old data + self.restore_node( + backup_dir, 'node', node, backup_id=tmp_id, + data_dir=external_dir1, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, backup_id=tmp_id, + data_dir=external_dir2, options=["-j", "4"]) + + self.delete_pb(backup_dir, 'node', backup_id=tmp_id) + + # change data a bit + pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # FULL backup with external directories + self.backup_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2)]) + + # drop old external data + shutil.rmtree(external_dir1, ignore_errors=True) + shutil.rmtree(external_dir2, ignore_errors=True) + + # fill external directories with new data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1, + options=["-j", "4", "--skip-external-dirs"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2, + options=["-j", "4", "--skip-external-dirs"]) + + # DELTA backup 
with external directories + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2)]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + # merge backups without external directories + self.merge_backup( + backup_dir, 'node', + backup_id=backup_id, options=['--skip-external-dirs']) + + # RESTORE + node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + self.restore_node( + backup_dir, 'node', node, + options=["-j", "4"]) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_external_merge_1(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=3) + + # FULL backup + self.backup_node( + backup_dir, 'node', node, options=["-j", "4", "--stream"]) + + external_dir1 = self.get_tblspace_path(node, 'external_dir1') + external_dir2 = self.get_tblspace_path(node, 'external_dir2') + + pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # FULL backup with changed data + backup_id = self.backup_node( + backup_dir, 'node', node, + options=["-j", "4", "--stream"]) + + # fill external directories with changed data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2, options=["-j", "4"]) + + self.delete_pb(backup_dir, 'node', backup_id=backup_id) + + # delta backup with external directories using new binary + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2)]) + + self.merge_backup(backup_dir, 'node', backup_id=backup_id) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + # RESTORE + node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + external_dir1_new = self.get_tblspace_path(node, 'external_dir1') + external_dir2_new = self.get_tblspace_path(node, 'external_dir2') + + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", + "--external-mapping={0}={1}".format(external_dir1, external_dir1_new), + "--external-mapping={0}={1}".format(external_dir2, external_dir2_new)]) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_external_merge_3(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=3) + + # FULL backup + self.backup_node(backup_dir, 'node', node, 
options=["-j", "4"]) + + external_dir1 = self.get_tblspace_path(node, 'external_dir1') + external_dir2 = self.get_tblspace_path(node, 'external_dir2') + + pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node) + + # fill external directories with changed data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2) + + self.delete_pb(backup_dir, 'node', backup_id=backup_id) + + # page backup with external directories + self.backup_node( + backup_dir, 'node', node, backup_type="page", + options=[ + "-j", "4", + "-E", "{0}{1}{2}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2)]) + + # page backup with external directories + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="page", + options=[ + "-j", "4", + "-E", "{0}{1}{2}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2)]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + self.merge_backup( + backup_dir, 'node', backup_id=backup_id, + options=['--log-level-file=verbose']) + + # RESTORE + node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + external_dir1_new = self.get_tblspace_path(node, 'external_dir1') + external_dir2_new = self.get_tblspace_path(node, 'external_dir2') + + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", + "--external-mapping={0}={1}".format( + external_dir1, external_dir1_new), + "--external-mapping={0}={1}".format( + external_dir2, external_dir2_new)]) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_external_merge_2(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=3) + + # FULL backup + self.backup_node( + backup_dir, 'node', node, options=["-j", "4", "--stream"]) + + external_dir1 = self.get_tblspace_path(node, 'external_dir1') + external_dir2 = self.get_tblspace_path(node, 'external_dir2') + + pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, + options=["-j", "4", "--stream"]) + + # fill external directories with changed data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2, options=["-j", "4"]) + + self.delete_pb(backup_dir, 'node', backup_id=backup_id) + + # delta backup with external directories + self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2)]) + + # delta backup with external directories + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2)]) 
+ + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + shutil.rmtree(external_dir1, ignore_errors=True) + shutil.rmtree(external_dir2, ignore_errors=True) + + # delta backup without external directories + self.merge_backup(backup_dir, 'node', backup_id=backup_id) + + # RESTORE + node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + external_dir1_new = self.get_tblspace_path(node, 'external_dir1') + external_dir2_new = self.get_tblspace_path(node, 'external_dir2') + + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", + "--external-mapping={0}={1}".format(external_dir1, external_dir1_new), + "--external-mapping={0}={1}".format(external_dir2, external_dir2_new)]) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_restore_external_changed_data(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=2) + + # set externals + external_dir1 = self.get_tblspace_path(node, 'external_dir1') + external_dir2 = self.get_tblspace_path(node, 'external_dir2') + + # FULL backup + tmp_id = self.backup_node( + backup_dir, 'node', + node, options=["-j", "4", "--stream"]) + + # fill external directories with data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2, options=["-j", "4"]) + + self.delete_pb(backup_dir, 'node', backup_id=tmp_id) + + # change data a bit + pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2)]) + + # fill external directories with changed data + shutil.rmtree(external_dir1, ignore_errors=True) + shutil.rmtree(external_dir2, ignore_errors=True) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1, backup_id=backup_id, + options=["-j", "4", "--skip-external-dirs"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2, backup_id=backup_id, + options=["-j", "4", "--skip-external-dirs"]) + + # change data a bit more + pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Delta backup with external directories + self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2)]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + # Restore + node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + self.restore_node( + backup_dir, 'node', node, + options=["-j", "4"]) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_restore_external_changed_data_1(self): + """""" + backup_dir = 
os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_size': '32MB'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=1) + + # set externals + external_dir1 = self.get_tblspace_path(node, 'external_dir1') + external_dir2 = self.get_tblspace_path(node, 'external_dir2') + + # FULL backup + tmp_id = self.backup_node( + backup_dir, 'node', + node, options=["-j", "4", "--stream"]) + + # fill external directories with data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2, options=["-j", "4"]) + + self.delete_pb(backup_dir, 'node', backup_id=tmp_id) + + # change data a bit + pgbench = node.pgbench(options=['-T', '5', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2)]) + + # fill external directories with changed data + shutil.rmtree(external_dir1, ignore_errors=True) + shutil.rmtree(external_dir2, ignore_errors=True) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1, backup_id=backup_id, + options=["-j", "4", "--skip-external-dirs"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2, backup_id=backup_id, + options=["-j", "4", "--skip-external-dirs"]) + + # change data a bit more + pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Delta backup with only one external directory + self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", external_dir1]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs', 'external_dir2']) + + # Restore + node.cleanup() + shutil.rmtree(node._base_dir) + + # create empty file in external_dir2 + os.mkdir(node._base_dir) + os.mkdir(external_dir2) + with open(os.path.join(external_dir2, 'file'), 'w+') as f: + f.close() + + output = self.restore_node( + backup_dir, 'node', node, + options=["-j", "4"]) + + self.assertNotIn( + 'externaldir2', + output) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs', 'external_dir2']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_merge_external_changed_data(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_size': '32MB'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=2) + + # set externals + external_dir1 = self.get_tblspace_path(node, 'external_dir1') + external_dir2 = self.get_tblspace_path(node, 'external_dir2') + + # FULL backup + tmp_id = self.backup_node( + backup_dir, 'node', + node, options=["-j", "4", "--stream"]) + + # fill external directories with data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1, options=["-j", "4"]) + + 
self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2, options=["-j", "4"]) + + self.delete_pb(backup_dir, 'node', backup_id=tmp_id) + + # change data a bit + pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2)]) + + # fill external directories with changed data + shutil.rmtree(external_dir1, ignore_errors=True) + shutil.rmtree(external_dir2, ignore_errors=True) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1, backup_id=backup_id, + options=["-j", "4", "--skip-external-dirs"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2, backup_id=backup_id, + options=["-j", "4", "--skip-external-dirs"]) + + # change data a bit more + pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Delta backup with external directories + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2)]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + # Merge + self.merge_backup(backup_dir, 'node', backup_id) + + # Restore + node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + self.restore_node( + backup_dir, 'node', node, + options=["-j", "4"]) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_restore_skip_external(self): + """ + Check that --skip-external-dirs works correctly + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + external_dir1 = self.get_tblspace_path(node, 'external_dir1') + external_dir2 = self.get_tblspace_path(node, 'external_dir2') + + # temp FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, options=["-j", "4", "--stream"]) + + # fill external directories with data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2, options=["-j", "4"]) + + self.delete_pb(backup_dir, 'node', backup_id=backup_id) + + # FULL backup with external directories + self.backup_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2)]) + + # delete first externals, so pgdata_compare + # will be capable of detecting redundant + # external files after restore + shutil.rmtree(external_dir1, ignore_errors=True) + shutil.rmtree(external_dir2, ignore_errors=True) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + # RESTORE + node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--skip-external-dirs"]) + + pgdata_restored = self.pgdata_content( + node.base_dir, 
exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_external_dir_is_symlink(self): + """ + Check that backup works correctly if external dir is symlink, + symlink pointing to external dir should be followed, + but restored as directory + """ + if os.name == 'nt': + self.skipTest('Skipped for Windows') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) + shutil.rmtree(core_dir, ignore_errors=True) + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + external_dir = self.get_tblspace_path(node, 'external_dir') + + # temp FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, options=["-j", "4", "--stream"]) + + # fill some directory with data + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) + symlinked_dir = os.path.join(core_dir, 'symlinked') + + self.restore_node( + backup_dir, 'node', node, + data_dir=symlinked_dir, options=["-j", "4"]) + + # drop temp FULL backup + self.delete_pb(backup_dir, 'node', backup_id=backup_id) + + # create symlink to directory in external directory + os.symlink(symlinked_dir, external_dir) + + # FULL backup with external directories + backup_id = self.backup_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--stream", + "-E", external_dir]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + # RESTORE + node_restored.cleanup() + + external_dir_new = self.get_tblspace_path( + node_restored, 'external_dir') + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", "--external-mapping={0}={1}".format( + external_dir, external_dir_new)]) + + pgdata_restored = self.pgdata_content( + node_restored.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + self.assertEqual( + external_dir, + self.show_pb( + backup_dir, 'node', + backup_id=backup_id)['external-dirs']) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_external_dir_contain_symlink_on_dir(self): + """ + Check that backup works correctly if external dir is symlink, + symlink pointing to external dir should be followed, + but restored as directory + """ + if os.name == 'nt': + self.skipTest('Skipped for Windows') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) + shutil.rmtree(core_dir, ignore_errors=True) + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + external_dir = self.get_tblspace_path(node, 'external_dir') + dir_in_external_dir = os.path.join(external_dir, 'dir') + + # temp FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, options=["-j", "4", "--stream"]) + + # fill some directory with data + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) + symlinked_dir = os.path.join(core_dir, 'symlinked') + + 
self.restore_node( + backup_dir, 'node', node, + data_dir=symlinked_dir, options=["-j", "4"]) + + # drop temp FULL backup + self.delete_pb(backup_dir, 'node', backup_id=backup_id) + + # create symlink to directory in external directory + os.mkdir(external_dir) + os.symlink(symlinked_dir, dir_in_external_dir) + + # FULL backup with external directories + backup_id = self.backup_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--stream", + "-E", external_dir]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + # RESTORE + node_restored.cleanup() + + external_dir_new = self.get_tblspace_path( + node_restored, 'external_dir') + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", "--external-mapping={0}={1}".format( + external_dir, external_dir_new)]) + + pgdata_restored = self.pgdata_content( + node_restored.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + self.assertEqual( + external_dir, + self.show_pb( + backup_dir, 'node', + backup_id=backup_id)['external-dirs']) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_external_dir_contain_symlink_on_file(self): + """ + Check that backup works correctly if external dir is symlink, + symlink pointing to external dir should be followed, + but restored as directory + """ + if os.name == 'nt': + self.skipTest('Skipped for Windows') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) + shutil.rmtree(core_dir, ignore_errors=True) + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + external_dir = self.get_tblspace_path(node, 'external_dir') + file_in_external_dir = os.path.join(external_dir, 'file') + + # temp FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, options=["-j", "4", "--stream"]) + + # fill some directory with data + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) + symlinked_dir = os.path.join(core_dir, 'symlinked') + + self.restore_node( + backup_dir, 'node', node, + data_dir=symlinked_dir, options=["-j", "4"]) + + # drop temp FULL backup + self.delete_pb(backup_dir, 'node', backup_id=backup_id) + + # create symlink to directory in external directory + src_file = os.path.join(symlinked_dir, 'postgresql.conf') + os.mkdir(external_dir) + os.chmod(external_dir, 0o0700) + os.symlink(src_file, file_in_external_dir) + + # FULL backup with external directories + backup_id = self.backup_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--stream", + "-E", external_dir]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + # RESTORE + node_restored.cleanup() + + external_dir_new = self.get_tblspace_path( + node_restored, 'external_dir') + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", "--external-mapping={0}={1}".format( + external_dir, external_dir_new)]) + + pgdata_restored = self.pgdata_content( + node_restored.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, 
pgdata_restored)
+
+        self.assertEqual(
+            external_dir,
+            self.show_pb(
+                backup_dir, 'node',
+                backup_id=backup_id)['external-dirs'])
+
+    # @unittest.expectedFailure
+    # @unittest.skip("skip")
+    def test_external_dir_is_tablespace(self):
+        """
+        Check that backup fails with error
+        if external directory points to tablespace
+        """
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        core_dir = os.path.join(self.tmp_path, self.module_name, self.fname)
+        shutil.rmtree(core_dir, ignore_errors=True)
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'])
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        node.slow_start()
+
+        external_dir = self.get_tblspace_path(node, 'external_dir')
+
+        self.create_tblspace_in_node(
+            node, 'tblspace1', tblspc_path=external_dir)
+
+        node.pgbench_init(scale=1, tablespace='tblspace1')
+
+        # FULL backup with external directories
+        try:
+            backup_id = self.backup_node(
+                backup_dir, 'node', node,
+                options=[
+                    "-j", "4", "--stream",
+                    "-E", external_dir])
+            # we should die here because exception is what we expect to happen
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because external dir points to the tablespace"
+                "\n Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd))
+        except ProbackupException as e:
+            self.assertIn(
+                'External directory path (-E option)',
+                e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))
+
+    def test_restore_external_dir_not_empty(self):
+        """
+        Check that restore fails with an error
+        if the target external directory is not empty and
+        if the remapped directory also isn't empty
+        """
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        core_dir = os.path.join(self.tmp_path, self.module_name, self.fname)
+        shutil.rmtree(core_dir, ignore_errors=True)
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'])
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        node.slow_start()
+
+        external_dir = self.get_tblspace_path(node, 'external_dir')
+
+        # create empty file in external directory
+        # open(os.path.join(external_dir, 'file'), 'a').close()
+        os.mkdir(external_dir)
+        with open(os.path.join(external_dir, 'file'), 'w+') as f:
+            f.close()
+
+        # FULL backup with external directory
+        self.backup_node(
+            backup_dir, 'node', node,
+            options=[
+                "-j", "4", "--stream",
+                "-E", external_dir])
+
+        node.cleanup()
+
+        try:
+            self.restore_node(backup_dir, 'node', node)
+            # we should die here because exception is what we expect to happen
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because external dir is not empty"
+                "\n Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd))
+        except ProbackupException as e:
+            self.assertIn(
+                'External directory is not empty',
+                e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))
+
+        external_dir_new = self.get_tblspace_path(node, 'external_dir_new')
+
+        # create empty file in directory, which will be a target of
+        # remapping
+        os.mkdir(external_dir_new)
+        with open(os.path.join(external_dir_new, 'file1'), 'w+') as f:
+            f.close()
+
+        try:
+            self.restore_node(
+                backup_dir, 'node', node,
+                options=['--external-mapping={0}={1}'.format(
+                    external_dir, external_dir_new)])
+            # we should die here 
because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because remapped external dir is not empty" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'External directory is not empty', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + def test_restore_external_dir_is_missing(self): + """ + take FULL backup with not empty external directory + delete external directory + take DELTA backup with external directory, which + should fail + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) + shutil.rmtree(core_dir, ignore_errors=True) + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + external_dir = self.get_tblspace_path(node, 'external_dir') + + # create empty file in external directory + # open(os.path.join(external_dir, 'file'), 'a').close() + os.mkdir(external_dir) + with open(os.path.join(external_dir, 'file'), 'w+') as f: + f.close() + + # FULL backup with external directory + self.backup_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--stream", + "-E", external_dir]) + + # drop external directory + shutil.rmtree(external_dir, ignore_errors=True) + + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', + options=[ + "-j", "4", "--stream", + "-E", external_dir]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because external dir is missing" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: External directory is not found:', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + sleep(1) + + # take DELTA without external directories + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', + options=["-j", "4", "--stream"]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + # Restore Delta backup + node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + self.compare_pgdata(pgdata, pgdata_restored) + + def test_merge_external_dir_is_missing(self): + """ + take FULL backup with not empty external directory + delete external directory + take DELTA backup with external directory, which + should fail, + take DELTA backup without external directory, + merge it into FULL, restore and check + data correctness + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) + shutil.rmtree(core_dir, ignore_errors=True) + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + external_dir = self.get_tblspace_path(node, 'external_dir') + + # create empty file in external directory + # open(os.path.join(external_dir, 'file'), 'a').close() 
+ os.mkdir(external_dir) + with open(os.path.join(external_dir, 'file'), 'w+') as f: + f.close() + + # FULL backup with external directory + self.backup_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--stream", + "-E", external_dir]) + + # drop external directory + shutil.rmtree(external_dir, ignore_errors=True) + + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', + options=[ + "-j", "4", "--stream", + "-E", external_dir]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because external dir is missing" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: External directory is not found:', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + sleep(1) + + # take DELTA without external directories + backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type='delta', + options=["-j", "4", "--stream"]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + # Merge + self.merge_backup(backup_dir, 'node', backup_id=backup_id) + + # Restore + node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + self.compare_pgdata(pgdata, pgdata_restored) + + def test_restore_external_dir_is_empty(self): + """ + take FULL backup with not empty external directory + drop external directory content + take DELTA backup with the same external directory + restore DELRA backup, check that restored + external directory is empty + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) + shutil.rmtree(core_dir, ignore_errors=True) + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + external_dir = self.get_tblspace_path(node, 'external_dir') + + # create empty file in external directory + # open(os.path.join(external_dir, 'file'), 'a').close() + os.mkdir(external_dir) + os.chmod(external_dir, 0o0700) + with open(os.path.join(external_dir, 'file'), 'w+') as f: + f.close() + + # FULL backup with external directory + self.backup_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--stream", + "-E", external_dir]) + + # make external directory empty + os.remove(os.path.join(external_dir, 'file')) + + # take DELTA backup with empty external directory + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', + options=[ + "-j", "4", "--stream", + "-E", external_dir]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + # Restore Delta backup + node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + self.compare_pgdata(pgdata, pgdata_restored) + + def test_merge_external_dir_is_empty(self): + """ + take FULL backup with not empty external directory + drop external directory content + take DELTA backup with the same external directory + merge backups and restore FULL, check that restored + external directory is empty + """ + backup_dir 
= os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        core_dir = os.path.join(self.tmp_path, self.module_name, self.fname)
+        shutil.rmtree(core_dir, ignore_errors=True)
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'])
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        node.slow_start()
+
+        external_dir = self.get_tblspace_path(node, 'external_dir')
+
+        # create empty file in external directory
+        # open(os.path.join(external_dir, 'file'), 'a').close()
+        os.mkdir(external_dir)
+        os.chmod(external_dir, 0o0700)
+        with open(os.path.join(external_dir, 'file'), 'w+') as f:
+            f.close()
+
+        # FULL backup with external directory
+        self.backup_node(
+            backup_dir, 'node', node,
+            options=[
+                "-j", "4", "--stream",
+                "-E", external_dir])
+
+        # make external directory empty
+        os.remove(os.path.join(external_dir, 'file'))
+
+        # take DELTA backup with empty external directory
+        backup_id = self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='delta',
+            options=[
+                "-j", "4", "--stream",
+                "-E", external_dir])
+
+        pgdata = self.pgdata_content(
+            node.base_dir, exclude_dirs=['logs'])
+
+        # Merge
+        self.merge_backup(backup_dir, 'node', backup_id=backup_id)
+
+        # Restore
+        node.cleanup()
+        shutil.rmtree(node.base_dir, ignore_errors=True)
+
+        self.restore_node(backup_dir, 'node', node)
+
+        pgdata_restored = self.pgdata_content(
+            node.base_dir, exclude_dirs=['logs'])
+        self.compare_pgdata(pgdata, pgdata_restored)
+
+    def test_restore_external_dir_string_order(self):
+        """
+        take FULL backup with two not empty external directories
+        add new files to both external directories
+        take DELTA backup with the same external directories
+        listed in reverse order
+        restore DELTA backup, check that the restored
+        external directories contain the correct data
+        """
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        core_dir = os.path.join(self.tmp_path, self.module_name, self.fname)
+        shutil.rmtree(core_dir, ignore_errors=True)
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'])
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        node.slow_start()
+
+        external_dir_1 = self.get_tblspace_path(node, 'external_dir_1')
+        external_dir_2 = self.get_tblspace_path(node, 'external_dir_2')
+
+        # create empty file in external directory
+        os.mkdir(external_dir_1)
+        os.chmod(external_dir_1, 0o0700)
+        with open(os.path.join(external_dir_1, 'fileA'), 'w+') as f:
+            f.close()
+
+        os.mkdir(external_dir_2)
+        os.chmod(external_dir_2, 0o0700)
+        with open(os.path.join(external_dir_2, 'fileZ'), 'w+') as f:
+            f.close()
+
+        # FULL backup with external directory
+        self.backup_node(
+            backup_dir, 'node', node,
+            options=[
+                "-j", "4", "--stream",
+                "-E", "{0}{1}{2}".format(
+                    external_dir_1,
+                    self.EXTERNAL_DIRECTORY_DELIMITER,
+                    external_dir_2)])
+
+        with open(os.path.join(external_dir_1, 'fileB'), 'w+') as f:
+            f.close()
+
+        with open(os.path.join(external_dir_2, 'fileY'), 'w+') as f:
+            f.close()
+
+        # take DELTA backup and swap external_dir_2 and external_dir_1
+        # in external_dir_str
+        self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='delta',
+            options=[
+                "-j", "4", "--stream",
+                "-E", "{0}{1}{2}".format(
+                    external_dir_2,
+                    self.EXTERNAL_DIRECTORY_DELIMITER,
+                    external_dir_1)])
+
+        pgdata = self.pgdata_content(
+            node.base_dir, exclude_dirs=['logs'])
+
+        # Restore Delta backup
+ 
node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + def test_merge_external_dir_string_order(self): + """ + take FULL backup with not empty external directory + drop external directory content + take DELTA backup with the same external directory + restore DELRA backup, check that restored + external directory is empty + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) + shutil.rmtree(core_dir, ignore_errors=True) + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + external_dir_1 = self.get_tblspace_path(node, 'external_dir_1') + external_dir_2 = self.get_tblspace_path(node, 'external_dir_2') + + # create empty file in external directory + os.mkdir(external_dir_1) + os.chmod(external_dir_1, 0o0700) + with open(os.path.join(external_dir_1, 'fileA'), 'w+') as f: + f.close() + + os.mkdir(external_dir_2) + os.chmod(external_dir_2, 0o0700) + with open(os.path.join(external_dir_2, 'fileZ'), 'w+') as f: + f.close() + + # FULL backup with external directory + self.backup_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}".format( + external_dir_1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir_2)]) + + with open(os.path.join(external_dir_1, 'fileB'), 'w+') as f: + f.close() + + with open(os.path.join(external_dir_2, 'fileY'), 'w+') as f: + f.close() + + # take DELTA backup and swap external_dir_2 and external_dir_1 + # in external_dir_str + backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type='delta', + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}".format( + external_dir_2, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir_1)]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + # Merge backups + self.merge_backup(backup_dir, 'node', backup_id=backup_id) + + # Restore + node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_smart_restore_externals(self): + """ + make node, create database, take full backup with externals, + take incremental backup without externals and restore it, + make sure that files from externals are not copied during restore + https://github.com/postgrespro/pg_probackup/issues/63 + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # fill external directories with data + tmp_id = self.backup_node(backup_dir, 'node', node) + + external_dir_1 = self.get_tblspace_path(node, 'external_dir_1') + external_dir_2 = self.get_tblspace_path(node, 'external_dir_2') + + self.restore_node( + backup_dir, 
'node', node, backup_id=tmp_id, + data_dir=external_dir_1, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, backup_id=tmp_id, + data_dir=external_dir_2, options=["-j", "4"]) + + self.delete_pb(backup_dir, 'node', backup_id=tmp_id) + + # create database + node.safe_psql( + "postgres", + "CREATE DATABASE testdb") + + # take FULL backup + full_id = self.backup_node(backup_dir, 'node', node) + + # drop database + node.safe_psql( + "postgres", + "DROP DATABASE testdb") + + # take PAGE backup + page_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # restore PAGE backup + node.cleanup() + self.restore_node( + backup_dir, 'node', node, backup_id=page_id, + options=['--no-validate', '--log-level-file=VERBOSE']) + + logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log') + with open(logfile, 'r') as f: + logfile_content = f.read() + + # get delta between FULL and PAGE filelists + filelist_full = self.get_backup_filelist( + backup_dir, 'node', full_id) + + filelist_page = self.get_backup_filelist( + backup_dir, 'node', page_id) + + filelist_diff = self.get_backup_filelist_diff( + filelist_full, filelist_page) + + for file in filelist_diff: + self.assertNotIn(file, logfile_content) + + # @unittest.skip("skip") + def test_external_validation(self): + """ + make node, create database, + take full backup with external directory, + corrupt external file in backup, + run validate which should fail + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # take temp FULL backup + tmp_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + external_dir = self.get_tblspace_path(node, 'external_dir') + + # fill external directories with data + self.restore_node( + backup_dir, 'node', node, backup_id=tmp_id, + data_dir=external_dir, options=["-j", "4"]) + + self.delete_pb(backup_dir, 'node', backup_id=tmp_id) + + # take FULL backup + full_id = self.backup_node( + backup_dir, 'node', node, + options=[ + '--stream', '-E', "{0}".format(external_dir)]) + + # Corrupt file + file = os.path.join( + backup_dir, 'backups', 'node', full_id, + 'external_directories', 'externaldir1', 'postgresql.auto.conf') + + with open(file, "r+b", 0) as f: + f.seek(42) + f.write(b"blah") + f.flush() + f.close + + try: + self.validate_pb(backup_dir) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because file in external dir is corrupted" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'WARNING: Invalid CRC of backup file', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'CORRUPT', + self.show_pb(backup_dir, 'node', full_id)['status'], + 'Backup STATUS should be "CORRUPT"') diff --git a/tests/false_positive_test.py b/tests/false_positive_test.py new file mode 100644 index 000000000..fbb785c60 --- /dev/null +++ b/tests/false_positive_test.py @@ -0,0 +1,337 @@ +import unittest +import os +from time import sleep + +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from datetime import datetime, timedelta +import subprocess + + +class 
FalsePositive(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + @unittest.expectedFailure + def test_validate_wal_lost_segment(self): + """ + Loose segment located between backups. ExpectedFailure. This is BUG + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node) + + # make some wals + node.pgbench_init(scale=5) + + # delete last wal segment + wals_dir = os.path.join(backup_dir, "wal", 'node') + wals = [f for f in os.listdir(wals_dir) if os.path.isfile( + os.path.join(wals_dir, f)) and not f.endswith('.backup')] + wals = map(int, wals) + os.remove(os.path.join(wals_dir, '0000000' + str(max(wals)))) + + # We just lost a wal segment and know nothing about it + self.backup_node(backup_dir, 'node', node) + self.assertTrue( + 'validation completed successfully' in self.validate_pb( + backup_dir, 'node')) + ######## + + @unittest.expectedFailure + # Need to force validation of ancestor-chain + def test_incremental_backup_corrupt_full_1(self): + """page-level backup with corrupted full backup""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + backup_id = self.backup_node(backup_dir, 'node', node) + file = os.path.join( + backup_dir, "backups", "node", + backup_id.decode("utf-8"), "database", "postgresql.conf") + os.remove(file) + + try: + self.backup_node(backup_dir, 'node', node, backup_type="page") + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because page backup should not be " + "possible without valid full backup.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertEqual( + e.message, + 'ERROR: Valid full backup on current timeline is not found. ' + 'Create new FULL backup before an incremental one.\n', + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertFalse( + True, + "Expecting Error because page backup should not be " + "possible without valid full backup.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertEqual( + e.message, + 'ERROR: Valid full backup on current timeline is not found. 
' + 'Create new FULL backup before an incremental one.\n', + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + self.show_pb(backup_dir, 'node')[0]['Status'], "ERROR") + + # @unittest.skip("skip") + @unittest.expectedFailure + def test_pg_10_waldir(self): + """ + test group access for PG >= 11 + """ + if self.pg_config_version < self.version_to_num('10.0'): + self.skipTest('You need PostgreSQL >= 10 for this test') + + wal_dir = os.path.join( + os.path.join(self.tmp_path, self.module_name, self.fname), 'wal_dir') + import shutil + shutil.rmtree(wal_dir, ignore_errors=True) + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=[ + '--data-checksums', + '--waldir={0}'.format(wal_dir)]) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # take FULL backup + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + pgdata = self.pgdata_content(node.data_dir) + + # restore backup + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored) + + # compare pgdata permissions + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + self.assertTrue( + os.path.islink(os.path.join(node_restored.data_dir, 'pg_wal')), + 'pg_wal should be symlink') + + @unittest.expectedFailure + # @unittest.skip("skip") + def test_recovery_target_time_backup_victim(self): + """ + Check that for validation to recovery target + probackup chooses valid backup + https://github.com/postgrespro/pg_probackup/issues/104 + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "create table t_heap as select 1 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + + target_time = node.safe_psql( + "postgres", + "select now()").rstrip() + + node.safe_psql( + "postgres", + "create table t_heap1 as select 1 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,100) i") + + gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + + # Attention! 
This breakpoint is set to a probackup internal function, not a postgres core one + gdb.set_breakpoint('pg_stop_backup') + gdb.run_until_break() + gdb.remove_all_breakpoints() + gdb._execute('signal SIGINT') + gdb.continue_execution_until_error() + + backup_id = self.show_pb(backup_dir, 'node')[1]['id'] + + self.assertEqual( + 'ERROR', + self.show_pb(backup_dir, 'node', backup_id)['status'], + 'Backup STATUS should be "ERROR"') + + self.validate_pb( + backup_dir, 'node', + options=['--recovery-target-time={0}'.format(target_time)]) + + @unittest.expectedFailure + # @unittest.skip("skip") + def test_recovery_target_lsn_backup_victim(self): + """ + Check that for validation to recovery target + probackup chooses valid backup + https://github.com/postgrespro/pg_probackup/issues/104 + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "create table t_heap as select 1 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + + node.safe_psql( + "postgres", + "create table t_heap1 as select 1 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,100) i") + + gdb = self.backup_node( + backup_dir, 'node', node, + options=['--log-level-console=LOG'], gdb=True) + + # Attention! This breakpoint is set to a probackup internal function, not a postgres core one + gdb.set_breakpoint('pg_stop_backup') + gdb.run_until_break() + gdb.remove_all_breakpoints() + gdb._execute('signal SIGINT') + gdb.continue_execution_until_error() + + backup_id = self.show_pb(backup_dir, 'node')[1]['id'] + + self.assertEqual( + 'ERROR', + self.show_pb(backup_dir, 'node', backup_id)['status'], + 'Backup STATUS should be "ERROR"') + + self.switch_wal_segment(node) + + target_lsn = self.show_pb(backup_dir, 'node', backup_id)['start-lsn'] + + self.validate_pb( + backup_dir, 'node', + options=['--recovery-target-lsn={0}'.format(target_lsn)]) + + # @unittest.skip("skip") + @unittest.expectedFailure + def test_streaming_timeout(self): + """ + Illustrate the problem of losing the exact error + message because our WAL streaming engine is "borrowed" + from pg_receivexlog + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '1h', + 'wal_sender_timeout': '5s'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + gdb = self.backup_node( + backup_dir, 'node', node, gdb=True, + options=['--stream', '--log-level-file=LOG']) + + # Attention! 
This breakpoint is set to a probackup internal fuction, not a postgres core one + gdb.set_breakpoint('pg_stop_backup') + gdb.run_until_break() + + sleep(10) + gdb.continue_execution_until_error() + gdb._execute('detach') + sleep(2) + + log_file_path = os.path.join(backup_dir, 'log', 'pg_probackup.log') + with open(log_file_path) as f: + log_content = f.read() + + self.assertIn( + 'could not receive data from WAL stream', + log_content) + + self.assertIn( + 'ERROR: Problem in receivexlog', + log_content) + + # @unittest.skip("skip") + @unittest.expectedFailure + def test_validate_all_empty_catalog(self): + """ + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + + try: + self.validate_pb(backup_dir) + self.assertEqual( + 1, 0, + "Expecting Error because backup_dir is empty.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: This backup catalog contains no backup instances', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) diff --git a/tests/incr_restore_test.py b/tests/incr_restore_test.py new file mode 100644 index 000000000..613e4dd36 --- /dev/null +++ b/tests/incr_restore_test.py @@ -0,0 +1,2300 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +import subprocess +from datetime import datetime +import sys +from time import sleep +from datetime import datetime, timedelta +import hashlib +import shutil +import json +from testgres import QueryException + + +class IncrRestoreTest(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + def test_basic_incr_restore(self): + """incremental restore in CHECKSUM mode""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=50) + + self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + pgbench.stdout.close() + + self.backup_node(backup_dir, 'node', node, backup_type='page') + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1']) + pgbench.wait() + pgbench.stdout.close() + + self.backup_node(backup_dir, 'node', node, backup_type='page') + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + pgbench.stdout.close() + + backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') + + pgdata = self.pgdata_content(node.data_dir) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + pgbench.stdout.close() + + node.stop() + + self.restore_node( + backup_dir, 'node', node, + options=["-j", "4", "--incremental-mode=checksum"]) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # 
@unittest.skip("skip") + def test_basic_incr_restore_into_missing_directory(self): + """""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=10) + + self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + pgbench.stdout.close() + + self.backup_node(backup_dir, 'node', node, backup_type='page') + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1']) + pgbench.wait() + pgbench.stdout.close() + + self.backup_node(backup_dir, 'node', node, backup_type='page') + + pgdata = self.pgdata_content(node.data_dir) + + node.cleanup() + + self.restore_node( + backup_dir, 'node', node, + options=["-j", "4", "--incremental-mode=checksum"]) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_checksum_corruption_detection(self): + """ + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=10) + + self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + pgbench.stdout.close() + + self.backup_node(backup_dir, 'node', node, backup_type='page') + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1']) + pgbench.wait() + pgbench.stdout.close() + + self.backup_node(backup_dir, 'node', node, backup_type='page') + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + pgbench.stdout.close() + + backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') + + pgdata = self.pgdata_content(node.data_dir) + + node.stop() + + self.restore_node( + backup_dir, 'node', node, + options=["-j", "4", "--incremental-mode=lsn"]) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_incr_restore_with_tablespace(self): + """ + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + tblspace = self.get_tblspace_path(node, 'tblspace') + some_directory = self.get_tblspace_path(node, 'some_directory') + + # stuff new destination with garbage + self.restore_node(backup_dir, 'node', node, data_dir=some_directory) + + self.create_tblspace_in_node(node, 'tblspace') + 
node.pgbench_init(scale=10, tablespace='tblspace') + + self.backup_node(backup_dir, 'node', node, options=['--stream']) + pgdata = self.pgdata_content(node.data_dir) + + node.stop() + + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--incremental-mode=checksum", "--force", + "-T{0}={1}".format(tblspace, some_directory)]) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_incr_restore_with_tablespace_1(self): + """recovery to target timeline""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + set_replication=True) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + tblspace = self.get_tblspace_path(node, 'tblspace') + some_directory = self.get_tblspace_path(node, 'some_directory') + + self.restore_node(backup_dir, 'node', node, data_dir=some_directory) + + self.create_tblspace_in_node(node, 'tblspace') + node.pgbench_init(scale=10, tablespace='tblspace') + + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + pgbench.stdout.close() + + self.backup_node( + backup_dir, 'node', node, backup_type='delta', options=['--stream']) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + pgbench.stdout.close() + + self.backup_node( + backup_dir, 'node', node, backup_type='delta', options=['--stream']) + + pgdata = self.pgdata_content(node.data_dir) + + node.stop() + + self.restore_node( + backup_dir, 'node', node, + options=["-j", "4", "--incremental-mode=checksum"]) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_incr_restore_with_tablespace_2(self): + """ + If "--tablespace-mapping" option is used with incremental restore, + then new directory must be empty. 
+ """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + set_replication=True) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + node_1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_1')) + + # fill node1 with data + out = self.restore_node( + backup_dir, 'node', node, + data_dir=node_1.data_dir, + options=['--incremental-mode=checksum', '--force']) + + self.assertIn("WARNING: Backup catalog was initialized for system id", out) + + tblspace = self.get_tblspace_path(node, 'tblspace') + self.create_tblspace_in_node(node, 'tblspace') + node.pgbench_init(scale=5, tablespace='tblspace') + + node.safe_psql( + 'postgres', + 'vacuum') + + self.backup_node(backup_dir, 'node', node, backup_type='delta', options=['--stream']) + + pgdata = self.pgdata_content(node.data_dir) + + try: + self.restore_node( + backup_dir, 'node', node, + data_dir=node_1.data_dir, + options=['--incremental-mode=checksum', '-T{0}={1}'.format(tblspace, tblspace)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because remapped directory is not empty.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Remapped tablespace destination is not empty', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + out = self.restore_node( + backup_dir, 'node', node, + data_dir=node_1.data_dir, + options=[ + '--force', '--incremental-mode=checksum', + '-T{0}={1}'.format(tblspace, tblspace)]) + + pgdata_restored = self.pgdata_content(node_1.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_incr_restore_with_tablespace_3(self): + """ + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'tblspace1') + node.pgbench_init(scale=10, tablespace='tblspace1') + + # take backup with tblspace1 + self.backup_node(backup_dir, 'node', node, options=['--stream']) + pgdata = self.pgdata_content(node.data_dir) + + self.drop_tblspace(node, 'tblspace1') + + self.create_tblspace_in_node(node, 'tblspace2') + node.pgbench_init(scale=10, tablespace='tblspace2') + + node.stop() + + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", + "--incremental-mode=checksum"]) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_incr_restore_with_tablespace_4(self): + """ + Check that system ID mismatch is detected, + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 
'tblspace1') + node.pgbench_init(scale=10, tablespace='tblspace1') + + # take backup of node1 with tblspace1 + self.backup_node(backup_dir, 'node', node, options=['--stream']) + pgdata = self.pgdata_content(node.data_dir) + + self.drop_tblspace(node, 'tblspace1') + node.cleanup() + + # recreate node + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + node.slow_start() + + self.create_tblspace_in_node(node, 'tblspace1') + node.pgbench_init(scale=10, tablespace='tblspace1') + node.stop() + + try: + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", + "--incremental-mode=checksum"]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because destination directory has wrong system id.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'WARNING: Backup catalog was initialized for system id', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'ERROR: Incremental restore is not allowed', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + out = self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--force", + "--incremental-mode=checksum"]) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.expectedFailure + @unittest.skip("skip") + def test_incr_restore_with_tablespace_5(self): + """ + More complicated case, we restore backup + with tablespace, which we remap into directory + with some old content, that belongs to an instance + with different system id. 
+ """ + node1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node1'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node1) + node1.slow_start() + + self.create_tblspace_in_node(node1, 'tblspace') + node1.pgbench_init(scale=10, tablespace='tblspace') + + # take backup of node1 with tblspace + self.backup_node(backup_dir, 'node', node1, options=['--stream']) + pgdata = self.pgdata_content(node1.data_dir) + + node1.stop() + + # recreate node + node2 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node2'), + set_replication=True, + initdb_params=['--data-checksums']) + node2.slow_start() + + self.create_tblspace_in_node(node2, 'tblspace') + node2.pgbench_init(scale=10, tablespace='tblspace') + node2.stop() + + tblspc1_path = self.get_tblspace_path(node1, 'tblspace') + tblspc2_path = self.get_tblspace_path(node2, 'tblspace') + + out = self.restore_node( + backup_dir, 'node', node1, + options=[ + "-j", "4", "--force", + "--incremental-mode=checksum", + "-T{0}={1}".format(tblspc1_path, tblspc2_path)]) + + # check that tblspc1_path is empty + self.assertFalse( + os.listdir(tblspc1_path), + "Dir is not empty: '{0}'".format(tblspc1_path)) + + pgdata_restored = self.pgdata_content(node1.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_incr_restore_with_tablespace_6(self): + """ + Empty pgdata, not empty tablespace + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'tblspace') + node.pgbench_init(scale=10, tablespace='tblspace') + + # take backup of node with tblspace + self.backup_node(backup_dir, 'node', node, options=['--stream']) + pgdata = self.pgdata_content(node.data_dir) + + node.cleanup() + + try: + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", + "--incremental-mode=checksum"]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because there is running postmaster " + "process in destination directory.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: PGDATA is empty, but tablespace destination is not', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + out = self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--force", + "--incremental-mode=checksum"]) + + self.assertIn( + "INFO: Destination directory and tablespace directories are empty, " + "disable incremental restore", out) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_incr_restore_with_tablespace_7(self): + """ + Restore backup without tablespace into + PGDATA with tablespace. 
+ """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # take backup of node with tblspace + self.backup_node(backup_dir, 'node', node, options=['--stream']) + pgdata = self.pgdata_content(node.data_dir) + + self.create_tblspace_in_node(node, 'tblspace') + node.pgbench_init(scale=5, tablespace='tblspace') + node.stop() + +# try: +# self.restore_node( +# backup_dir, 'node', node, +# options=[ +# "-j", "4", +# "--incremental-mode=checksum"]) +# # we should die here because exception is what we expect to happen +# self.assertEqual( +# 1, 0, +# "Expecting Error because there is running postmaster " +# "process in destination directory.\n " +# "Output: {0} \n CMD: {1}".format( +# repr(self.output), self.cmd)) +# except ProbackupException as e: +# self.assertIn( +# 'ERROR: PGDATA is empty, but tablespace destination is not', +# e.message, +# '\n Unexpected Error Message: {0}\n CMD: {1}'.format( +# repr(e.message), self.cmd)) + + out = self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--incremental-mode=checksum"]) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_basic_incr_restore_sanity(self): + """recovery to target timeline""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + set_replication=True) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + try: + self.restore_node( + backup_dir, 'node', node, + options=["-j", "4", "--incremental-mode=checksum"]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because there is running postmaster " + "process in destination directory.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'WARNING: Postmaster with pid', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'ERROR: Incremental restore is not allowed', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + node_1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_1')) + + try: + self.restore_node( + backup_dir, 'node', node_1, data_dir=node_1.data_dir, + options=["-j", "4", "--incremental-mode=checksum"]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because destination directory has wrong system id.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'WARNING: Backup catalog was initialized for system id', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'ERROR: Incremental restore is not allowed', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # 
@unittest.skip("skip") + def test_incr_checksum_restore(self): + """ + /----C-----D + ------A----B---*--------X + + X - is instance, we want to return it to C state. + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + pg_options={'wal_log_hints': 'on'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=50) + self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + self.backup_node(backup_dir, 'node', node, backup_type='page') + + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + xid = node.safe_psql( + 'postgres', + 'select txid_current()').decode('utf-8').rstrip() + + # --A-----B--------X + pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) + pgbench.wait() + node.stop(['-m', 'immediate', '-D', node.data_dir]) + + node_1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_1')) + node_1.cleanup() + + self.restore_node( + backup_dir, 'node', node_1, data_dir=node_1.data_dir, + options=[ + '--recovery-target-action=promote', + '--recovery-target-xid={0}'.format(xid)]) + + self.set_auto_conf(node_1, {'port': node_1.port}) + node_1.slow_start() + + # /-- + # --A-----B----*----X + pgbench = node_1.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + # /--C + # --A-----B----*----X + self.backup_node(backup_dir, 'node', node_1, + data_dir=node_1.data_dir, backup_type='page') + + # /--C------ + # --A-----B----*----X + pgbench = node_1.pgbench(options=['-T', '50', '-c', '1']) + pgbench.wait() + + # /--C------D + # --A-----B----*----X + self.backup_node(backup_dir, 'node', node_1, + data_dir=node_1.data_dir, backup_type='page') + + pgdata = self.pgdata_content(node_1.data_dir) + + self.restore_node( + backup_dir, 'node', node, + options=["-j", "4", "--incremental-mode=checksum"]) + + pgdata_restored = self.pgdata_content(node.data_dir) + + self.set_auto_conf(node, {'port': node.port}) + node.slow_start() + + self.compare_pgdata(pgdata, pgdata_restored) + + + # @unittest.skip("skip") + def test_incr_lsn_restore(self): + """ + /----C-----D + ------A----B---*--------X + + X - is instance, we want to return it to C state. 
+ """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + pg_options={'wal_log_hints': 'on'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=50) + self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + self.backup_node(backup_dir, 'node', node, backup_type='page') + + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + xid = node.safe_psql( + 'postgres', + 'select txid_current()').decode('utf-8').rstrip() + + # --A-----B--------X + pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) + pgbench.wait() + node.stop(['-m', 'immediate', '-D', node.data_dir]) + + node_1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_1')) + node_1.cleanup() + + self.restore_node( + backup_dir, 'node', node_1, data_dir=node_1.data_dir, + options=[ + '--recovery-target-action=promote', + '--recovery-target-xid={0}'.format(xid)]) + + self.set_auto_conf(node_1, {'port': node_1.port}) + node_1.slow_start() + + # /-- + # --A-----B----*----X + pgbench = node_1.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + # /--C + # --A-----B----*----X + self.backup_node(backup_dir, 'node', node_1, + data_dir=node_1.data_dir, backup_type='page') + + # /--C------ + # --A-----B----*----X + pgbench = node_1.pgbench(options=['-T', '50', '-c', '1']) + pgbench.wait() + + # /--C------D + # --A-----B----*----X + self.backup_node(backup_dir, 'node', node_1, + data_dir=node_1.data_dir, backup_type='page') + + pgdata = self.pgdata_content(node_1.data_dir) + + self.restore_node( + backup_dir, 'node', node, options=["-j", "4", "--incremental-mode=lsn"]) + + pgdata_restored = self.pgdata_content(node.data_dir) + + self.set_auto_conf(node, {'port': node.port}) + node.slow_start() + + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_incr_lsn_sanity(self): + """ + /----A-----B + F------*--------X + + X - is instance, we want to return it to state B. + fail is expected behaviour in case of lsn restore. 
+ """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + pg_options={'wal_log_hints': 'on'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node) + node.pgbench_init(scale=10) + + node_1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_1')) + node_1.cleanup() + + self.restore_node( + backup_dir, 'node', node_1, data_dir=node_1.data_dir) + + self.set_auto_conf(node_1, {'port': node_1.port}) + node_1.slow_start() + + pgbench = node_1.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + self.backup_node(backup_dir, 'node', node_1, + data_dir=node_1.data_dir, backup_type='full') + + pgbench = node_1.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + page_id = self.backup_node(backup_dir, 'node', node_1, + data_dir=node_1.data_dir, backup_type='page') + + node.stop() + + try: + self.restore_node( + backup_dir, 'node', node, data_dir=node.data_dir, + options=["-j", "4", "--incremental-mode=lsn"]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because incremental restore in lsn mode is impossible\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Cannot perform incremental restore of " + "backup chain {0} in 'lsn' mode".format(page_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_incr_checksum_sanity(self): + """ + /----A-----B + F------*--------X + + X - is instance, we want to return it to state B. 
+ """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node) + node.pgbench_init(scale=20) + + node_1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_1')) + node_1.cleanup() + + self.restore_node( + backup_dir, 'node', node_1, data_dir=node_1.data_dir) + + self.set_auto_conf(node_1, {'port': node_1.port}) + node_1.slow_start() + + pgbench = node_1.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + self.backup_node(backup_dir, 'node', node_1, + data_dir=node_1.data_dir, backup_type='full') + + pgbench = node_1.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + page_id = self.backup_node(backup_dir, 'node', node_1, + data_dir=node_1.data_dir, backup_type='page') + pgdata = self.pgdata_content(node_1.data_dir) + + node.stop() + + self.restore_node( + backup_dir, 'node', node, data_dir=node.data_dir, + options=["-j", "4", "--incremental-mode=checksum"]) + + pgdata_restored = self.pgdata_content(node.data_dir) + + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_incr_checksum_corruption_detection(self): + """ + check that corrupted page got detected and replaced + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), +# initdb_params=['--data-checksums'], + pg_options={'wal_log_hints': 'on'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node) + node.pgbench_init(scale=20) + + pgbench = node.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + self.backup_node(backup_dir, 'node', node, + data_dir=node.data_dir, backup_type='full') + + heap_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('pgbench_accounts')").decode('utf-8').rstrip() + + pgbench = node.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + page_id = self.backup_node(backup_dir, 'node', node, + data_dir=node.data_dir, backup_type='page') + + pgdata = self.pgdata_content(node.data_dir) + + node.stop() + + path = os.path.join(node.data_dir, heap_path) + with open(path, "rb+", 0) as f: + f.seek(22000) + f.write(b"bla") + f.flush() + f.close + + self.restore_node( + backup_dir, 'node', node, data_dir=node.data_dir, + options=["-j", "4", "--incremental-mode=checksum"]) + + pgdata_restored = self.pgdata_content(node.data_dir) + + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_incr_lsn_corruption_detection(self): + """ + check that corrupted page got detected and replaced + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + pg_options={'wal_log_hints': 'on'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node) + node.pgbench_init(scale=20) + + pgbench = 
node.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + self.backup_node(backup_dir, 'node', node, + data_dir=node.data_dir, backup_type='full') + + heap_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('pgbench_accounts')").decode('utf-8').rstrip() + + pgbench = node.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + page_id = self.backup_node(backup_dir, 'node', node, + data_dir=node.data_dir, backup_type='page') + + pgdata = self.pgdata_content(node.data_dir) + + node.stop() + + path = os.path.join(node.data_dir, heap_path) + with open(path, "rb+", 0) as f: + f.seek(22000) + f.write(b"bla") + f.flush() + f.close + + self.restore_node( + backup_dir, 'node', node, data_dir=node.data_dir, + options=["-j", "4", "--incremental-mode=lsn"]) + + pgdata_restored = self.pgdata_content(node.data_dir) + + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_incr_restore_multiple_external(self): + """check that cmdline has priority over config""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + external_dir1 = self.get_tblspace_path(node, 'external_dir1') + external_dir2 = self.get_tblspace_path(node, 'external_dir2') + + # FULL backup + node.pgbench_init(scale=20) + self.backup_node( + backup_dir, 'node', node, + backup_type="full", options=["-j", "4"]) + + # fill external directories with data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2, options=["-j", "4"]) + + self.set_config( + backup_dir, 'node', + options=['-E{0}{1}{2}'.format( + external_dir1, self.EXTERNAL_DIRECTORY_DELIMITER, external_dir2)]) + + # cmdline option MUST override options in config + self.backup_node( + backup_dir, 'node', node, + backup_type='full', options=["-j", "4"]) + + pgbench = node.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + # cmdline option MUST override options in config + self.backup_node( + backup_dir, 'node', node, + backup_type='page', options=["-j", "4"]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + pgbench = node.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + node.stop() + + self.restore_node( + backup_dir, 'node', node, + options=["-j", "4", '--incremental-mode=checksum']) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_incr_lsn_restore_multiple_external(self): + """check that cmdline has priority over config""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + external_dir1 = self.get_tblspace_path(node, 'external_dir1') + external_dir2 = self.get_tblspace_path(node, 'external_dir2') + + # FULL backup + 
node.pgbench_init(scale=20) + self.backup_node( + backup_dir, 'node', node, + backup_type="full", options=["-j", "4"]) + + # fill external directories with data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2, options=["-j", "4"]) + + self.set_config( + backup_dir, 'node', + options=['-E{0}{1}{2}'.format( + external_dir1, self.EXTERNAL_DIRECTORY_DELIMITER, external_dir2)]) + + # cmdline option MUST override options in config + self.backup_node( + backup_dir, 'node', node, + backup_type='full', options=["-j", "4"]) + + pgbench = node.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + # cmdline option MUST override options in config + self.backup_node( + backup_dir, 'node', node, + backup_type='page', options=["-j", "4"]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + pgbench = node.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + node.stop() + + self.restore_node( + backup_dir, 'node', node, + options=["-j", "4", '--incremental-mode=lsn']) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_incr_lsn_restore_backward(self): + """ + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={'wal_log_hints': 'on', 'hot_standby': 'on'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + node.pgbench_init(scale=2) + full_id = self.backup_node( + backup_dir, 'node', node, + backup_type="full", options=["-j", "4"]) + + full_pgdata = self.pgdata_content(node.data_dir) + + pgbench = node.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + page_id = self.backup_node( + backup_dir, 'node', node, + backup_type='page', options=["-j", "4"]) + + page_pgdata = self.pgdata_content(node.data_dir) + + pgbench = node.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + delta_id = self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=["-j", "4"]) + + delta_pgdata = self.pgdata_content(node.data_dir) + + pgbench = node.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + node.stop() + + self.restore_node( + backup_dir, 'node', node, backup_id=full_id, + options=[ + "-j", "4", + '--incremental-mode=lsn', + '--recovery-target=immediate', + '--recovery-target-action=pause']) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(full_pgdata, pgdata_restored) + + node.slow_start(replica=True) + node.stop() + + try: + self.restore_node( + backup_dir, 'node', node, backup_id=page_id, + options=[ + "-j", "4", '--incremental-mode=lsn', + '--recovery-target=immediate', '--recovery-target-action=pause']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because incremental restore in lsn mode is impossible\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "Cannot perform incremental restore of backup chain", + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + 
repr(e.message), self.cmd)) + + self.restore_node( + backup_dir, 'node', node, backup_id=page_id, + options=[ + "-j", "4", '--incremental-mode=checksum', + '--recovery-target=immediate', '--recovery-target-action=pause']) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(page_pgdata, pgdata_restored) + + node.slow_start(replica=True) + node.stop() + + self.restore_node( + backup_dir, 'node', node, backup_id=delta_id, + options=[ + "-j", "4", + '--incremental-mode=lsn', + '--recovery-target=immediate', + '--recovery-target-action=pause']) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(delta_pgdata, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_incr_checksum_restore_backward(self): + """ + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'hot_standby': 'on'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + node.pgbench_init(scale=20) + full_id = self.backup_node( + backup_dir, 'node', node, + backup_type="full", options=["-j", "4"]) + + full_pgdata = self.pgdata_content(node.data_dir) + + pgbench = node.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + page_id = self.backup_node( + backup_dir, 'node', node, + backup_type='page', options=["-j", "4"]) + + page_pgdata = self.pgdata_content(node.data_dir) + + pgbench = node.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + delta_id = self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=["-j", "4"]) + + delta_pgdata = self.pgdata_content(node.data_dir) + + pgbench = node.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + node.stop() + + self.restore_node( + backup_dir, 'node', node, backup_id=full_id, + options=[ + "-j", "4", + '--incremental-mode=checksum', + '--recovery-target=immediate', + '--recovery-target-action=pause']) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(full_pgdata, pgdata_restored) + + node.slow_start(replica=True) + node.stop() + + self.restore_node( + backup_dir, 'node', node, backup_id=page_id, + options=[ + "-j", "4", + '--incremental-mode=checksum', + '--recovery-target=immediate', + '--recovery-target-action=pause']) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(page_pgdata, pgdata_restored) + + node.slow_start(replica=True) + node.stop() + + self.restore_node( + backup_dir, 'node', node, backup_id=delta_id, + options=[ + "-j", "4", + '--incremental-mode=checksum', + '--recovery-target=immediate', + '--recovery-target-action=pause']) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(delta_pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_make_replica_via_incr_checksum_restore(self): + """ + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums']) + + if self.get_version(master) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + 
self.add_instance(backup_dir, 'node', master) + self.set_archiving(backup_dir, 'node', master, replica=True) + master.slow_start() + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + master.pgbench_init(scale=20) + + self.backup_node(backup_dir, 'node', master) + + self.restore_node( + backup_dir, 'node', replica, options=['-R']) + + # Settings for Replica + self.set_replica(master, replica, synchronous=False) + + replica.slow_start(replica=True) + + pgbench = master.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + # PROMOTIONS + replica.promote() + new_master = replica + + # old master is going a bit further + old_master = master + pgbench = old_master.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + old_master.stop() + + pgbench = new_master.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + # take backup from new master + self.backup_node( + backup_dir, 'node', new_master, + data_dir=new_master.data_dir, backup_type='page') + + # restore old master as replica + self.restore_node( + backup_dir, 'node', old_master, data_dir=old_master.data_dir, + options=['-R', '--incremental-mode=checksum']) + + self.set_replica(new_master, old_master, synchronous=True) + + old_master.slow_start(replica=True) + + pgbench = new_master.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + # @unittest.skip("skip") + def test_make_replica_via_incr_lsn_restore(self): + """ + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums']) + + if self.get_version(master) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', master) + self.set_archiving(backup_dir, 'node', master, replica=True) + master.slow_start() + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + master.pgbench_init(scale=20) + + self.backup_node(backup_dir, 'node', master) + + self.restore_node( + backup_dir, 'node', replica, options=['-R']) + + # Settings for Replica + self.set_replica(master, replica, synchronous=False) + + replica.slow_start(replica=True) + + pgbench = master.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + # PROMOTIONS + replica.promote() + new_master = replica + + # old master is going a bit further + old_master = master + pgbench = old_master.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + old_master.stop() + + pgbench = new_master.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + # take backup from new master + self.backup_node( + backup_dir, 'node', new_master, + data_dir=new_master.data_dir, backup_type='page') + + # restore old master as replica + self.restore_node( + backup_dir, 'node', old_master, data_dir=old_master.data_dir, + options=['-R', '--incremental-mode=lsn']) + + self.set_replica(new_master, old_master, synchronous=True) + + old_master.slow_start(replica=True) + + pgbench = new_master.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_incr_checksum_long_xact(self): + """ + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + 
set_replication=True) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + 'postgres', + 'create extension pageinspect') + + # FULL backup + con = node.connect("postgres") + con.execute("CREATE TABLE t1 (a int)") + con.commit() + + + con.execute("INSERT INTO t1 values (1)") + con.commit() + + # leave uncommited + con2 = node.connect("postgres") + con.execute("INSERT INTO t1 values (2)") + con2.execute("INSERT INTO t1 values (3)") + + full_id = self.backup_node( + backup_dir, 'node', node, + backup_type="full", options=["-j", "4", "--stream"]) + + self.backup_node( + backup_dir, 'node', node, + backup_type="delta", options=["-j", "4", "--stream"]) + + con.commit() + + node.safe_psql( + 'postgres', + 'select * from t1') + + con2.commit() + node.safe_psql( + 'postgres', + 'select * from t1') + + node.stop() + + self.restore_node( + backup_dir, 'node', node, backup_id=full_id, + options=["-j", "4", '--incremental-mode=checksum']) + + node.slow_start() + + self.assertEqual( + node.safe_psql( + 'postgres', + 'select count(*) from t1').decode('utf-8').rstrip(), + '1') + + # @unittest.skip("skip") + # @unittest.expectedFailure + # This test will pass with Enterprise + # because it has checksums enabled by default + @unittest.skipIf(ProbackupTest.enterprise, 'skip') + def test_incr_lsn_long_xact_1(self): + """ + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + 'postgres', + 'create extension pageinspect') + + # FULL backup + con = node.connect("postgres") + con.execute("CREATE TABLE t1 (a int)") + con.commit() + + + con.execute("INSERT INTO t1 values (1)") + con.commit() + + # leave uncommited + con2 = node.connect("postgres") + con.execute("INSERT INTO t1 values (2)") + con2.execute("INSERT INTO t1 values (3)") + + full_id = self.backup_node( + backup_dir, 'node', node, + backup_type="full", options=["-j", "4", "--stream"]) + + self.backup_node( + backup_dir, 'node', node, + backup_type="delta", options=["-j", "4", "--stream"]) + + con.commit() + + # when does LSN gets stamped when checksum gets updated ? 
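+        # the reads below only set hint bits on the already committed tuples;
+        # without data checksums (and with wal_log_hints off) this is not WAL-logged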
+ node.safe_psql( + 'postgres', + 'select * from t1') + + con2.commit() + node.safe_psql( + 'postgres', + 'select * from t1') + + node.stop() + + try: + self.restore_node( + backup_dir, 'node', node, backup_id=full_id, + options=["-j", "4", '--incremental-mode=lsn']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because incremental restore in lsn mode is impossible\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Incremental restore in 'lsn' mode require data_checksums to be " + "enabled in destination data directory", + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_incr_lsn_long_xact_2(self): + """ + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'full_page_writes': 'off', + 'wal_log_hints': 'off'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + 'postgres', + 'create extension pageinspect') + + # FULL backup + con = node.connect("postgres") + con.execute("CREATE TABLE t1 (a int)") + con.commit() + + + con.execute("INSERT INTO t1 values (1)") + con.commit() + + # leave uncommited + con2 = node.connect("postgres") + con.execute("INSERT INTO t1 values (2)") + con2.execute("INSERT INTO t1 values (3)") + + full_id = self.backup_node( + backup_dir, 'node', node, + backup_type="full", options=["-j", "4", "--stream"]) + + self.backup_node( + backup_dir, 'node', node, + backup_type="delta", options=["-j", "4", "--stream"]) + +# print(node.safe_psql( +# 'postgres', +# "select * from page_header(get_raw_page('t1', 0))")) + + con.commit() + + # when does LSN gets stamped when checksum gets updated ? 
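+        # full_page_writes and wal_log_hints are off, so the hint-bit-only
+        # changes made by these reads do not advance the page LSN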
+ node.safe_psql( + 'postgres', + 'select * from t1') + +# print(node.safe_psql( +# 'postgres', +# "select * from page_header(get_raw_page('t1', 0))")) + + con2.commit() + node.safe_psql( + 'postgres', + 'select * from t1') + +# print(node.safe_psql( +# 'postgres', +# "select * from page_header(get_raw_page('t1', 0))")) + + node.stop() + + self.restore_node( + backup_dir, 'node', node, backup_id=full_id, + options=["-j", "4", '--incremental-mode=lsn']) + + node.slow_start() + + self.assertEqual( + node.safe_psql( + 'postgres', + 'select count(*) from t1').decode('utf-8').rstrip(), + '1') + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_incr_restore_zero_size_file_checksum(self): + """ + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + fullpath = os.path.join(node.data_dir, 'simple_file') + with open(fullpath, "w+b", 0) as f: + f.flush() + f.close + + # FULL backup + id1 = self.backup_node( + backup_dir, 'node', node, + options=["-j", "4", "--stream"]) + + pgdata1 = self.pgdata_content(node.data_dir) + + with open(fullpath, "rb+", 0) as f: + f.seek(9000) + f.write(b"bla") + f.flush() + f.close + + id2 = self.backup_node( + backup_dir, 'node', node, + backup_type="delta", options=["-j", "4", "--stream"]) + pgdata2 = self.pgdata_content(node.data_dir) + + with open(fullpath, "w") as f: + f.close() + + id3 = self.backup_node( + backup_dir, 'node', node, + backup_type="delta", options=["-j", "4", "--stream"]) + pgdata3 = self.pgdata_content(node.data_dir) + + node.stop() + + self.restore_node( + backup_dir, 'node', node, backup_id=id1, + options=["-j", "4", '-I', 'checksum']) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata1, pgdata_restored) + + self.restore_node( + backup_dir, 'node', node, backup_id=id2, + options=["-j", "4", '-I', 'checksum']) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata2, pgdata_restored) + + self.restore_node( + backup_dir, 'node', node, backup_id=id3, + options=["-j", "4", '-I', 'checksum']) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata3, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_incr_restore_zero_size_file_lsn(self): + """ + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + fullpath = os.path.join(node.data_dir, 'simple_file') + with open(fullpath, "w+b", 0) as f: + f.flush() + f.close + + # FULL backup + id1 = self.backup_node( + backup_dir, 'node', node, + options=["-j", "4", "--stream"]) + + pgdata1 = self.pgdata_content(node.data_dir) + + with open(fullpath, "rb+", 0) as f: + f.seek(9000) + f.write(b"bla") + f.flush() + f.close + + id2 = self.backup_node( + backup_dir, 'node', node, + backup_type="delta", options=["-j", "4", "--stream"]) + pgdata2 = self.pgdata_content(node.data_dir) + + with open(fullpath, "w") as f: + f.close() + + id3 = self.backup_node( + backup_dir, 'node', node, + backup_type="delta", 
options=["-j", "4", "--stream"]) + pgdata3 = self.pgdata_content(node.data_dir) + + node.stop() + + self.restore_node( + backup_dir, 'node', node, backup_id=id1, + options=["-j", "4", '-I', 'checksum']) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata1, pgdata_restored) + + node.slow_start() + node.stop() + + self.restore_node( + backup_dir, 'node', node, backup_id=id2, + options=["-j", "4", '-I', 'checksum']) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata2, pgdata_restored) + + node.slow_start() + node.stop() + + self.restore_node( + backup_dir, 'node', node, backup_id=id3, + options=["-j", "4", '-I', 'checksum']) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata3, pgdata_restored) + + def test_incremental_partial_restore_exclude_checksum(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + for i in range(1, 10, 1): + node.safe_psql( + 'postgres', + 'CREATE database db{0}'.format(i)) + + db_list_raw = node.safe_psql( + 'postgres', + 'SELECT to_json(a) ' + 'FROM (SELECT oid, datname FROM pg_database) a').rstrip() + + db_list_splitted = db_list_raw.splitlines() + + db_list = {} + for line in db_list_splitted: + line = json.loads(line) + db_list[line['datname']] = line['oid'] + + node.pgbench_init(scale=20) + + # FULL backup + self.backup_node(backup_dir, 'node', node) + pgdata = self.pgdata_content(node.data_dir) + + pgbench = node.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + # PAGE backup + backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') + + # restore FULL backup into second node2 + node1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node1')) + node1.cleanup() + + node2 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node2')) + node2.cleanup() + + # restore some data into node2 + self.restore_node(backup_dir, 'node', node2) + + # partial restore backup into node1 + self.restore_node( + backup_dir, 'node', + node1, options=[ + "--db-exclude=db1", + "--db-exclude=db5"]) + + pgdata1 = self.pgdata_content(node1.data_dir) + + # partial incremental restore backup into node2 + self.restore_node( + backup_dir, 'node', + node2, options=[ + "--db-exclude=db1", + "--db-exclude=db5", + "-I", "checksum"]) + + pgdata2 = self.pgdata_content(node2.data_dir) + + self.compare_pgdata(pgdata1, pgdata2) + + self.set_auto_conf(node2, {'port': node2.port}) + + node2.slow_start() + + node2.safe_psql( + 'postgres', + 'select 1') + + try: + node2.safe_psql( + 'db1', + 'select 1') + except QueryException as e: + self.assertIn('FATAL', e.message) + + try: + node2.safe_psql( + 'db5', + 'select 1') + except QueryException as e: + self.assertIn('FATAL', e.message) + + with open(node2.pg_log_file, 'r') as f: + output = f.read() + + self.assertNotIn('PANIC', output) + + def test_incremental_partial_restore_exclude_lsn(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + 
self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + for i in range(1, 10, 1): + node.safe_psql( + 'postgres', + 'CREATE database db{0}'.format(i)) + + db_list_raw = node.safe_psql( + 'postgres', + 'SELECT to_json(a) ' + 'FROM (SELECT oid, datname FROM pg_database) a').rstrip() + + db_list_splitted = db_list_raw.splitlines() + + db_list = {} + for line in db_list_splitted: + line = json.loads(line) + db_list[line['datname']] = line['oid'] + + node.pgbench_init(scale=20) + + # FULL backup + self.backup_node(backup_dir, 'node', node) + pgdata = self.pgdata_content(node.data_dir) + + pgbench = node.pgbench(options=['-T', '10', '-c', '1']) + pgbench.wait() + + # PAGE backup + backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') + + node.stop() + + # restore FULL backup into second node2 + node1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node1')) + node1.cleanup() + + node2 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node2')) + node2.cleanup() + + # restore some data into node2 + self.restore_node(backup_dir, 'node', node2) + + # partial restore backup into node1 + self.restore_node( + backup_dir, 'node', + node1, options=[ + "--db-exclude=db1", + "--db-exclude=db5"]) + + pgdata1 = self.pgdata_content(node1.data_dir) + + # partial incremental restore backup into node2 + node2.port = node.port + node2.slow_start() + node2.stop() + self.restore_node( + backup_dir, 'node', + node2, options=[ + "--db-exclude=db1", + "--db-exclude=db5", + "-I", "lsn"]) + + pgdata2 = self.pgdata_content(node2.data_dir) + + self.compare_pgdata(pgdata1, pgdata2) + + self.set_auto_conf(node2, {'port': node2.port}) + + node2.slow_start() + + node2.safe_psql( + 'postgres', + 'select 1') + + try: + node2.safe_psql( + 'db1', + 'select 1') + except QueryException as e: + self.assertIn('FATAL', e.message) + + try: + node2.safe_psql( + 'db5', + 'select 1') + except QueryException as e: + self.assertIn('FATAL', e.message) + + with open(node2.pg_log_file, 'r') as f: + output = f.read() + + self.assertNotIn('PANIC', output) + + def test_incremental_partial_restore_exclude_tablespace_checksum(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # cat_version = node.get_control_data()["Catalog version number"] + # version_specific_dir = 'PG_' + node.major_version_str + '_' + cat_version + + # PG_10_201707211 + # pg_tblspc/33172/PG_9.5_201510051/16386/ + + self.create_tblspace_in_node(node, 'somedata') + + node_tablespace = self.get_tblspace_path(node, 'somedata') + + tbl_oid = node.safe_psql( + 'postgres', + "SELECT oid " + "FROM pg_tablespace " + "WHERE spcname = 'somedata'").rstrip() + + for i in range(1, 10, 1): + node.safe_psql( + 'postgres', + 'CREATE database db{0} tablespace somedata'.format(i)) + + db_list_raw = node.safe_psql( + 'postgres', + 'SELECT to_json(a) ' + 'FROM (SELECT oid, datname FROM pg_database) a').rstrip() + + db_list_splitted = db_list_raw.splitlines() + + db_list = {} + for line in db_list_splitted: + line = json.loads(line) + db_list[line['datname']] = line['oid'] + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', 
node) + + # node1 + node1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node1')) + node1.cleanup() + node1_tablespace = self.get_tblspace_path(node1, 'somedata') + + # node2 + node2 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node2')) + node2.cleanup() + node2_tablespace = self.get_tblspace_path(node2, 'somedata') + + # in node2 restore full backup + self.restore_node( + backup_dir, 'node', + node2, options=[ + "-T", "{0}={1}".format( + node_tablespace, node2_tablespace)]) + + # partial restore into node1 + self.restore_node( + backup_dir, 'node', + node1, options=[ + "--db-exclude=db1", + "--db-exclude=db5", + "-T", "{0}={1}".format( + node_tablespace, node1_tablespace)]) + + pgdata1 = self.pgdata_content(node1.data_dir) + + # partial incremental restore into node2 + try: + self.restore_node( + backup_dir, 'node', + node2, options=[ + "-I", "checksum", + "--db-exclude=db1", + "--db-exclude=db5", + "-T", "{0}={1}".format( + node_tablespace, node2_tablespace)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because remapped tablespace contain old data .\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Remapped tablespace destination is not empty:', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.restore_node( + backup_dir, 'node', + node2, options=[ + "-I", "checksum", "--force", + "--db-exclude=db1", + "--db-exclude=db5", + "-T", "{0}={1}".format( + node_tablespace, node2_tablespace)]) + + pgdata2 = self.pgdata_content(node2.data_dir) + + self.compare_pgdata(pgdata1, pgdata2) + + self.set_auto_conf(node2, {'port': node2.port}) + node2.slow_start() + + node2.safe_psql( + 'postgres', + 'select 1') + + try: + node2.safe_psql( + 'db1', + 'select 1') + except QueryException as e: + self.assertIn('FATAL', e.message) + + try: + node2.safe_psql( + 'db5', + 'select 1') + except QueryException as e: + self.assertIn('FATAL', e.message) + + with open(node2.pg_log_file, 'r') as f: + output = f.read() + + self.assertNotIn('PANIC', output) + + def test_incremental_pg_filenode_map(self): + """ + https://github.com/postgrespro/pg_probackup/issues/320 + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node1'), + initdb_params=['--data-checksums']) + node1.cleanup() + + node.pgbench_init(scale=5) + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + + # in node1 restore full backup + self.restore_node(backup_dir, 'node', node1) + self.set_auto_conf(node1, {'port': node1.port}) + node1.slow_start() + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1']) + + pgbench = node1.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1']) + + node.safe_psql( + 'postgres', + 'reindex index pg_type_oid_index') + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + + node1.stop() + + # 
incremental restore into node1 + self.restore_node(backup_dir, 'node', node1, options=["-I", "checksum"]) + + self.set_auto_conf(node1, {'port': node1.port}) + node1.slow_start() + + node1.safe_psql( + 'postgres', + 'select 1') + +# check that MinRecPoint and BackupStartLsn are correctly used in case of --incrementa-lsn diff --git a/tests/init_test.py b/tests/init_test.py new file mode 100644 index 000000000..94b076fef --- /dev/null +++ b/tests/init_test.py @@ -0,0 +1,138 @@ +import os +import unittest +from .helpers.ptrack_helpers import dir_files, ProbackupTest, ProbackupException +import shutil + + +class InitTest(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_success(self): + """Success normal init""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node')) + self.init_pb(backup_dir) + self.assertEqual( + dir_files(backup_dir), + ['backups', 'wal'] + ) + self.add_instance(backup_dir, 'node', node) + self.assertIn( + "INFO: Instance 'node' successfully deleted", + self.del_instance(backup_dir, 'node'), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd)) + + # Show non-existing instance + try: + self.show_pb(backup_dir, 'node') + self.assertEqual(1, 0, 'Expecting Error due to show of non-existing instance. Output: {0} \n CMD: {1}'.format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Instance 'node' does not exist in this backup catalog", + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(e.message, self.cmd)) + + # Delete non-existing instance + try: + self.del_instance(backup_dir, 'node1') + self.assertEqual(1, 0, 'Expecting Error due to delete of non-existing instance. Output: {0} \n CMD: {1}'.format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Instance 'node1' does not exist in this backup catalog", + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(e.message, self.cmd)) + + # Add instance without pgdata + try: + self.run_pb([ + "add-instance", + "--instance=node1", + "-B", backup_dir + ]) + self.assertEqual(1, 0, 'Expecting Error due to adding instance without pgdata. Output: {0} \n CMD: {1}'.format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Required parameter not specified: PGDATA (-D, --pgdata)", + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(e.message, self.cmd)) + + # @unittest.skip("skip") + def test_already_exist(self): + """Failure with backup catalog already existed""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node')) + self.init_pb(backup_dir) + try: + self.show_pb(backup_dir, 'node') + self.assertEqual(1, 0, 'Expecting Error due to initialization in non-empty directory. 
Output: {0} \n CMD: {1}'.format(
+                repr(self.output), self.cmd))
+        except ProbackupException as e:
+            self.assertIn(
+                "ERROR: Instance 'node' does not exist in this backup catalog",
+                e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+
+    # @unittest.skip("skip")
+    def test_abs_path(self):
+        """failure when backup catalog path is not absolute"""
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node'))
+        try:
+            self.run_pb(["init", "-B", os.path.relpath("%s/backup" % node.base_dir, self.dir_path)])
+            self.assertEqual(1, 0, 'Expecting Error due to initialization with non-absolute path in --backup-path. Output: {0} \n CMD: {1}'.format(
+                repr(self.output), self.cmd))
+        except ProbackupException as e:
+            self.assertIn(
+                "ERROR: -B, --backup-path must be an absolute path",
+                e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+
+    # @unittest.skip("skip")
+    # @unittest.expectedFailure
+    def test_add_instance_idempotence(self):
+        """
+        https://github.com/postgrespro/pg_probackup/issues/219
+        """
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node'))
+        self.init_pb(backup_dir)
+
+        self.add_instance(backup_dir, 'node', node)
+        shutil.rmtree(os.path.join(backup_dir, 'backups', 'node'))
+
+        dir_backups = os.path.join(backup_dir, 'backups', 'node')
+        dir_wal = os.path.join(backup_dir, 'wal', 'node')
+
+        try:
+            self.add_instance(backup_dir, 'node', node)
+            # we should die here because exception is what we expect to happen
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because adding an already registered instance should not be possible "
+                "\n Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd))
+        except ProbackupException as e:
+            self.assertIn(
+                "ERROR: Instance 'node' WAL archive directory already exists: ",
+                e.message,
+                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
+                    repr(e.message), self.cmd))
+
+        try:
+            self.add_instance(backup_dir, 'node', node)
+            # we should die here because exception is what we expect to happen
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because adding an already registered instance should not be possible "
+                "\n Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd))
+        except ProbackupException as e:
+            self.assertIn(
+                "ERROR: Instance 'node' WAL archive directory already exists: ",
+                e.message,
+                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
+                    repr(e.message), self.cmd))
diff --git a/tests/locking_test.py b/tests/locking_test.py
new file mode 100644
index 000000000..5367c2610
--- /dev/null
+++ b/tests/locking_test.py
@@ -0,0 +1,629 @@
+import unittest
+import os
+from time import sleep
+from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
+
+
+class LockingTest(ProbackupTest, unittest.TestCase):
+
+    # @unittest.skip("skip")
+    # @unittest.expectedFailure
+    def test_locking_running_validate_1(self):
+        """
+        make node, take full backup, stop it in the middle,
+        run validate, expect it to be executed successfully,
+        concurrent RUNNING backup with pid file and active process is legal
+        """
+        self._check_gdb_flag_or_skip_test()
+
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            initdb_params=['--data-checksums'])
+
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        self.backup_node(backup_dir, 'node', node)
+
+        gdb = self.backup_node(
+            backup_dir, 'node', node, gdb=True)
+
+        gdb.set_breakpoint('backup_non_data_file')
+        gdb.run_until_break()
+
+        gdb.continue_execution_until_break(20)
+
+        self.assertEqual(
+            'OK', self.show_pb(backup_dir, 'node')[0]['status'])
+
+        self.assertEqual(
+            'RUNNING', self.show_pb(backup_dir, 'node')[1]['status'])
+
+        validate_output = self.validate_pb(
+            backup_dir, options=['--log-level-console=LOG'])
+
+        backup_id = self.show_pb(backup_dir, 'node')[1]['id']
+
+        self.assertIn(
+            "is using backup {0}, and is still running".format(backup_id),
+            validate_output,
+            '\n Unexpected Validate Output: {0}\n'.format(repr(validate_output)))
+
+        self.assertEqual(
+            'OK', self.show_pb(backup_dir, 'node')[0]['status'])
+
+        self.assertEqual(
+            'RUNNING', self.show_pb(backup_dir, 'node')[1]['status'])
+
+        # Clean after yourself
+        gdb.kill()
+
+    def test_locking_running_validate_2(self):
+        """
+        make node, take full backup, stop it in the middle,
+        kill process so no cleanup is done - pid file is in place,
+        run validate, expect it to fail,
+        RUNNING backup with pid file AND without active pid is legal,
+        but its status must be changed to ERROR and the pid file deleted
+        """
+        self._check_gdb_flag_or_skip_test()
+
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            initdb_params=['--data-checksums'])
+
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        self.backup_node(backup_dir, 'node', node)
+
+        gdb = self.backup_node(
+            backup_dir, 'node', node, gdb=True)
+
+        gdb.set_breakpoint('backup_non_data_file')
+        gdb.run_until_break()
+
+        gdb.continue_execution_until_break(20)
+
+        gdb._execute('signal SIGKILL')
+        gdb.continue_execution_until_error()
+
+        self.assertEqual(
+            'OK', self.show_pb(backup_dir, 'node')[0]['status'])
+
+        self.assertEqual(
+            'RUNNING', self.show_pb(backup_dir, 'node')[1]['status'])
+
+        backup_id = self.show_pb(backup_dir, 'node')[1]['id']
+
+        try:
+            self.validate_pb(backup_dir)
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because RUNNING backup is no longer active.\n "
+                "Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd))
+        except ProbackupException as e:
+            self.assertTrue(
+                "which used backup {0} no longer exists".format(
+                    backup_id) in e.message and
+                "Backup {0} has status RUNNING, change it "
+                "to ERROR and skip validation".format(
+                    backup_id) in e.message and
+                "WARNING: Some backups are not valid" in
+                e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))
+
+        self.assertEqual(
+            'OK', self.show_pb(backup_dir, 'node')[0]['status'])
+
+        self.assertEqual(
+            'ERROR', self.show_pb(backup_dir, 'node')[1]['status'])
+
+        # Clean after yourself
+        gdb.kill()
+
+    def test_locking_running_validate_2_specific_id(self):
+        """
+        make node, take full backup, stop it in the middle,
+        kill process so no cleanup is done - pid file is in place,
+        run validate on this specific backup,
+        expect it to fail,
+        RUNNING backup with pid file AND without active pid is legal,
+        but its status must be changed to ERROR and the pid file deleted
+        """
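+        # gdb is used to freeze the backup mid-flight;
+        # the test is skipped when the suite is run without gdb support
+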
self._check_gdb_flag_or_skip_test() + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node) + + gdb = self.backup_node( + backup_dir, 'node', node, gdb=True) + + gdb.set_breakpoint('backup_non_data_file') + gdb.run_until_break() + + gdb.continue_execution_until_break(20) + + gdb._execute('signal SIGKILL') + gdb.continue_execution_until_error() + + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node')[0]['status']) + + self.assertEqual( + 'RUNNING', self.show_pb(backup_dir, 'node')[1]['status']) + + backup_id = self.show_pb(backup_dir, 'node')[1]['id'] + + try: + self.validate_pb(backup_dir, 'node', backup_id) + self.assertEqual( + 1, 0, + "Expecting Error because RUNNING backup is no longer active.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + "which used backup {0} no longer exists".format( + backup_id) in e.message and + "Backup {0} has status RUNNING, change it " + "to ERROR and skip validation".format( + backup_id) in e.message and + "ERROR: Backup {0} has status: ERROR".format(backup_id) in + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node')[0]['status']) + + self.assertEqual( + 'ERROR', self.show_pb(backup_dir, 'node')[1]['status']) + + try: + self.validate_pb(backup_dir, 'node', backup_id) + self.assertEqual( + 1, 0, + "Expecting Error because backup has status ERROR.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Backup {0} has status: ERROR".format(backup_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + try: + self.validate_pb(backup_dir) + self.assertEqual( + 1, 0, + "Expecting Error because backup has status ERROR.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + "WARNING: Backup {0} has status ERROR. 
Skip validation".format(
+                    backup_id) in e.message and
+                "WARNING: Some backups are not valid" in e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))
+
+        # Clean after yourself
+        gdb.kill()
+
+    def test_locking_running_3(self):
+        """
+        make node, take full backup, stop it in the middle,
+        terminate process, delete pid file,
+        run validate, expect it to fail,
+        RUNNING backup without pid file AND without active pid is legal,
+        its status must be changed to ERROR
+        """
+        self._check_gdb_flag_or_skip_test()
+
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            initdb_params=['--data-checksums'])
+
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        self.backup_node(backup_dir, 'node', node)
+
+        gdb = self.backup_node(
+            backup_dir, 'node', node, gdb=True)
+
+        gdb.set_breakpoint('backup_non_data_file')
+        gdb.run_until_break()
+
+        gdb.continue_execution_until_break(20)
+
+        gdb._execute('signal SIGKILL')
+        gdb.continue_execution_until_error()
+
+        self.assertEqual(
+            'OK', self.show_pb(backup_dir, 'node')[0]['status'])
+
+        self.assertEqual(
+            'RUNNING', self.show_pb(backup_dir, 'node')[1]['status'])
+
+        backup_id = self.show_pb(backup_dir, 'node')[1]['id']
+
+        os.remove(
+            os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.pid'))
+
+        try:
+            self.validate_pb(backup_dir)
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because RUNNING backup is no longer active.\n "
+                "Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd))
+        except ProbackupException as e:
+            self.assertTrue(
+                "Backup {0} has status RUNNING, change it "
+                "to ERROR and skip validation".format(
+                    backup_id) in e.message and
+                "WARNING: Some backups are not valid" in
+                e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))
+
+        self.assertEqual(
+            'OK', self.show_pb(backup_dir, 'node')[0]['status'])
+
+        self.assertEqual(
+            'ERROR', self.show_pb(backup_dir, 'node')[1]['status'])
+
+        # Clean after yourself
+        gdb.kill()
+
+    def test_locking_restore_locked(self):
+        """
+        make node, take full backup, take two page backups,
+        launch validate on PAGE1 and stop it in the middle,
+        launch restore of PAGE2.
+        Expect restore to succeed because read-only locks
+        do not conflict
+        """
+        self._check_gdb_flag_or_skip_test()
+
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            initdb_params=['--data-checksums'])
+
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # FULL
+        full_id = self.backup_node(backup_dir, 'node', node)
+
+        # PAGE1
+        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
+
+        # PAGE2
+        self.backup_node(backup_dir, 'node', node, backup_type='page')
+
+        gdb = self.validate_pb(
+            backup_dir, 'node', backup_id=backup_id, gdb=True)
+
+        gdb.set_breakpoint('pgBackupValidate')
+        gdb.run_until_break()
+
+        node.cleanup()
+
+        self.restore_node(backup_dir, 'node', node)
+
+        # Clean after yourself
+        gdb.kill()
+
+    def test_concurrent_delete_and_restore(self):
+        """
+        make node, take full backup, take page backup,
+        launch validate on FULL and stop it in the middle,
+        launch restore of PAGE.
+        Expect restore to fail because validation of
+        intermediate backup is impossible
+        """
+        self._check_gdb_flag_or_skip_test()
+
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            initdb_params=['--data-checksums'])
+
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # FULL
+        backup_id = self.backup_node(backup_dir, 'node', node)
+
+        # PAGE1
+        restore_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
+
+        gdb = self.delete_pb(
+            backup_dir, 'node', backup_id=backup_id, gdb=True)
+
+        # gdb.set_breakpoint('pgFileDelete')
+        gdb.set_breakpoint('delete_backup_files')
+        gdb.run_until_break()
+
+        node.cleanup()
+
+        try:
+            self.restore_node(
+                backup_dir, 'node', node, options=['--no-validate'])
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because restore without whole chain validation "
+                "is prohibited unless --no-validate provided.\n "
+                "Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd))
+        except ProbackupException as e:
+            self.assertTrue(
+                "Backup {0} is used without validation".format(
+                    restore_id) in e.message and
+                'is using backup {0}, and is still running'.format(
+                    backup_id) in e.message and
+                'ERROR: Cannot lock backup' in e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))
+
+        # Clean after yourself
+        gdb.kill()
+
+    def test_locking_concurrent_validate_and_backup(self):
+        """
+        make node, take full backup, launch validate
+        and stop it in the middle, take page backup.
+        Expect PAGE backup to be successfully executed
+        """
+        self._check_gdb_flag_or_skip_test()
+
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            initdb_params=['--data-checksums'])
+
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # FULL
+        self.backup_node(backup_dir, 'node', node)
+
+        # PAGE2
+        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
+
+        gdb = self.validate_pb(
+            backup_dir, 'node', backup_id=backup_id, gdb=True)
+
+        gdb.set_breakpoint('pgBackupValidate')
+        gdb.run_until_break()
+
+        # This PAGE backup is expected to be successful
+        self.backup_node(backup_dir, 'node', node, backup_type='page')
+
+        # Clean after yourself
+        gdb.kill()
+
+    def test_locking_concurrent_restore_and_delete(self):
+        """
+        make node, take full backup, launch restore
+        and stop it in the middle, delete full backup.
+        Expect it to fail.
+        """
+        self._check_gdb_flag_or_skip_test()
+
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            initdb_params=['--data-checksums'])
+
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # FULL
+        full_id = self.backup_node(backup_dir, 'node', node)
+
+        node.cleanup()
+        gdb = self.restore_node(backup_dir, 'node', node, gdb=True)
+
+        gdb.set_breakpoint('create_data_directories')
+        gdb.run_until_break()
+
+        try:
+            self.delete_pb(backup_dir, 'node', full_id)
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because backup is locked\n "
+                "Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd))
+        except ProbackupException as e:
+            self.assertIn(
+                "ERROR: Cannot lock backup {0} directory".format(full_id),
+                e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))
+
+        # Clean after yourself
+        gdb.kill()
+
+    def test_backup_directory_name(self):
+        """
+        """
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            initdb_params=['--data-checksums'])
+
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # FULL
+        full_id_1 = self.backup_node(backup_dir, 'node', node)
+        page_id_1 = self.backup_node(backup_dir, 'node', node, backup_type='page')
+
+        full_id_2 = self.backup_node(backup_dir, 'node', node)
+        page_id_2 = self.backup_node(backup_dir, 'node', node, backup_type='page')
+
+        node.cleanup()
+
+        old_path = os.path.join(backup_dir, 'backups', 'node', full_id_1)
+        new_path = os.path.join(backup_dir, 'backups', 'node', 'hello_kitty')
+
+        os.rename(old_path, new_path)
+
+        # show, validate, restore and delete must still work for the renamed backup
+        self.show_pb(backup_dir, 'node', full_id_1)
+
+        self.validate_pb(backup_dir)
+        self.validate_pb(backup_dir, 'node')
+        self.validate_pb(backup_dir, 'node', full_id_1)
+
+        self.restore_node(backup_dir, 'node', node, backup_id=full_id_1)
+
+        self.delete_pb(backup_dir, 'node', full_id_1)
+
+        old_path = os.path.join(backup_dir, 'backups', 'node', full_id_2)
+        new_path = os.path.join(backup_dir, 'backups', 'node', 'hello_kitty')
+
+        self.set_backup(
+
backup_dir, 'node', full_id_2, options=['--note=hello']) + + self.merge_backup(backup_dir, 'node', page_id_2, options=["-j", "4"]) + + self.assertNotIn( + 'note', + self.show_pb(backup_dir, 'node', page_id_2)) + + # Clean after yourself + + def test_empty_lock_file(self): + """ + https://github.com/postgrespro/pg_probackup/issues/308 + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Fill with data + node.pgbench_init(scale=100) + + # FULL + backup_id = self.backup_node(backup_dir, 'node', node) + + lockfile = os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.pid') + with open(lockfile, "w+") as f: + f.truncate() + + out = self.validate_pb(backup_dir, 'node', backup_id) + + self.assertIn( + "Waiting 30 seconds on empty exclusive lock for backup", out) + +# lockfile = os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.pid') +# with open(lockfile, "w+") as f: +# f.truncate() +# +# p1 = self.validate_pb(backup_dir, 'node', backup_id, asynchronous=True, +# options=['--log-level-file=LOG', '--log-filename=validate.log']) +# sleep(3) +# p2 = self.delete_pb(backup_dir, 'node', backup_id, asynchronous=True, +# options=['--log-level-file=LOG', '--log-filename=delete.log']) +# +# p1.wait() +# p2.wait() + + def test_shared_lock(self): + """ + Make sure that shared lock leaves no files with pids + """ + self._check_gdb_flag_or_skip_test() + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Fill with data + node.pgbench_init(scale=1) + + # FULL + backup_id = self.backup_node(backup_dir, 'node', node) + + lockfile_excl = os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.pid') + lockfile_shr = os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup_ro.pid') + + self.validate_pb(backup_dir, 'node', backup_id) + + self.assertFalse( + os.path.exists(lockfile_excl), + "File should not exist: {0}".format(lockfile_excl)) + + self.assertFalse( + os.path.exists(lockfile_shr), + "File should not exist: {0}".format(lockfile_shr)) + + gdb = self.validate_pb(backup_dir, 'node', backup_id, gdb=True) + + gdb.set_breakpoint('validate_one_page') + gdb.run_until_break() + gdb.kill() + + self.assertTrue( + os.path.exists(lockfile_shr), + "File should exist: {0}".format(lockfile_shr)) + + self.validate_pb(backup_dir, 'node', backup_id) + + self.assertFalse( + os.path.exists(lockfile_excl), + "File should not exist: {0}".format(lockfile_excl)) + + self.assertFalse( + os.path.exists(lockfile_shr), + "File should not exist: {0}".format(lockfile_shr)) + diff --git a/tests/logging_test.py b/tests/logging_test.py new file mode 100644 index 000000000..c5cdfa344 --- /dev/null +++ b/tests/logging_test.py @@ -0,0 +1,345 @@ +import unittest +import os +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +import datetime + +class LogTest(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + # @unittest.expectedFailure 
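+    # log rotation is driven by the --log-rotation-age and --log-rotation-size
+    # options stored in the instance config via set_config below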
+ # PGPRO-2154 + def test_log_rotation(self): + """ + """ + self._check_gdb_flag_or_skip_test() + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.set_config( + backup_dir, 'node', + options=['--log-rotation-age=1s', '--log-rotation-size=1MB']) + + self.backup_node( + backup_dir, 'node', node, + options=['--stream', '--log-level-file=verbose']) + + gdb = self.backup_node( + backup_dir, 'node', node, + options=['--stream', '--log-level-file=verbose'], gdb=True) + + gdb.set_breakpoint('open_logfile') + gdb.run_until_break() + gdb.continue_execution_until_exit() + + def test_log_filename_strftime(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.set_config( + backup_dir, 'node', + options=['--log-rotation-age=1d']) + + self.backup_node( + backup_dir, 'node', node, + options=[ + '--stream', + '--log-level-file=VERBOSE', + '--log-filename=pg_probackup-%a.log']) + + day_of_week = datetime.datetime.today().strftime("%a") + + path = os.path.join( + backup_dir, 'log', 'pg_probackup-{0}.log'.format(day_of_week)) + + self.assertTrue(os.path.isfile(path)) + + def test_truncate_rotation_file(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.set_config( + backup_dir, 'node', + options=['--log-rotation-age=1d']) + + self.backup_node( + backup_dir, 'node', node, + options=[ + '--stream', + '--log-level-file=VERBOSE']) + + rotation_file_path = os.path.join( + backup_dir, 'log', 'pg_probackup.log.rotation') + + log_file_path = os.path.join( + backup_dir, 'log', 'pg_probackup.log') + + log_file_size = os.stat(log_file_path).st_size + + self.assertTrue(os.path.isfile(rotation_file_path)) + + # truncate .rotation file + with open(rotation_file_path, "rb+", 0) as f: + f.truncate() + f.flush() + f.close + + output = self.backup_node( + backup_dir, 'node', node, + options=[ + '--stream', + '--log-level-file=LOG'], + return_id=False) + + # check that log file wasn`t rotated + self.assertGreater( + os.stat(log_file_path).st_size, + log_file_size) + + self.assertIn( + 'WARNING: cannot read creation timestamp from rotation file', + output) + + output = self.backup_node( + backup_dir, 'node', node, + options=[ + '--stream', + '--log-level-file=LOG'], + return_id=False) + + # check that log file wasn`t rotated + self.assertGreater( + os.stat(log_file_path).st_size, + log_file_size) + + self.assertNotIn( + 'WARNING: cannot read creation timestamp from rotation file', + output) + + self.assertTrue(os.path.isfile(rotation_file_path)) + + def test_unlink_rotation_file(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir 
= os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.set_config( + backup_dir, 'node', + options=['--log-rotation-age=1d']) + + self.backup_node( + backup_dir, 'node', node, + options=[ + '--stream', + '--log-level-file=VERBOSE']) + + rotation_file_path = os.path.join( + backup_dir, 'log', 'pg_probackup.log.rotation') + + log_file_path = os.path.join( + backup_dir, 'log', 'pg_probackup.log') + + log_file_size = os.stat(log_file_path).st_size + + self.assertTrue(os.path.isfile(rotation_file_path)) + + # unlink .rotation file + os.unlink(rotation_file_path) + + output = self.backup_node( + backup_dir, 'node', node, + options=[ + '--stream', + '--log-level-file=LOG'], + return_id=False) + + # check that log file wasn`t rotated + self.assertGreater( + os.stat(log_file_path).st_size, + log_file_size) + + self.assertIn( + 'WARNING: missing rotation file:', + output) + + self.assertTrue(os.path.isfile(rotation_file_path)) + + output = self.backup_node( + backup_dir, 'node', node, + options=[ + '--stream', + '--log-level-file=VERBOSE'], + return_id=False) + + self.assertNotIn( + 'WARNING: missing rotation file:', + output) + + # check that log file wasn`t rotated + self.assertGreater( + os.stat(log_file_path).st_size, + log_file_size) + + def test_garbage_in_rotation_file(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.set_config( + backup_dir, 'node', + options=['--log-rotation-age=1d']) + + self.backup_node( + backup_dir, 'node', node, + options=[ + '--stream', + '--log-level-file=VERBOSE']) + + rotation_file_path = os.path.join( + backup_dir, 'log', 'pg_probackup.log.rotation') + + log_file_path = os.path.join( + backup_dir, 'log', 'pg_probackup.log') + + log_file_size = os.stat(log_file_path).st_size + + self.assertTrue(os.path.isfile(rotation_file_path)) + + # mangle .rotation file + with open(rotation_file_path, "w+b", 0) as f: + f.write(b"blah") + output = self.backup_node( + backup_dir, 'node', node, + options=[ + '--stream', + '--log-level-file=LOG'], + return_id=False) + + # check that log file wasn`t rotated + self.assertGreater( + os.stat(log_file_path).st_size, + log_file_size) + + self.assertIn( + 'WARNING: rotation file', + output) + + self.assertIn( + 'has wrong creation timestamp', + output) + + self.assertTrue(os.path.isfile(rotation_file_path)) + + output = self.backup_node( + backup_dir, 'node', node, + options=[ + '--stream', + '--log-level-file=LOG'], + return_id=False) + + self.assertNotIn( + 'WARNING: rotation file', + output) + + # check that log file wasn`t rotated + self.assertGreater( + os.stat(log_file_path).st_size, + log_file_size) + + def test_issue_274(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + 
replica.cleanup() + + self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.restore_node(backup_dir, 'node', replica) + + # Settings for Replica + self.set_replica(node, replica, synchronous=True) + self.set_archiving(backup_dir, 'node', replica, replica=True) + self.set_auto_conf(replica, {'port': replica.port}) + + replica.slow_start(replica=True) + + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,45600) i") + + log_dir = os.path.join(backup_dir, "somedir") + + try: + self.backup_node( + backup_dir, 'node', replica, backup_type='page', + options=[ + '--log-level-console=verbose', '--log-level-file=verbose', + '--log-directory={0}'.format(log_dir), '-j1', + '--log-filename=somelog.txt', '--archive-timeout=5s', + '--no-validate', '--log-rotation-size=100KB']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of archiving timeout" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: WAL segment', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + log_file_path = os.path.join( + log_dir, 'somelog.txt') + + self.assertTrue(os.path.isfile(log_file_path)) + + with open(log_file_path, "r+") as f: + log_content = f.read() + + self.assertIn('INFO: command:', log_content) diff --git a/tests/merge_test.py b/tests/merge_test.py new file mode 100644 index 000000000..ffa73263c --- /dev/null +++ b/tests/merge_test.py @@ -0,0 +1,2759 @@ +# coding: utf-8 + +import unittest +import os +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from testgres import QueryException +import shutil +from datetime import datetime, timedelta +import time +import subprocess + +class MergeTest(ProbackupTest, unittest.TestCase): + + def test_basic_merge_full_page(self): + """ + Test MERGE command, it merges FULL backup with target PAGE backups + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") + + # Initialize instance and backup directory + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=["--data-checksums"]) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, "node", node) + self.set_archiving(backup_dir, "node", node) + node.slow_start() + + # Do full backup + self.backup_node(backup_dir, "node", node, options=['--compress']) + show_backup = self.show_pb(backup_dir, "node")[0] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "FULL") + + # Fill with data + with node.connect() as conn: + conn.execute("create table test (id int)") + conn.execute( + "insert into test select i from generate_series(1,10) s(i)") + conn.commit() + + # Do first page backup + self.backup_node(backup_dir, "node", node, backup_type="page", options=['--compress']) + show_backup = self.show_pb(backup_dir, "node")[1] + + # sanity check + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "PAGE") + + # Fill with data + with node.connect() as conn: + conn.execute( + "insert into test select i from generate_series(1,10) s(i)") + count1 = conn.execute("select count(*) from test") + conn.commit() + + # Do second page backup + self.backup_node( + backup_dir, "node", node, + backup_type="page", 
options=['--compress']) + show_backup = self.show_pb(backup_dir, "node")[2] + page_id = show_backup["id"] + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + # sanity check + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "PAGE") + + # Merge all backups + self.merge_backup(backup_dir, "node", page_id, + options=["-j", "4"]) + show_backups = self.show_pb(backup_dir, "node") + + # sanity check + self.assertEqual(len(show_backups), 1) + self.assertEqual(show_backups[0]["status"], "OK") + self.assertEqual(show_backups[0]["backup-mode"], "FULL") + + # Drop node and restore it + node.cleanup() + self.restore_node(backup_dir, 'node', node) + + # Check physical correctness + if self.paranoia: + pgdata_restored = self.pgdata_content( + node.data_dir, ignore_ptrack=False) + self.compare_pgdata(pgdata, pgdata_restored) + + node.slow_start() + + # Check restored node + count2 = node.execute("postgres", "select count(*) from test") + self.assertEqual(count1, count2) + + def test_merge_compressed_backups(self): + """ + Test MERGE command with compressed backups + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") + + # Initialize instance and backup directory + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=["--data-checksums"]) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, "node", node) + self.set_archiving(backup_dir, "node", node) + node.slow_start() + + # Do full compressed backup + self.backup_node(backup_dir, "node", node, options=['--compress']) + show_backup = self.show_pb(backup_dir, "node")[0] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "FULL") + + # Fill with data + with node.connect() as conn: + conn.execute("create table test (id int)") + conn.execute( + "insert into test select i from generate_series(1,10) s(i)") + count1 = conn.execute("select count(*) from test") + conn.commit() + + # Do compressed page backup + self.backup_node( + backup_dir, "node", node, backup_type="page", options=['--compress']) + show_backup = self.show_pb(backup_dir, "node")[1] + page_id = show_backup["id"] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "PAGE") + + # Merge all backups + self.merge_backup(backup_dir, "node", page_id, options=['-j2']) + show_backups = self.show_pb(backup_dir, "node") + + self.assertEqual(len(show_backups), 1) + self.assertEqual(show_backups[0]["status"], "OK") + self.assertEqual(show_backups[0]["backup-mode"], "FULL") + + # Drop node and restore it + node.cleanup() + self.restore_node(backup_dir, 'node', node) + node.slow_start() + + # Check restored node + count2 = node.execute("postgres", "select count(*) from test") + self.assertEqual(count1, count2) + + # Clean after yourself + node.cleanup() + + def test_merge_compressed_backups_1(self): + """ + Test MERGE command with compressed backups + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") + + # Initialize instance and backup directory + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, initdb_params=["--data-checksums"]) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, "node", node) + self.set_archiving(backup_dir, "node", node) + node.slow_start() + + # Fill with data + node.pgbench_init(scale=10) + + # Do compressed FULL backup + 
self.backup_node(backup_dir, "node", node, options=['--compress', '--stream']) + show_backup = self.show_pb(backup_dir, "node")[0] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "FULL") + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do compressed DELTA backup + self.backup_node( + backup_dir, "node", node, + backup_type="delta", options=['--compress', '--stream']) + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do compressed PAGE backup + self.backup_node( + backup_dir, "node", node, backup_type="page", options=['--compress']) + + pgdata = self.pgdata_content(node.data_dir) + + show_backup = self.show_pb(backup_dir, "node")[2] + page_id = show_backup["id"] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "PAGE") + + # Merge all backups + self.merge_backup(backup_dir, "node", page_id, options=['-j2']) + show_backups = self.show_pb(backup_dir, "node") + + self.assertEqual(len(show_backups), 1) + self.assertEqual(show_backups[0]["status"], "OK") + self.assertEqual(show_backups[0]["backup-mode"], "FULL") + + # Drop node and restore it + node.cleanup() + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + node.cleanup() + + def test_merge_compressed_and_uncompressed_backups(self): + """ + Test MERGE command with compressed and uncompressed backups + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") + + # Initialize instance and backup directory + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, initdb_params=["--data-checksums"], + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, "node", node) + self.set_archiving(backup_dir, "node", node) + node.slow_start() + + # Fill with data + node.pgbench_init(scale=10) + + # Do compressed FULL backup + self.backup_node(backup_dir, "node", node, options=[ + '--compress-algorithm=zlib', '--stream']) + show_backup = self.show_pb(backup_dir, "node")[0] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "FULL") + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do compressed DELTA backup + self.backup_node( + backup_dir, "node", node, backup_type="delta", + options=['--compress', '--stream']) + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do uncompressed PAGE backup + self.backup_node(backup_dir, "node", node, backup_type="page") + + pgdata = self.pgdata_content(node.data_dir) + + show_backup = self.show_pb(backup_dir, "node")[2] + page_id = show_backup["id"] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "PAGE") + + # Merge all backups + self.merge_backup(backup_dir, "node", page_id, options=['-j2']) + show_backups = self.show_pb(backup_dir, "node") + + self.assertEqual(len(show_backups), 1) + self.assertEqual(show_backups[0]["status"], "OK") + self.assertEqual(show_backups[0]["backup-mode"], "FULL") + + # Drop node and restore it + node.cleanup() + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + 
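+ # Physical comparison: the merged-and-restored data directory must match the original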
self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + node.cleanup() + + def test_merge_compressed_and_uncompressed_backups_1(self): + """ + Test MERGE command with compressed and uncompressed backups + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") + + # Initialize instance and backup directory + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, initdb_params=["--data-checksums"], + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, "node", node) + self.set_archiving(backup_dir, "node", node) + node.slow_start() + + # Fill with data + node.pgbench_init(scale=5) + + # Do compressed FULL backup + self.backup_node(backup_dir, "node", node, options=[ + '--compress-algorithm=zlib', '--stream']) + show_backup = self.show_pb(backup_dir, "node")[0] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "FULL") + + # Change data + pgbench = node.pgbench(options=['-T', '20', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do uncompressed DELTA backup + self.backup_node( + backup_dir, "node", node, backup_type="delta", + options=['--stream']) + + # Change data + pgbench = node.pgbench(options=['-T', '20', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do compressed PAGE backup + self.backup_node( + backup_dir, "node", node, backup_type="page", + options=['--compress-algorithm=zlib']) + + pgdata = self.pgdata_content(node.data_dir) + + show_backup = self.show_pb(backup_dir, "node")[2] + page_id = show_backup["id"] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "PAGE") + + # Merge all backups + self.merge_backup(backup_dir, "node", page_id) + show_backups = self.show_pb(backup_dir, "node") + + self.assertEqual(len(show_backups), 1) + self.assertEqual(show_backups[0]["status"], "OK") + self.assertEqual(show_backups[0]["backup-mode"], "FULL") + + # Drop node and restore it + node.cleanup() + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + node.cleanup() + + def test_merge_compressed_and_uncompressed_backups_2(self): + """ + Test MERGE command with compressed and uncompressed backups + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") + + # Initialize instance and backup directory + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, initdb_params=["--data-checksums"], + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, "node", node) + self.set_archiving(backup_dir, "node", node) + node.slow_start() + + # Fill with data + node.pgbench_init(scale=20) + + # Do uncompressed FULL backup + self.backup_node(backup_dir, "node", node) + show_backup = self.show_pb(backup_dir, "node")[0] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "FULL") + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do compressed DELTA backup + self.backup_node( + backup_dir, "node", node, backup_type="delta", + options=['--compress-algorithm=zlib', '--stream']) + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do uncompressed PAGE backup + self.backup_node( + backup_dir, "node", node, 
backup_type="page") + + pgdata = self.pgdata_content(node.data_dir) + + show_backup = self.show_pb(backup_dir, "node")[2] + page_id = show_backup["id"] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "PAGE") + + # Merge all backups + self.merge_backup(backup_dir, "node", page_id) + show_backups = self.show_pb(backup_dir, "node") + + self.assertEqual(len(show_backups), 1) + self.assertEqual(show_backups[0]["status"], "OK") + self.assertEqual(show_backups[0]["backup-mode"], "FULL") + + # Drop node and restore it + node.cleanup() + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_merge_tablespaces(self): + """ + Create tablespace with table, take FULL backup, + create another tablespace with another table and drop previous + tablespace, take page backup, merge it and restore + + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, initdb_params=['--data-checksums'], + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'somedata') + node.safe_psql( + "postgres", + "create table t_heap tablespace somedata as select i as id," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(0,100) i" + ) + # FULL backup + self.backup_node(backup_dir, 'node', node) + + # Create new tablespace + self.create_tblspace_in_node(node, 'somedata1') + + node.safe_psql( + "postgres", + "create table t_heap1 tablespace somedata1 as select i as id," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(0,100) i" + ) + + node.safe_psql( + "postgres", + "drop table t_heap" + ) + + # Drop old tablespace + node.safe_psql( + "postgres", + "drop tablespace somedata" + ) + + # PAGE backup + backup_id = self.backup_node(backup_dir, 'node', node, backup_type="page") + + pgdata = self.pgdata_content(node.data_dir) + + node.stop() + shutil.rmtree( + self.get_tblspace_path(node, 'somedata'), + ignore_errors=True) + shutil.rmtree( + self.get_tblspace_path(node, 'somedata1'), + ignore_errors=True) + node.cleanup() + + self.merge_backup(backup_dir, 'node', backup_id) + + self.restore_node( + backup_dir, 'node', node, options=["-j", "4"]) + + pgdata_restored = self.pgdata_content(node.data_dir) + + # this compare should fail because we lost some directories + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_merge_tablespaces_1(self): + """ + Create tablespace with table, take FULL backup, + create another tablespace with another table, take page backup, + drop first tablespace and take delta backup, + merge it and restore + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, initdb_params=['--data-checksums'], + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'somedata') + + # FULL backup + self.backup_node(backup_dir, 'node', node) + node.safe_psql( + "postgres", + "create table t_heap 
tablespace somedata as select i as id," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(0,100) i" + ) + + # CREATE NEW TABLESPACE + self.create_tblspace_in_node(node, 'somedata1') + + node.safe_psql( + "postgres", + "create table t_heap1 tablespace somedata1 as select i as id," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(0,100) i" + ) + + # PAGE backup + self.backup_node(backup_dir, 'node', node, backup_type="page") + + node.safe_psql( + "postgres", + "drop table t_heap" + ) + node.safe_psql( + "postgres", + "drop tablespace somedata" + ) + + # DELTA backup + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="delta") + + pgdata = self.pgdata_content(node.data_dir) + + node.stop() + shutil.rmtree( + self.get_tblspace_path(node, 'somedata'), + ignore_errors=True) + shutil.rmtree( + self.get_tblspace_path(node, 'somedata1'), + ignore_errors=True) + node.cleanup() + + self.merge_backup(backup_dir, 'node', backup_id) + + self.restore_node( + backup_dir, 'node', node, + options=["-j", "4"]) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + def test_merge_page_truncate(self): + """ + make node, create table, take full backup, + delete last 3 pages, vacuum relation, + take page backup, merge full and page, + restore last page backup and check data correctness + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '300s'}) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node_restored.cleanup() + node.slow_start() + self.create_tblspace_in_node(node, 'somedata') + + node.safe_psql( + "postgres", + "create sequence t_seq; " + "create table t_heap tablespace somedata as select i as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1024) i;") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "delete from t_heap where ctid >= '(11,0)'") + node.safe_psql( + "postgres", + "vacuum t_heap") + + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + page_id = self.show_pb(backup_dir, "node")[1]["id"] + self.merge_backup(backup_dir, "node", page_id) + + self.validate_pb(backup_dir) + + old_tablespace = self.get_tblspace_path(node, 'somedata') + new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format(old_tablespace, new_tablespace)]) + + # Physical comparison + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + # Logical comparison + result1 = node.safe_psql( + "postgres", + "select * from t_heap") + + result2 = node_restored.safe_psql( + "postgres", + "select * from t_heap") + + self.assertEqual(result1, result2) + + 
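The truncate tests above and below (page, delta, ptrack) all repeat one merge-and-verify skeleton. As an illustrative sketch only (not part of the patch), that core pattern looks roughly like this, reusing the ProbackupTest helpers already imported in this module and assuming an initialized backup_dir, instance and running node as in the tests above:

    full_id = self.backup_node(backup_dir, 'node', node)                      # FULL backup
    page_id = self.backup_node(backup_dir, 'node', node, backup_type='page')  # incremental backup
    self.merge_backup(backup_dir, 'node', page_id)                            # fold the increment into its parent FULL
    show_backups = self.show_pb(backup_dir, 'node')
    self.assertEqual(len(show_backups), 1)                                    # only the merged FULL remains in the catalog
    self.assertEqual(show_backups[0]['backup-mode'], 'FULL')
    node.cleanup()
    self.restore_node(backup_dir, 'node', node)                               # restore from the merged backup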
def test_merge_delta_truncate(self): + """ + make node, create table, take full backup, + delete last 3 pages, vacuum relation, + take delta backup, merge full and delta, + restore merged backup and check data correctness + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '300s'}) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node_restored.cleanup() + node.slow_start() + self.create_tblspace_in_node(node, 'somedata') + + node.safe_psql( + "postgres", + "create sequence t_seq; " + "create table t_heap tablespace somedata as select i as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1024) i;") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "delete from t_heap where ctid >= '(11,0)'") + node.safe_psql( + "postgres", + "vacuum t_heap") + + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + page_id = self.show_pb(backup_dir, "node")[1]["id"] + self.merge_backup(backup_dir, "node", page_id) + + self.validate_pb(backup_dir) + + old_tablespace = self.get_tblspace_path(node, 'somedata') + new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format(old_tablespace, new_tablespace)]) + + # Physical comparison + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + # Logical comparison + result1 = node.safe_psql( + "postgres", + "select * from t_heap") + + result2 = node_restored.safe_psql( + "postgres", + "select * from t_heap") + + self.assertEqual(result1, result2) + + def test_merge_ptrack_truncate(self): + """ + make node, create table, take full backup, + delete last 3 pages, vacuum relation, + take ptrack backup, merge full and ptrack, + restore merged backup and check data correctness + """ + if not self.ptrack: + self.skipTest('Skipped because ptrack support is disabled') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + ptrack_enable=True) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.create_tblspace_in_node(node, 'somedata') + + node.safe_psql( + "postgres", + "create sequence t_seq; " + "create table t_heap tablespace somedata as select i as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1024) i;") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + 
"postgres", + "delete from t_heap where ctid >= '(11,0)'") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + page_id = self.backup_node( + backup_dir, 'node', node, backup_type='ptrack') + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + self.merge_backup(backup_dir, "node", page_id) + + self.validate_pb(backup_dir) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + old_tablespace = self.get_tblspace_path(node, 'somedata') + new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format(old_tablespace, new_tablespace)]) + + # Physical comparison + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + # Logical comparison + result1 = node.safe_psql( + "postgres", + "select * from t_heap") + + result2 = node_restored.safe_psql( + "postgres", + "select * from t_heap") + + self.assertEqual(result1, result2) + + # @unittest.skip("skip") + def test_merge_delta_delete(self): + """ + Make node, create tablespace with table, take full backup, + alter tablespace location, take delta backup, merge full and delta, + restore database. + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s', + } + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'somedata') + + # FULL backup + self.backup_node(backup_dir, 'node', node, options=["--stream"]) + + node.safe_psql( + "postgres", + "create table t_heap tablespace somedata as select i as id," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(0,100) i" + ) + + node.safe_psql( + "postgres", + "delete from t_heap" + ) + + node.safe_psql( + "postgres", + "vacuum t_heap" + ) + + # DELTA BACKUP + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', + options=["--stream"] + ) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + backup_id = self.show_pb(backup_dir, "node")[1]["id"] + self.merge_backup(backup_dir, "node", backup_id, options=["-j", "4"]) + + # RESTORE + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored') + ) + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + self.get_tblspace_path(node, 'somedata'), + self.get_tblspace_path(node_restored, 'somedata') + ) + ] + ) + + # GET RESTORED PGDATA AND COMPARE + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # START RESTORED NODE + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + # @unittest.skip("skip") + def test_continue_failed_merge(self): + """ + Check that failed MERGE can be continued + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, 
self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join( + self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "create table t_heap as select i as id," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(0,1000) i" + ) + + # DELTA BACKUP + self.backup_node( + backup_dir, 'node', node, backup_type='delta' + ) + + node.safe_psql( + "postgres", + "delete from t_heap" + ) + + node.safe_psql( + "postgres", + "vacuum t_heap" + ) + + # DELTA BACKUP + self.backup_node( + backup_dir, 'node', node, backup_type='delta' + ) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + backup_id = self.show_pb(backup_dir, "node")[2]["id"] + + gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) + + gdb.set_breakpoint('backup_non_data_file_internal') + gdb.run_until_break() + + gdb.continue_execution_until_break(5) + + gdb._execute('signal SIGKILL') + gdb._execute('detach') + time.sleep(1) + + print(self.show_pb(backup_dir, as_text=True, as_json=False)) + + # Try to continue failed MERGE + self.merge_backup(backup_dir, "node", backup_id) + + # Drop node and restore it + node.cleanup() + self.restore_node(backup_dir, 'node', node) + + # @unittest.skip("skip") + def test_continue_failed_merge_with_corrupted_delta_backup(self): + """ + Fail merge via gdb, corrupt DELTA backup, try to continue merge + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "create table t_heap as select i as id," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(0,1000) i") + + old_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + + # DELTA BACKUP + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + node.safe_psql( + "postgres", + "update t_heap set id = 100500") + + node.safe_psql( + "postgres", + "vacuum full t_heap") + + new_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + + # DELTA BACKUP + backup_id_2 = self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + backup_id = self.show_pb(backup_dir, "node")[1]["id"] + + # Failed MERGE + gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) + gdb.set_breakpoint('backup_non_data_file_internal') + gdb.run_until_break() + + gdb.continue_execution_until_break(2) + + gdb._execute('signal SIGKILL') + + # CORRUPT incremental backup + # read block from future + # block_size + backup_header = 8200 + file = os.path.join( + backup_dir, 'backups', 'node', + backup_id_2, 'database', new_path) + with open(file, 'rb') as f: + f.seek(8200) + block_1 = f.read(8200) + f.close + + # write block from future + file = os.path.join( + backup_dir, 'backups', 'node', + backup_id, 
'database', old_path) + with open(file, 'r+b') as f: + f.seek(8200) + f.write(block_1) + f.close() + + # Try to continue failed MERGE + try: + print(self.merge_backup(backup_dir, "node", backup_id)) + self.assertEqual( + 1, 0, + "Expecting Error because of incremental backup corruption.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + "ERROR: Backup {0} has status CORRUPT, merge is aborted".format( + backup_id) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + def test_continue_failed_merge_2(self): + """ + Check that failed MERGE on delete can be continued + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "create table t_heap as select i as id," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(0,1000) i") + + # DELTA BACKUP + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + node.safe_psql( + "postgres", + "delete from t_heap") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + # DELTA BACKUP + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + backup_id = self.show_pb(backup_dir, "node")[2]["id"] + + gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) + + gdb.set_breakpoint('pgFileDelete') + + gdb.run_until_break() + + gdb._execute('thread apply all bt') + + gdb.continue_execution_until_break(20) + + gdb._execute('thread apply all bt') + + gdb._execute('signal SIGKILL') + + print(self.show_pb(backup_dir, as_text=True, as_json=False)) + + backup_id_deleted = self.show_pb(backup_dir, "node")[1]["id"] + + # TODO: check that the full backup's meta info is equal to DELETING + + # Try to continue failed MERGE + self.merge_backup(backup_dir, "node", backup_id) + + def test_continue_failed_merge_3(self): + """ + Check that failed MERGE cannot be continued if intermediate + backup is missing. 
+ """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Create test data + node.safe_psql("postgres", "create sequence t_seq") + node.safe_psql( + "postgres", + "create table t_heap as select i as id, nextval('t_seq')" + " as t_seq, md5(i::text) as text, md5(i::text)::tsvector" + " as tsvector from generate_series(0,100000) i" + ) + + # FULL backup + self.backup_node(backup_dir, 'node', node) + + # CREATE FEW PAGE BACKUP + i = 0 + + while i < 2: + + node.safe_psql( + "postgres", + "delete from t_heap" + ) + + node.safe_psql( + "postgres", + "vacuum t_heap" + ) + node.safe_psql( + "postgres", + "insert into t_heap select i as id, nextval('t_seq') as t_seq," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(100,200000) i" + ) + + # PAGE BACKUP + self.backup_node( + backup_dir, 'node', node, backup_type='page' + ) + i = i + 1 + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + backup_id_merge = self.show_pb(backup_dir, "node")[2]["id"] + backup_id_delete = self.show_pb(backup_dir, "node")[1]["id"] + + print(self.show_pb(backup_dir, as_text=True, as_json=False)) + + gdb = self.merge_backup(backup_dir, "node", backup_id_merge, gdb=True) + + gdb.set_breakpoint('backup_non_data_file_internal') + gdb.run_until_break() + gdb.continue_execution_until_break(2) + + gdb._execute('signal SIGKILL') + + print(self.show_pb(backup_dir, as_text=True, as_json=False)) + # print(os.path.join(backup_dir, "backups", "node", backup_id_delete)) + + # DELETE PAGE1 + shutil.rmtree( + os.path.join(backup_dir, "backups", "node", backup_id_delete)) + + # Try to continue failed MERGE + try: + self.merge_backup(backup_dir, "node", backup_id_merge) + self.assertEqual( + 1, 0, + "Expecting Error because of backup corruption.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + "ERROR: Incremental chain is broken, " + "merge is impossible to finish" in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + def test_merge_different_compression_algo(self): + """ + Check that backups with different compression algorithms can be merged + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + self.backup_node( + backup_dir, 'node', node, options=['--compress-algorithm=zlib']) + + node.safe_psql( + "postgres", + "create table t_heap as select i as id," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(0,1000) i") + + # DELTA BACKUP + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['--compress-algorithm=pglz']) + + node.safe_psql( + "postgres", + "delete from t_heap") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + # DELTA BACKUP + self.backup_node( + backup_dir, 'node', 
node, backup_type='delta') + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + backup_id = self.show_pb(backup_dir, "node")[2]["id"] + + self.merge_backup(backup_dir, "node", backup_id) + + def test_merge_different_wal_modes(self): + """ + Check that backups with different wal modes can be merged + correctly + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL stream backup + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + # DELTA archive backup + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + self.merge_backup(backup_dir, 'node', backup_id=backup_id) + + self.assertEqual( + 'ARCHIVE', self.show_pb(backup_dir, 'node', backup_id)['wal']) + + # DELTA stream backup + backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['--stream']) + + self.merge_backup(backup_dir, 'node', backup_id=backup_id) + + self.assertEqual( + 'STREAM', self.show_pb(backup_dir, 'node', backup_id)['wal']) + + def test_crash_after_opening_backup_control_1(self): + """ + check that crashing after opening backup.control + for writing will not result in losing backup metadata + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL stream backup + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + # DELTA archive backup + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + print(self.show_pb( + backup_dir, 'node', as_json=False, as_text=True)) + + gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) + gdb.set_breakpoint('write_backup_filelist') + gdb.run_until_break() + + gdb.set_breakpoint('write_backup') + gdb.continue_execution_until_break() + gdb.set_breakpoint('pgBackupWriteControl') + gdb.continue_execution_until_break() + + gdb._execute('signal SIGKILL') + + print(self.show_pb( + backup_dir, 'node', as_json=False, as_text=True)) + + self.assertEqual( + 'MERGING', self.show_pb(backup_dir, 'node')[0]['status']) + + self.assertEqual( + 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) + + # @unittest.skip("skip") + def test_crash_after_opening_backup_control_2(self): + """ + check that crashing after opening backup_content.control + for writing will not result in losing metadata about backup files + TODO: rewrite + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Add data + node.pgbench_init(scale=3) + + # FULL backup + full_id = 
self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + # Change data + pgbench = node.pgbench(options=['-T', '20', '-c', '2']) + pgbench.wait() + + path = node.safe_psql( + 'postgres', + "select pg_relation_filepath('pgbench_accounts')").decode('utf-8').rstrip() + + fsm_path = path + '_fsm' + + node.safe_psql( + 'postgres', + 'vacuum pgbench_accounts') + + # DELTA backup + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + pgdata = self.pgdata_content(node.data_dir) + + print(self.show_pb( + backup_dir, 'node', as_json=False, as_text=True)) + + gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) + gdb.set_breakpoint('write_backup_filelist') + gdb.run_until_break() + +# gdb.set_breakpoint('sprintf') +# gdb.continue_execution_until_break(1) + + gdb._execute('signal SIGKILL') + + print(self.show_pb( + backup_dir, 'node', as_json=False, as_text=True)) + + self.assertEqual( + 'MERGING', self.show_pb(backup_dir, 'node')[0]['status']) + + self.assertEqual( + 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) + + # In to_backup drop file that comes from from_backup + # emulate crash during previous merge + file_to_remove = os.path.join( + backup_dir, 'backups', + 'node', full_id, 'database', fsm_path) + + # print(file_to_remove) + + os.remove(file_to_remove) + + # Continue failed merge + self.merge_backup(backup_dir, "node", backup_id) + + node.cleanup() + + # restore merge backup + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_losing_file_after_failed_merge(self): + """ + check that crashing after opening backup_content.control + for writing will not result in losing metadata about backup files + TODO: rewrite + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Add data + node.pgbench_init(scale=1) + + # FULL backup + full_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + # Change data + node.safe_psql( + 'postgres', + "update pgbench_accounts set aid = aid + 1005000") + + path = node.safe_psql( + 'postgres', + "select pg_relation_filepath('pgbench_accounts')").decode('utf-8').rstrip() + + node.safe_psql( + 'postgres', + "VACUUM pgbench_accounts") + + vm_path = path + '_vm' + + # DELTA backup + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + pgdata = self.pgdata_content(node.data_dir) + + print(self.show_pb( + backup_dir, 'node', as_json=False, as_text=True)) + + gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) + gdb.set_breakpoint('write_backup_filelist') + gdb.run_until_break() + +# gdb.set_breakpoint('sprintf') +# gdb.continue_execution_until_break(20) + + gdb._execute('signal SIGKILL') + + print(self.show_pb( + backup_dir, 'node', as_json=False, as_text=True)) + + self.assertEqual( + 'MERGING', self.show_pb(backup_dir, 'node')[0]['status']) + + self.assertEqual( + 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) + + # In to_backup drop file that comes from from_backup + # emulate crash during previous merge + file_to_remove 
= os.path.join( + backup_dir, 'backups', + 'node', full_id, 'database', vm_path) + + os.remove(file_to_remove) + + # Try to continue failed MERGE + self.merge_backup(backup_dir, "node", backup_id) + + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node')[0]['status']) + + node.cleanup() + + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + def test_failed_merge_after_delete(self): + """ + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # add database + node.safe_psql( + 'postgres', + 'CREATE DATABASE testdb') + + dboid = node.safe_psql( + "postgres", + "select oid from pg_database where datname = 'testdb'").decode('utf-8').rstrip() + + # take FULL backup + full_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + # drop database + node.safe_psql( + 'postgres', + 'DROP DATABASE testdb') + + # take PAGE backup + page_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + page_id_2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + gdb = self.merge_backup( + backup_dir, 'node', page_id, + gdb=True, options=['--log-level-console=verbose']) + + gdb.set_breakpoint('delete_backup_files') + gdb.run_until_break() + + gdb.set_breakpoint('pgFileDelete') + gdb.continue_execution_until_break(20) + + gdb._execute('signal SIGKILL') + + # backup half-merged + self.assertEqual( + 'MERGED', self.show_pb(backup_dir, 'node')[0]['status']) + + self.assertEqual( + full_id, self.show_pb(backup_dir, 'node')[0]['id']) + + db_path = os.path.join( + backup_dir, 'backups', 'node', + full_id, 'database', 'base', dboid) + + try: + self.merge_backup( + backup_dir, 'node', page_id_2, + options=['--log-level-console=verbose']) + self.assertEqual( + 1, 0, + "Expecting Error because of missing parent.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + "ERROR: Full backup {0} has unfinished merge with backup {1}".format( + full_id, page_id) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + def test_failed_merge_after_delete_1(self): + """ + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # take FULL backup + full_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + node.pgbench_init(scale=1) + + page_1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Change PAGE1 backup status to ERROR + self.change_backup_status(backup_dir, 'node', page_1, 'ERROR') + + pgdata = self.pgdata_content(node.data_dir) + + # add data + pgbench = node.pgbench(options=['-T', '10', '-c', '2', '--no-vacuum']) + pgbench.wait() + + # take PAGE2 backup + 
page_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Change PAGE1 backup status to OK + self.change_backup_status(backup_dir, 'node', page_1, 'OK') + + gdb = self.merge_backup( + backup_dir, 'node', page_id, + gdb=True, options=['--log-level-console=verbose']) + + gdb.set_breakpoint('delete_backup_files') + gdb.run_until_break() + +# gdb.set_breakpoint('parray_bsearch') +# gdb.continue_execution_until_break() + + gdb.set_breakpoint('pgFileDelete') + gdb.continue_execution_until_break(30) + gdb._execute('signal SIGKILL') + + self.assertEqual( + full_id, self.show_pb(backup_dir, 'node')[0]['id']) + + # restore + node.cleanup() + try: + #self.restore_node(backup_dir, 'node', node, backup_id=page_1) + self.restore_node(backup_dir, 'node', node) + self.assertEqual( + 1, 0, + "Expecting Error because of orphan status.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Backup {0} is orphan".format(page_1), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + def test_failed_merge_after_delete_2(self): + """ + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # take FULL backup + full_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + node.pgbench_init(scale=1) + + page_1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # add data + pgbench = node.pgbench(options=['-T', '10', '-c', '2', '--no-vacuum']) + pgbench.wait() + + # take PAGE2 backup + page_2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + gdb = self.merge_backup( + backup_dir, 'node', page_2, gdb=True, + options=['--log-level-console=VERBOSE']) + + gdb.set_breakpoint('pgFileDelete') + gdb.run_until_break() + gdb.continue_execution_until_break(2) + gdb._execute('signal SIGKILL') + + self.delete_pb(backup_dir, 'node', backup_id=page_2) + + # rerun merge + try: + #self.restore_node(backup_dir, 'node', node, backup_id=page_1) + self.merge_backup(backup_dir, 'node', page_1) + self.assertEqual( + 1, 0, + "Expecting Error because of backup is missing.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Full backup {0} has unfinished merge " + "with backup {1}".format(full_id, page_2), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + def test_failed_merge_after_delete_3(self): + """ + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # add database + node.safe_psql( + 'postgres', + 'CREATE DATABASE testdb') + + dboid = node.safe_psql( + "postgres", + "select oid from pg_database where datname = 'testdb'").rstrip() + + # take FULL backup + 
full_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + # drop database + node.safe_psql( + 'postgres', + 'DROP DATABASE testdb') + + # take PAGE backup + page_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # create database + node.safe_psql( + 'postgres', + 'create DATABASE testdb') + + page_id_2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + gdb = self.merge_backup( + backup_dir, 'node', page_id, + gdb=True, options=['--log-level-console=verbose']) + + gdb.set_breakpoint('delete_backup_files') + gdb.run_until_break() + + gdb.set_breakpoint('pgFileDelete') + gdb.continue_execution_until_break(20) + + gdb._execute('signal SIGKILL') + + # backup half-merged + self.assertEqual( + 'MERGED', self.show_pb(backup_dir, 'node')[0]['status']) + + self.assertEqual( + full_id, self.show_pb(backup_dir, 'node')[0]['id']) + + db_path = os.path.join( + backup_dir, 'backups', 'node', full_id) + + # FULL backup is missing now + shutil.rmtree(db_path) + + try: + self.merge_backup( + backup_dir, 'node', page_id_2, + options=['--log-level-console=verbose']) + self.assertEqual( + 1, 0, + "Expecting Error because of missing parent.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + "ERROR: Failed to find parent full backup for {0}".format( + page_id_2) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # Skipped, because backups from the future are invalid. + # This causes an "ERROR: Can't assign backup_id, there is already a backup in future" + # now (PBCKP-259). We can conduct such a test again when we + # untie 'backup_id' from 'start_time' + @unittest.skip("skip") + def test_merge_backup_from_future(self): + """ + take FULL backup, take PAGE backup from future, + try to merge page with FULL + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Take FULL + self.backup_node(backup_dir, 'node', node) + + node.pgbench_init(scale=5) + + # Take PAGE from future + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + with open( + os.path.join( + backup_dir, 'backups', 'node', + backup_id, "backup.control"), "a") as conf: + conf.write("start-time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() + timedelta(days=3))) + + # rename directory + new_id = self.show_pb(backup_dir, 'node')[1]['id'] + + os.rename( + os.path.join(backup_dir, 'backups', 'node', backup_id), + os.path.join(backup_dir, 'backups', 'node', new_id)) + + pgbench = node.pgbench(options=['-T', '5', '-c', '1', '--no-vacuum']) + pgbench.wait() + + backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') + pgdata = self.pgdata_content(node.data_dir) + + result = node.safe_psql( + 'postgres', + 'SELECT * from pgbench_accounts') + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', + node_restored, backup_id=backup_id) + + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # check 
that the merged backup has the same state as the original PAGE backup restored above + node_restored.cleanup() + self.merge_backup(backup_dir, 'node', backup_id=backup_id) + self.restore_node( + backup_dir, 'node', + node_restored, backup_id=backup_id) + pgdata_restored = self.pgdata_content(node_restored.data_dir) + + self.set_auto_conf( + node_restored, + {'port': node_restored.port}) + node_restored.slow_start() + + result_new = node_restored.safe_psql( + 'postgres', + 'SELECT * from pgbench_accounts') + + self.assertEqual(result, result_new) + + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_merge_multiple_descendants(self): + """ + PAGEb3 + | PAGEa3 + PAGEb2 / + | PAGEa2 / + PAGEb1 \ / + | PAGEa1 + FULLb | + FULLa + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Take FULL BACKUPs + backup_id_a = self.backup_node(backup_dir, 'node', node) + + backup_id_b = self.backup_node(backup_dir, 'node', node) + + # Change FULLb backup status to ERROR + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + page_id_a1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Change FULLb backup status to OK + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + # Change PAGEa1 backup status to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR') + + # PAGEa1 ERROR + # FULLb OK + # FULLa OK + + page_id_b1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEb1 OK + # PAGEa1 ERROR + # FULLb OK + # FULLa OK + + # Change PAGEa1 to OK + self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK') + + # Change PAGEb1 and FULLb to ERROR + self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + page_id_a2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEa2 OK + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + # Change PAGEb1 and FULLb to OK + self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + # Change PAGEa2 and FULLa to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') + + # PAGEa2 ERROR + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa ERROR + + page_id_b2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEb2 OK + # PAGEa2 ERROR + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa ERROR + + # Change PAGEb2, PAGEb1 and FULLb to ERROR + self.change_backup_status(backup_dir, 'node', page_id_b2, 'ERROR') + self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + # Change FULLa to OK + self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') + + # PAGEb2 ERROR + # PAGEa2 ERROR + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + page_id_a3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEa3 OK + # PAGEb2 ERROR + # PAGEa2 ERROR + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + 
+ # Change PAGEa3 and FULLa to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a3, 'ERROR') + + # Change PAGEb2, PAGEb1 and FULLb to OK + self.change_backup_status(backup_dir, 'node', page_id_b2, 'OK') + self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') + + page_id_b3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEb3 OK + # PAGEa3 ERROR + # PAGEb2 OK + # PAGEa2 ERROR + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa ERROR + + # Change PAGEa3, PAGEa2 and FULLa status to OK + self.change_backup_status(backup_dir, 'node', page_id_a3, 'OK') + self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') + + # PAGEb3 OK + # PAGEa3 OK + # PAGEb2 OK + # PAGEa2 OK + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa OK + + # Check that page_id_a3 and page_id_a2 are both direct descendants of page_id_a1 + self.assertEqual( + self.show_pb(backup_dir, 'node', backup_id=page_id_a3)['parent-backup-id'], + page_id_a1) + + self.assertEqual( + self.show_pb(backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'], + page_id_a1) + + self.merge_backup( + backup_dir, 'node', page_id_a2, + options=['--merge-expired', '--log-level-console=log']) + + try: + self.merge_backup( + backup_dir, 'node', page_id_a3, + options=['--merge-expired', '--log-level-console=log']) + self.assertEqual( + 1, 0, + "Expecting Error because of parent FULL backup is missing.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + "ERROR: Failed to find parent full backup for {0}".format( + page_id_a3) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_smart_merge(self): + """ + make node, create database, take full backup, drop database, + take PAGE backup and merge it into FULL, + make sure that files from dropped database are not + copied during restore + https://github.com/postgrespro/pg_probackup/issues/63 + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # create database + node.safe_psql( + "postgres", + "CREATE DATABASE testdb") + + # take FULL backup + full_id = self.backup_node(backup_dir, 'node', node) + + # drop database + node.safe_psql( + "postgres", + "DROP DATABASE testdb") + + # take PAGE backup + page_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # get delta between FULL and PAGE filelists + filelist_full = self.get_backup_filelist( + backup_dir, 'node', full_id) + + filelist_page = self.get_backup_filelist( + backup_dir, 'node', page_id) + + filelist_diff = self.get_backup_filelist_diff( + filelist_full, filelist_page) + + # merge PAGE backup + self.merge_backup( + backup_dir, 'node', page_id, + options=['--log-level-file=VERBOSE']) + + logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log') + with open(logfile, 'r') as f: + logfile_content = f.read() + + def test_idempotent_merge(self): + """ + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = 
os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # add database + node.safe_psql( + 'postgres', + 'CREATE DATABASE testdb') + + # take FULL backup + full_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + # create database + node.safe_psql( + 'postgres', + 'create DATABASE testdb1') + + # take PAGE backup + page_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # create database + node.safe_psql( + 'postgres', + 'create DATABASE testdb2') + + page_id_2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + gdb = self.merge_backup( + backup_dir, 'node', page_id_2, + gdb=True, options=['--log-level-console=verbose']) + + gdb.set_breakpoint('delete_backup_files') + gdb.run_until_break() + gdb.remove_all_breakpoints() + + gdb.set_breakpoint('rename') + gdb.continue_execution_until_break() + gdb.continue_execution_until_break(2) + + gdb._execute('signal SIGKILL') + + show_backups = self.show_pb(backup_dir, "node") + self.assertEqual(len(show_backups), 1) + + self.assertEqual( + 'MERGED', self.show_pb(backup_dir, 'node')[0]['status']) + + self.assertEqual( + full_id, self.show_pb(backup_dir, 'node')[0]['id']) + + self.merge_backup(backup_dir, 'node', page_id_2) + + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node')[0]['status']) + + self.assertEqual( + page_id_2, self.show_pb(backup_dir, 'node')[0]['id']) + + def test_merge_correct_inheritance(self): + """ + Make sure that backup metainformation fields + 'note' and 'expire-time' are correctly inherited + during merge + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # add database + node.safe_psql( + 'postgres', + 'CREATE DATABASE testdb') + + # take FULL backup + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + # create database + node.safe_psql( + 'postgres', + 'create DATABASE testdb1') + + # take PAGE backup + page_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + self.set_backup( + backup_dir, 'node', page_id, options=['--note=hello', '--ttl=20d']) + + page_meta = self.show_pb(backup_dir, 'node', page_id) + + self.merge_backup(backup_dir, 'node', page_id) + + print(self.show_pb(backup_dir, 'node', page_id)) + + self.assertEqual( + page_meta['note'], + self.show_pb(backup_dir, 'node', page_id)['note']) + + self.assertEqual( + page_meta['expire-time'], + self.show_pb(backup_dir, 'node', page_id)['expire-time']) + + def test_merge_correct_inheritance_1(self): + """ + Make sure that backup metainformation fields + 'note' and 'expire-time' are correctly inherited + during merge + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + 
self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # add database + node.safe_psql( + 'postgres', + 'CREATE DATABASE testdb') + + # take FULL backup + self.backup_node( + backup_dir, 'node', node, + options=['--stream', '--note=hello', '--ttl=20d']) + + # create database + node.safe_psql( + 'postgres', + 'create DATABASE testdb1') + + # take PAGE backup + page_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + self.merge_backup(backup_dir, 'node', page_id) + + self.assertNotIn( + 'note', + self.show_pb(backup_dir, 'node', page_id)) + + self.assertNotIn( + 'expire-time', + self.show_pb(backup_dir, 'node', page_id)) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_multi_timeline_merge(self): + """ + Check that backup in PAGE mode choose + parent backup correctly: + t12 /---P--> + ... + t3 /----> + t2 /----> + t1 -F-----D-> + + P must have F as parent + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql("postgres", "create extension pageinspect") + + try: + node.safe_psql( + "postgres", + "create extension amcheck") + except QueryException as e: + node.safe_psql( + "postgres", + "create extension amcheck_next") + + node.pgbench_init(scale=20) + full_id = self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + self.backup_node(backup_dir, 'node', node, backup_type='delta') + + node.cleanup() + self.restore_node( + backup_dir, 'node', node, backup_id=full_id, + options=[ + '--recovery-target=immediate', + '--recovery-target-action=promote']) + + node.slow_start() + + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # create timelines + for i in range(2, 7): + node.cleanup() + self.restore_node( + backup_dir, 'node', node, + options=[ + '--recovery-target=latest', + '--recovery-target-action=promote', + '--recovery-target-timeline={0}'.format(i)]) + node.slow_start() + + # at this point there is i+1 timeline + pgbench = node.pgbench(options=['-T', '20', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # create backup at 2, 4 and 6 timeline + if i % 2 == 0: + self.backup_node(backup_dir, 'node', node, backup_type='page') + + page_id = self.backup_node(backup_dir, 'node', node, backup_type='page') + pgdata = self.pgdata_content(node.data_dir) + + self.merge_backup(backup_dir, 'node', page_id) + + result = node.safe_psql( + "postgres", "select * from pgbench_accounts") + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node(backup_dir, 'node', node_restored) + pgdata_restored = self.pgdata_content(node_restored.data_dir) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + result_new = node_restored.safe_psql( + "postgres", "select * from pgbench_accounts") + + self.assertEqual(result, result_new) + + self.compare_pgdata(pgdata, pgdata_restored) + + self.checkdb_node( + backup_dir, + 'node', + options=[ + '--amcheck', + '-d', 'postgres', '-p', str(node.port)]) + + 
self.checkdb_node( + backup_dir, + 'node', + options=[ + '--amcheck', + '-d', 'postgres', '-p', str(node_restored.port)]) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_merge_page_header_map_retry(self): + """ + page header map cannot be trusted when + running retry + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=20) + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + delta_id = self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['--stream']) + + pgdata = self.pgdata_content(node.data_dir) + + gdb = self.merge_backup(backup_dir, 'node', delta_id, gdb=True) + + # our goal here is to get full backup with merged data files, + # but with old page header map + gdb.set_breakpoint('cleanup_header_map') + gdb.run_until_break() + gdb._execute('signal SIGKILL') + + self.merge_backup(backup_dir, 'node', delta_id) + + node.cleanup() + + self.restore_node(backup_dir, 'node', node) + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_missing_data_file(self): + """ + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Add data + node.pgbench_init(scale=1) + + # FULL backup + self.backup_node(backup_dir, 'node', node) + + # Change data + pgbench = node.pgbench(options=['-T', '5', '-c', '1']) + pgbench.wait() + + # DELTA backup + delta_id = self.backup_node(backup_dir, 'node', node, backup_type='delta') + + path = node.safe_psql( + 'postgres', + "select pg_relation_filepath('pgbench_accounts')").decode('utf-8').rstrip() + + gdb = self.merge_backup( + backup_dir, "node", delta_id, + options=['--log-level-file=VERBOSE'], gdb=True) + gdb.set_breakpoint('merge_files') + gdb.run_until_break() + + # remove data file in incremental backup + file_to_remove = os.path.join( + backup_dir, 'backups', + 'node', delta_id, 'database', path) + + os.remove(file_to_remove) + + gdb.continue_execution_until_error() + + logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log') + with open(logfile, 'r') as f: + logfile_content = f.read() + + self.assertIn( + 'ERROR: Cannot open backup file "{0}": No such file or directory'.format(file_to_remove), + logfile_content) + + # @unittest.skip("skip") + def test_missing_non_data_file(self): + """ + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + 
node.slow_start()
+
+        # FULL backup
+        self.backup_node(backup_dir, 'node', node)
+
+        # DELTA backup
+        delta_id = self.backup_node(backup_dir, 'node', node, backup_type='delta')
+
+        gdb = self.merge_backup(
+            backup_dir, "node", delta_id,
+            options=['--log-level-file=VERBOSE'], gdb=True)
+        gdb.set_breakpoint('merge_files')
+        gdb.run_until_break()
+
+        # remove a non-data file (backup_label) from the incremental backup
+        file_to_remove = os.path.join(
+            backup_dir, 'backups',
+            'node', delta_id, 'database', 'backup_label')
+
+        os.remove(file_to_remove)
+
+        gdb.continue_execution_until_error()
+
+        logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log')
+        with open(logfile, 'r') as f:
+            logfile_content = f.read()
+
+        self.assertIn(
+            'ERROR: File "{0}" is not found'.format(file_to_remove),
+            logfile_content)
+
+        self.assertIn(
+            'ERROR: Backup files merging failed',
+            logfile_content)
+
+        self.assertEqual(
+            'MERGING', self.show_pb(backup_dir, 'node')[0]['status'])
+
+        self.assertEqual(
+            'MERGING', self.show_pb(backup_dir, 'node')[1]['status'])
+
+    # @unittest.skip("skip")
+    def test_merge_remote_mode(self):
+        """
+        """
+        self._check_gdb_flag_or_skip_test()
+
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'])
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # FULL backup
+        full_id = self.backup_node(backup_dir, 'node', node)
+
+        # DELTA backup
+        delta_id = self.backup_node(backup_dir, 'node', node, backup_type='delta')
+
+        self.set_config(backup_dir, 'node', options=['--retention-window=1'])
+
+        # backdate the FULL backup's recovery_time so it falls outside the retention window
+        backups = os.path.join(backup_dir, 'backups', 'node')
+        with open(
+                os.path.join(
+                    backups, full_id, "backup.control"), "a") as conf:
+            conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format(
+                datetime.now() - timedelta(days=5)))
+
+        gdb = self.backup_node(
+            backup_dir, "node", node,
+            options=['--log-level-file=VERBOSE', '--merge-expired'], gdb=True)
+        gdb.set_breakpoint('merge_files')
+        gdb.run_until_break()
+
+        logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log')
+
+        with open(logfile, "w+") as f:
+            f.truncate()
+
+        gdb.continue_execution_until_exit()
+
+        logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log')
+        with open(logfile, 'r') as f:
+            logfile_content = f.read()
+
+        # the merge itself must run locally, so no SSH messages should appear in the log
+        self.assertNotIn(
+            'SSH', logfile_content)
+
+        self.assertEqual(
+            'OK', self.show_pb(backup_dir, 'node')[0]['status'])
+
+    def test_merge_pg_filenode_map(self):
+        """
+        https://github.com/postgrespro/pg_probackup/issues/320
+        """
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            initdb_params=['--data-checksums'])
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        node1 = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node1'),
+            initdb_params=['--data-checksums'])
+        node1.cleanup()
+
+        node.pgbench_init(scale=5)
+
+        # FULL backup
+        self.backup_node(backup_dir, 'node', node)
+
+        pgbench = node.pgbench(
+            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+            options=['-T', '10', '-c', '1'])
+
+        self.backup_node(backup_dir, 'node', node, backup_type='delta')
+
+        node.safe_psql(
+ 'postgres', + 'reindex index pg_type_oid_index') + + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + self.merge_backup(backup_dir, 'node', backup_id) + + node.cleanup() + + self.restore_node(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + 'postgres', + 'select 1') + +# 1. Need new test with corrupted FULL backup +# 2. different compression levels diff --git a/tests/option_test.py b/tests/option_test.py new file mode 100644 index 000000000..eec1bab44 --- /dev/null +++ b/tests/option_test.py @@ -0,0 +1,231 @@ +import unittest +import os +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +import locale + + +class OptionTest(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_help_1(self): + """help options""" + with open(os.path.join(self.dir_path, "expected/option_help.out"), "rb") as help_out: + self.assertEqual( + self.run_pb(["--help"]), + help_out.read().decode("utf-8") + ) + + # @unittest.skip("skip") + def test_version_2(self): + """help options""" + with open(os.path.join(self.dir_path, "expected/option_version.out"), "rb") as version_out: + self.assertIn( + version_out.read().decode("utf-8").strip(), + self.run_pb(["--version"]) + ) + + # @unittest.skip("skip") + def test_without_backup_path_3(self): + """backup command failure without backup mode option""" + try: + self.run_pb(["backup", "-b", "full"]) + self.assertEqual(1, 0, "Expecting Error because '-B' parameter is not specified.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: required parameter not specified: BACKUP_PATH (-B, --backup-path)', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_options_4(self): + """check options test""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node')) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + + # backup command failure without instance option + try: + self.run_pb(["backup", "-B", backup_dir, "-D", node.data_dir, "-b", "full"]) + self.assertEqual(1, 0, "Expecting Error because 'instance' parameter is not specified.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: required parameter not specified: --instance', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + # backup command failure without backup mode option + try: + self.run_pb(["backup", "-B", backup_dir, "--instance=node", "-D", node.data_dir]) + self.assertEqual(1, 0, "Expecting Error because '-b' parameter is not specified.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: required parameter not specified: BACKUP_MODE (-b, --backup-mode)', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + # backup command failure with invalid backup mode option + try: + self.run_pb(["backup", "-B", backup_dir, "--instance=node", "-b", "bad"]) + self.assertEqual(1, 0, "Expecting Error because backup-mode parameter is invalid.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: 
invalid backup-mode "bad"', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + # delete failure without delete options + try: + self.run_pb(["delete", "-B", backup_dir, "--instance=node"]) + # we should die here because exception is what we expect to happen + self.assertEqual(1, 0, "Expecting Error because delete options are omitted.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: You must specify at least one of the delete options: ' + '--delete-expired |--delete-wal |--merge-expired |--status |(-i, --backup-id)', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + + # delete failure without ID + try: + self.run_pb(["delete", "-B", backup_dir, "--instance=node", '-i']) + # we should die here because exception is what we expect to happen + self.assertEqual(1, 0, "Expecting Error because backup ID is omitted.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "option requires an argument -- 'i'", + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_options_5(self): + """check options test""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node')) + + output = self.init_pb(backup_dir) + self.assertIn( + "INFO: Backup catalog", + output) + + self.assertIn( + "successfully inited", + output) + self.add_instance(backup_dir, 'node', node) + + node.slow_start() + + # syntax error in pg_probackup.conf + conf_file = os.path.join(backup_dir, "backups", "node", "pg_probackup.conf") + with open(conf_file, "a") as conf: + conf.write(" = INFINITE\n") + try: + self.backup_node(backup_dir, 'node', node) + # we should die here because exception is what we expect to happen + self.assertEqual(1, 0, "Expecting Error because of garbage in pg_probackup.conf.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Syntax error in " = INFINITE', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + self.clean_pb(backup_dir) + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + + # invalid value in pg_probackup.conf + with open(conf_file, "a") as conf: + conf.write("BACKUP_MODE=\n") + + try: + self.backup_node(backup_dir, 'node', node, backup_type=None), + # we should die here because exception is what we expect to happen + self.assertEqual(1, 0, "Expecting Error because of invalid backup-mode in pg_probackup.conf.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Invalid option "BACKUP_MODE" in file', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + self.clean_pb(backup_dir) + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + + # Command line parameters should override file values + with open(conf_file, "a") as conf: + conf.write("retention-redundancy=1\n") + + self.assertEqual(self.show_config(backup_dir, 'node')['retention-redundancy'], '1') + + # User cannot send --system-identifier parameter via command line + try: + self.backup_node(backup_dir, 'node', node, 
options=["--system-identifier", "123"]), + # we should die here because exception is what we expect to happen + self.assertEqual(1, 0, "Expecting Error because option system-identifier cannot be specified in command line.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Option system-identifier cannot be specified in command line', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + # invalid value in pg_probackup.conf + with open(conf_file, "a") as conf: + conf.write("SMOOTH_CHECKPOINT=FOO\n") + + try: + self.backup_node(backup_dir, 'node', node) + # we should die here because exception is what we expect to happen + self.assertEqual(1, 0, "Expecting Error because option -C should be boolean.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Invalid option "SMOOTH_CHECKPOINT" in file', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + self.clean_pb(backup_dir) + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + + # invalid option in pg_probackup.conf + with open(conf_file, "a") as conf: + conf.write("TIMELINEID=1\n") + + try: + self.backup_node(backup_dir, 'node', node) + # we should die here because exception is what we expect to happen + self.assertEqual(1, 0, 'Expecting Error because of invalid option "TIMELINEID".\n Output: {0} \n CMD: {1}'.format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Invalid option "TIMELINEID" in file', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_help_6(self): + """help options""" + if ProbackupTest.enable_nls: + self.test_env['LC_ALL'] = 'ru_RU.utf-8' + with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out: + self.assertEqual( + self.run_pb(["--help"]), + help_out.read().decode("utf-8") + ) + else: + self.skipTest( + 'You need configure PostgreSQL with --enabled-nls option for this test') diff --git a/tests/page_test.py b/tests/page_test.py new file mode 100644 index 000000000..e77e5c827 --- /dev/null +++ b/tests/page_test.py @@ -0,0 +1,1424 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from testgres import QueryException +from datetime import datetime, timedelta +import subprocess +import gzip +import shutil + +class PageTest(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + def test_basic_page_vacuum_truncate(self): + """ + make node, create table, take full backup, + delete last 3 pages, vacuum relation, + take page backup, take second page backup, + restore last page backup and check data correctness + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '300s'}) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node_restored.cleanup() + node.slow_start() + self.create_tblspace_in_node(node, 'somedata') + + node.safe_psql( + 
"postgres", + "create sequence t_seq; " + "create table t_heap tablespace somedata as select i as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1024) i;") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + self.backup_node(backup_dir, 'node', node) + + # TODO: make it dynamic + node.safe_psql( + "postgres", + "delete from t_heap where ctid >= '(11,0)'") + node.safe_psql( + "postgres", + "vacuum t_heap") + + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + old_tablespace = self.get_tblspace_path(node, 'somedata') + new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format(old_tablespace, new_tablespace)]) + + # Physical comparison + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + # Logical comparison + result1 = node.safe_psql( + "postgres", + "select * from t_heap") + + result2 = node_restored.safe_psql( + "postgres", + "select * from t_heap") + + self.assertEqual(result1, result2) + + # @unittest.skip("skip") + def test_page_vacuum_truncate_1(self): + """ + make node, create table, take full backup, + delete all data, vacuum relation, + take page backup, insert some data, + take second page backup and check data correctness + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "create sequence t_seq; " + "create table t_heap as select i as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1024) i") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "delete from t_heap") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + node.safe_psql( + "postgres", + "insert into t_heap select i as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1) i") + + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + pgdata = self.pgdata_content(node.data_dir) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node(backup_dir, 'node', node_restored) + + # Physical comparison + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + # @unittest.skip("skip") + def test_page_stream(self): + """ + make archive node, take full and page stream backups, + restore them and check data correctness + """ + self.maxDiff = None + backup_dir = os.path.join(self.tmp_path, self.module_name, 
self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s'} + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL BACKUP + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector " + "from generate_series(0,100) i") + + full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type='full', options=['--stream']) + + # PAGE BACKUP + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector " + "from generate_series(100,200) i") + page_result = node.execute("postgres", "SELECT * FROM t_heap") + page_backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type='page', options=['--stream', '-j', '4']) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + # Drop Node + node.cleanup() + + # Check full backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(full_backup_id), + self.restore_node( + backup_dir, 'node', node, + backup_id=full_backup_id, options=["-j", "4"]), + '\n Unexpected Error Message: {0}\n' + ' CMD: {1}'.format(repr(self.output), self.cmd)) + + node.slow_start() + full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(full_result, full_result_new) + node.cleanup() + + # Check page backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(page_backup_id), + self.restore_node( + backup_dir, 'node', node, + backup_id=page_backup_id, options=["-j", "4"]), + '\n Unexpected Error Message: {0}\n' + ' CMD: {1}'.format(repr(self.output), self.cmd)) + + # GET RESTORED PGDATA AND COMPARE + if self.paranoia: + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + node.slow_start() + page_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(page_result, page_result_new) + node.cleanup() + + # @unittest.skip("skip") + def test_page_archive(self): + """ + make archive node, take full and page archive backups, + restore them and check data correctness + """ + self.maxDiff = None + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s'} + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL BACKUP + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") + full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='full') + + # PAGE BACKUP + node.safe_psql( + "postgres", + "insert into t_heap select i as id, " + "md5(i::text) as text, md5(i::text)::tsvector as tsvector " + "from generate_series(100, 200) i") + page_result = node.execute("postgres", "SELECT * FROM t_heap") + page_backup_id = self.backup_node( + backup_dir, 
'node', node, + backup_type='page', options=["-j", "4"]) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + # Drop Node + node.cleanup() + + # Restore and check full backup + self.assertIn("INFO: Restore of backup {0} completed.".format( + full_backup_id), + self.restore_node( + backup_dir, 'node', node, + backup_id=full_backup_id, + options=[ + "-j", "4", + "--immediate", + "--recovery-target-action=promote"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + node.slow_start() + + full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(full_result, full_result_new) + node.cleanup() + + # Restore and check page backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(page_backup_id), + self.restore_node( + backup_dir, 'node', node, + backup_id=page_backup_id, + options=[ + "-j", "4", + "--immediate", + "--recovery-target-action=promote"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + # GET RESTORED PGDATA AND COMPARE + if self.paranoia: + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + node.slow_start() + + page_result_new = node.execute("postgres", "SELECT * FROM t_heap") + self.assertEqual(page_result, page_result_new) + node.cleanup() + + # @unittest.skip("skip") + def test_page_multiple_segments(self): + """ + Make node, create table with multiple segments, + write some data to it, check page and data correctness + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'fsync': 'off', + 'shared_buffers': '1GB', + 'maintenance_work_mem': '1GB', + 'full_page_writes': 'off'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'somedata') + + # CREATE TABLE + node.pgbench_init(scale=100, options=['--tablespace=somedata']) + # FULL BACKUP + self.backup_node(backup_dir, 'node', node) + + # PGBENCH STUFF + pgbench = node.pgbench(options=['-T', '50', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # GET LOGICAL CONTENT FROM NODE + result = node.safe_psql("postgres", "select count(*) from pgbench_accounts") + # PAGE BACKUP + self.backup_node(backup_dir, 'node', node, backup_type='page') + + # GET PHYSICAL CONTENT FROM NODE + pgdata = self.pgdata_content(node.data_dir) + + # RESTORE NODE + restored_node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'restored_node')) + restored_node.cleanup() + tblspc_path = self.get_tblspace_path(node, 'somedata') + tblspc_path_new = self.get_tblspace_path( + restored_node, 'somedata_restored') + + self.restore_node( + backup_dir, 'node', restored_node, + options=[ + "-j", "4", + "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)]) + + # GET PHYSICAL CONTENT FROM NODE_RESTORED + pgdata_restored = self.pgdata_content(restored_node.data_dir) + + # START RESTORED NODE + self.set_auto_conf(restored_node, {'port': restored_node.port}) + restored_node.slow_start() + + result_new = restored_node.safe_psql( + "postgres", "select count(*) from pgbench_accounts") + + # COMPARE RESTORED FILES + self.assertEqual(result, result_new, 'data is lost') + + if self.paranoia: + self.compare_pgdata(pgdata, 
pgdata_restored) + + # @unittest.skip("skip") + def test_page_delete(self): + """ + Make node, create tablespace with table, take full backup, + delete everything from table, vacuum table, take page backup, + restore page backup, compare . + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s', + } + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'somedata') + # FULL backup + self.backup_node(backup_dir, 'node', node) + node.safe_psql( + "postgres", + "create table t_heap tablespace somedata as select i as id," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(0,100) i") + + node.safe_psql( + "postgres", + "delete from t_heap") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + # PAGE BACKUP + self.backup_node( + backup_dir, 'node', node, backup_type='page') + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + # RESTORE + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + self.get_tblspace_path(node, 'somedata'), + self.get_tblspace_path(node_restored, 'somedata')) + ] + ) + + # GET RESTORED PGDATA AND COMPARE + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # START RESTORED NODE + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + # @unittest.skip("skip") + def test_page_delete_1(self): + """ + Make node, create tablespace with table, take full backup, + delete everything from table, vacuum table, take page backup, + restore page backup, compare . 
+ """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s', + } + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'somedata') + + node.safe_psql( + "postgres", + "create table t_heap tablespace somedata as select i as id," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(0,100) i" + ) + # FULL backup + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "delete from t_heap" + ) + + node.safe_psql( + "postgres", + "vacuum t_heap" + ) + + # PAGE BACKUP + self.backup_node( + backup_dir, 'node', node, backup_type='page') + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + # RESTORE + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored') + ) + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + self.get_tblspace_path(node, 'somedata'), + self.get_tblspace_path(node_restored, 'somedata')) + ] + ) + + # GET RESTORED PGDATA AND COMPARE + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # START RESTORED NODE + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + def test_parallel_pagemap(self): + """ + Test for parallel WAL segments reading, during which pagemap is built + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + # Initialize instance and backup directory + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + pg_options={ + "hot_standby": "on" + } + ) + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node_restored.cleanup() + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Do full backup + self.backup_node(backup_dir, 'node', node) + show_backup = self.show_pb(backup_dir, 'node')[0] + + self.assertEqual(show_backup['status'], "OK") + self.assertEqual(show_backup['backup-mode'], "FULL") + + # Fill instance with data and make several WAL segments ... + with node.connect() as conn: + conn.execute("create table test (id int)") + for x in range(0, 8): + conn.execute( + "insert into test select i from generate_series(1,100) s(i)") + conn.commit() + self.switch_wal_segment(conn) + count1 = conn.execute("select count(*) from test") + + # ... 
and do page backup with parallel pagemap + self.backup_node( + backup_dir, 'node', node, backup_type="page", options=["-j", "4"]) + show_backup = self.show_pb(backup_dir, 'node')[1] + + self.assertEqual(show_backup['status'], "OK") + self.assertEqual(show_backup['backup-mode'], "PAGE") + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + # Restore it + self.restore_node(backup_dir, 'node', node_restored) + + # Physical comparison + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + # Check restored node + count2 = node_restored.execute("postgres", "select count(*) from test") + + self.assertEqual(count1, count2) + + # Clean after yourself + node.cleanup() + node_restored.cleanup() + + def test_parallel_pagemap_1(self): + """ + Test for parallel WAL segments reading, during which pagemap is built + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + # Initialize instance and backup directory + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + pg_options={} + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Do full backup + self.backup_node(backup_dir, 'node', node) + show_backup = self.show_pb(backup_dir, 'node')[0] + + self.assertEqual(show_backup['status'], "OK") + self.assertEqual(show_backup['backup-mode'], "FULL") + + # Fill instance with data and make several WAL segments ... + node.pgbench_init(scale=10) + + # do page backup in single thread + page_id = self.backup_node( + backup_dir, 'node', node, backup_type="page") + + self.delete_pb(backup_dir, 'node', page_id) + + # ... 
and do page backup with parallel pagemap + self.backup_node( + backup_dir, 'node', node, backup_type="page", options=["-j", "4"]) + show_backup = self.show_pb(backup_dir, 'node')[1] + + self.assertEqual(show_backup['status'], "OK") + self.assertEqual(show_backup['backup-mode'], "PAGE") + + # Drop node and restore it + node.cleanup() + self.restore_node(backup_dir, 'node', node) + node.slow_start() + + # Clean after yourself + node.cleanup() + + # @unittest.skip("skip") + def test_page_backup_with_lost_wal_segment(self): + """ + make node with archiving + make archive backup, then generate some wals with pgbench, + delete latest archived wal segment + run page backup, expecting error because of missing wal segment + make sure that backup status is 'ERROR' + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node) + + # make some wals + node.pgbench_init(scale=3) + + # delete last wal segment + wals_dir = os.path.join(backup_dir, 'wal', 'node') + wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join( + wals_dir, f)) and not f.endswith('.backup') and not f.endswith('.part')] + wals = map(str, wals) + file = os.path.join(wals_dir, max(wals)) + os.remove(file) + if self.archive_compress: + file = file[:-3] + + # Single-thread PAGE backup + try: + self.backup_node( + backup_dir, 'node', node, backup_type='page') + self.assertEqual( + 1, 0, + "Expecting Error because of wal segment disappearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'Could not read WAL record at' in e.message and + 'is absent' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'ERROR', + self.show_pb(backup_dir, 'node')[1]['status'], + 'Backup {0} should have STATUS "ERROR"') + + # Multi-thread PAGE backup + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='page', + options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of wal segment disappearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'Could not read WAL record at' in e.message and + 'is absent' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'ERROR', + self.show_pb(backup_dir, 'node')[2]['status'], + 'Backup {0} should have STATUS "ERROR"') + + # @unittest.skip("skip") + def test_page_backup_with_corrupted_wal_segment(self): + """ + make node with archiving + make archive backup, then generate some wals with pgbench, + corrupt latest archived wal segment + run page backup, expecting error because of missing wal segment + make sure that backup status is 'ERROR' + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node) + + 
# make some wals + node.pgbench_init(scale=10) + + # delete last wal segment + wals_dir = os.path.join(backup_dir, 'wal', 'node') + wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join( + wals_dir, f)) and not f.endswith('.backup')] + wals = map(str, wals) + # file = os.path.join(wals_dir, max(wals)) + + if self.archive_compress: + original_file = os.path.join(wals_dir, '000000010000000000000004.gz') + tmp_file = os.path.join(backup_dir, '000000010000000000000004') + + with gzip.open(original_file, 'rb') as f_in, open(tmp_file, 'wb') as f_out: + shutil.copyfileobj(f_in, f_out) + + # drop healthy file + os.remove(original_file) + file = tmp_file + + else: + file = os.path.join(wals_dir, '000000010000000000000004') + + # corrupt file + print(file) + with open(file, "rb+", 0) as f: + f.seek(42) + f.write(b"blah") + f.flush() + f.close + + if self.archive_compress: + # compress corrupted file and replace with it old file + with open(file, 'rb') as f_in, gzip.open(original_file, 'wb', compresslevel=1) as f_out: + shutil.copyfileobj(f_in, f_out) + + file = os.path.join(wals_dir, '000000010000000000000004.gz') + + #if self.archive_compress: + # file = file[:-3] + + # Single-thread PAGE backup + try: + self.backup_node( + backup_dir, 'node', node, backup_type='page') + self.assertEqual( + 1, 0, + "Expecting Error because of wal segment disappearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'Could not read WAL record at' in e.message and + 'Possible WAL corruption. Error has occured during reading WAL segment' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'ERROR', + self.show_pb(backup_dir, 'node')[1]['status'], + 'Backup {0} should have STATUS "ERROR"') + + # Multi-thread PAGE backup + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='page', options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of wal segment disappearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'Could not read WAL record at' in e.message and + 'Possible WAL corruption. 
Error has occured during reading WAL segment "{0}"'.format( + file) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'ERROR', + self.show_pb(backup_dir, 'node')[2]['status'], + 'Backup {0} should have STATUS "ERROR"') + + # @unittest.skip("skip") + def test_page_backup_with_alien_wal_segment(self): + """ + make two nodes with archiving + take archive full backup from both nodes, + generate some wals with pgbench on both nodes, + move latest archived wal segment from second node to first node`s archive + run page backup on first node + expecting error because of alien wal segment + make sure that backup status is 'ERROR' + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + alien_node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'alien_node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.add_instance(backup_dir, 'alien_node', alien_node) + self.set_archiving(backup_dir, 'alien_node', alien_node) + alien_node.slow_start() + + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + self.backup_node( + backup_dir, 'alien_node', alien_node, options=['--stream']) + + # make some wals + node.safe_psql( + "postgres", + "create sequence t_seq; " + "create table t_heap as select i as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1000) i;") + + alien_node.safe_psql( + "postgres", + "create database alien") + + alien_node.safe_psql( + "alien", + "create sequence t_seq; " + "create table t_heap_alien as select i as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,100000) i;") + + # copy latest wal segment + wals_dir = os.path.join(backup_dir, 'wal', 'alien_node') + wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join( + wals_dir, f)) and not f.endswith('.backup')] + wals = map(str, wals) + filename = max(wals) + file = os.path.join(wals_dir, filename) + file_destination = os.path.join( + os.path.join(backup_dir, 'wal', 'node'), filename) +# file = os.path.join(wals_dir, '000000010000000000000004') + print(file) + print(file_destination) + os.remove(file_destination) + os.rename(file, file_destination) + + # Single-thread PAGE backup + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='page') + self.assertEqual( + 1, 0, + "Expecting Error because of alien wal segment.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'Could not read WAL record at' in e.message and + 'Possible WAL corruption. 
Error has occured during reading WAL segment' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'ERROR', + self.show_pb(backup_dir, 'node')[1]['status'], + 'Backup {0} should have STATUS "ERROR"') + + # Multi-thread PAGE backup + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='page', options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of alien wal segment.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn('Could not read WAL record at', e.message) + self.assertIn('WAL file is from different database system: ' + 'WAL file database system identifier is', e.message) + self.assertIn('pg_control database system identifier is', e.message) + self.assertIn('Possible WAL corruption. Error has occured ' + 'during reading WAL segment', e.message) + + self.assertEqual( + 'ERROR', + self.show_pb(backup_dir, 'node')[2]['status'], + 'Backup {0} should have STATUS "ERROR"') + + # @unittest.skip("skip") + def test_multithread_page_backup_with_toast(self): + """ + make node, create toast, do multithread PAGE backup + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node) + + # make some wals + node.safe_psql( + "postgres", + "create table t3 as select i, " + "repeat(md5(i::text),5006056) as fat_attr " + "from generate_series(0,70) i") + + # Multi-thread PAGE backup + self.backup_node( + backup_dir, 'node', node, + backup_type='page', options=["-j", "4"]) + + # @unittest.skip("skip") + def test_page_create_db(self): + """ + Make node, take full backup, create database db1, take page backup, + restore database and check it presense + """ + self.maxDiff = None + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_size': '10GB', + 'checkpoint_timeout': '5min', + } + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL BACKUP + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") + + self.backup_node( + backup_dir, 'node', node) + + # CREATE DATABASE DB1 + node.safe_psql("postgres", "create database db1") + node.safe_psql( + "db1", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector from generate_series(0,1000) i") + + # PAGE BACKUP + backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + # RESTORE + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + node_restored.cleanup() + self.restore_node( + backup_dir, 'node', node_restored, + backup_id=backup_id, options=["-j", "4"]) + + # COMPARE PHYSICAL CONTENT + if self.paranoia: + pgdata_restored = 
self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # START RESTORED NODE + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + node_restored.safe_psql('db1', 'select 1') + node_restored.cleanup() + + # DROP DATABASE DB1 + node.safe_psql( + "postgres", "drop database db1") + # SECOND PAGE BACKUP + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + # RESTORE SECOND PAGE BACKUP + self.restore_node( + backup_dir, 'node', node_restored, + backup_id=backup_id, options=["-j", "4"] + ) + + # COMPARE PHYSICAL CONTENT + if self.paranoia: + pgdata_restored = self.pgdata_content( + node_restored.data_dir, ignore_ptrack=False) + self.compare_pgdata(pgdata, pgdata_restored) + + # START RESTORED NODE + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + try: + node_restored.safe_psql('db1', 'select 1') + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because we are connecting to deleted database" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd) + ) + except QueryException as e: + self.assertTrue( + 'FATAL: database "db1" does not exist' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd) + ) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_multi_timeline_page(self): + """ + Check that backup in PAGE mode choose + parent backup correctly: + t12 /---P--> + ... + t3 /----> + t2 /----> + t1 -F-----D-> + + P must have F as parent + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql("postgres", "create extension pageinspect") + + try: + node.safe_psql( + "postgres", + "create extension amcheck") + except QueryException as e: + node.safe_psql( + "postgres", + "create extension amcheck_next") + + node.pgbench_init(scale=20) + full_id = self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + self.backup_node(backup_dir, 'node', node, backup_type='delta') + + node.cleanup() + self.restore_node( + backup_dir, 'node', node, backup_id=full_id, + options=[ + '--recovery-target=immediate', + '--recovery-target-action=promote']) + + node.slow_start() + + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # create timelines + for i in range(2, 7): + node.cleanup() + self.restore_node( + backup_dir, 'node', node, + options=[ + '--recovery-target=latest', + '--recovery-target-action=promote', + '--recovery-target-timeline={0}'.format(i)]) + node.slow_start() + + # at this point there is i+1 timeline + pgbench = node.pgbench(options=['-T', '20', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # create backup at 2, 4 and 6 timeline + if i % 2 == 0: + self.backup_node(backup_dir, 'node', node, backup_type='page') + + page_id = self.backup_node( + backup_dir, 'node', node, backup_type='page', + options=['--log-level-file=VERBOSE']) + + pgdata = 
self.pgdata_content(node.data_dir) + + result = node.safe_psql( + "postgres", "select * from pgbench_accounts") + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node(backup_dir, 'node', node_restored) + pgdata_restored = self.pgdata_content(node_restored.data_dir) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start() + + result_new = node_restored.safe_psql( + "postgres", "select * from pgbench_accounts") + + self.assertEqual(result, result_new) + + self.compare_pgdata(pgdata, pgdata_restored) + + self.checkdb_node( + backup_dir, + 'node', + options=[ + '--amcheck', + '-d', 'postgres', '-p', str(node.port)]) + + self.checkdb_node( + backup_dir, + 'node', + options=[ + '--amcheck', + '-d', 'postgres', '-p', str(node_restored.port)]) + + backup_list = self.show_pb(backup_dir, 'node') + + self.assertEqual( + backup_list[2]['parent-backup-id'], + backup_list[0]['id']) + self.assertEqual(backup_list[2]['current-tli'], 3) + + self.assertEqual( + backup_list[3]['parent-backup-id'], + backup_list[2]['id']) + self.assertEqual(backup_list[3]['current-tli'], 5) + + self.assertEqual( + backup_list[4]['parent-backup-id'], + backup_list[3]['id']) + self.assertEqual(backup_list[4]['current-tli'], 7) + + self.assertEqual( + backup_list[5]['parent-backup-id'], + backup_list[4]['id']) + self.assertEqual(backup_list[5]['current-tli'], 7) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_multitimeline_page_1(self): + """ + Check that backup in PAGE mode choose + parent backup correctly: + t2 /----> + t1 -F--P---D-> + + P must have F as parent + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={'wal_log_hints': 'on'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql("postgres", "create extension pageinspect") + + try: + node.safe_psql( + "postgres", + "create extension amcheck") + except QueryException as e: + node.safe_psql( + "postgres", + "create extension amcheck_next") + + node.pgbench_init(scale=20) + full_id = self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench(options=['-T', '20', '-c', '1']) + pgbench.wait() + + page1 = self.backup_node(backup_dir, 'node', node, backup_type='page') + + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + page1 = self.backup_node(backup_dir, 'node', node, backup_type='delta') + + node.cleanup() + self.restore_node( + backup_dir, 'node', node, backup_id=page1, + options=[ + '--recovery-target=immediate', + '--recovery-target-action=promote']) + + node.slow_start() + + pgbench = node.pgbench(options=['-T', '20', '-c', '1', '--no-vacuum']) + pgbench.wait() + + print(self.backup_node( + backup_dir, 'node', node, backup_type='page', + options=['--log-level-console=LOG'], return_id=False)) + + pgdata = self.pgdata_content(node.data_dir) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node(backup_dir, 'node', node_restored) + pgdata_restored = self.pgdata_content(node_restored.data_dir) + + self.set_auto_conf(node_restored, 
{'port': node_restored.port}) + node_restored.slow_start() + + self.compare_pgdata(pgdata, pgdata_restored) + + @unittest.skip("skip") + # @unittest.expectedFailure + def test_page_pg_resetxlog(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'shared_buffers': '512MB', + 'max_wal_size': '3GB'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Create table + node.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap " + "as select nextval('t_seq')::int as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " +# "from generate_series(0,25600) i") + "from generate_series(0,2560) i") + + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + 'postgres', + "update t_heap set id = nextval('t_seq'), text = md5(text), " + "tsvector = md5(repeat(tsvector::text, 10))::tsvector") + + self.switch_wal_segment(node) + + # kill the bastard + if self.verbose: + print('Killing postmaster. Losing Ptrack changes') + node.stop(['-m', 'immediate', '-D', node.data_dir]) + + # now smack it with sledgehammer + if node.major_version >= 10: + pg_resetxlog_path = self.get_bin_path('pg_resetwal') + wal_dir = 'pg_wal' + else: + pg_resetxlog_path = self.get_bin_path('pg_resetxlog') + wal_dir = 'pg_xlog' + + self.run_binary( + [ + pg_resetxlog_path, + '-D', + node.data_dir, + '-o 42', + '-f' + ], + asynchronous=False) + + if not node.status(): + node.slow_start() + else: + print("Die! Die! Why won't you die?... 
Why won't you die?") + exit(1) + + # take ptrack backup +# self.backup_node( +# backup_dir, 'node', node, +# backup_type='page', options=['--stream']) + + try: + self.backup_node( + backup_dir, 'node', node, backup_type='page') + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because instance was brutalized by pg_resetxlog" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd) + ) + except ProbackupException as e: + self.assertIn( + 'Insert error message', + e.message, + '\n Unexpected Error Message: {0}\n' + ' CMD: {1}'.format(repr(e.message), self.cmd)) + +# pgdata = self.pgdata_content(node.data_dir) +# +# node_restored = self.make_simple_node( +# base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) +# node_restored.cleanup() +# +# self.restore_node( +# backup_dir, 'node', node_restored) +# +# pgdata_restored = self.pgdata_content(node_restored.data_dir) +# self.compare_pgdata(pgdata, pgdata_restored) diff --git a/tests/pgpro2068_test.py b/tests/pgpro2068_test.py new file mode 100644 index 000000000..434ce2800 --- /dev/null +++ b/tests/pgpro2068_test.py @@ -0,0 +1,188 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack +from datetime import datetime, timedelta +import subprocess +from time import sleep +import shutil +import signal +from testgres import ProcessType + + +class BugTest(ProbackupTest, unittest.TestCase): + + def test_minrecpoint_on_replica(self): + """ + https://jira.postgrespro.ru/browse/PGPRO-2068 + """ + self._check_gdb_flag_or_skip_test() + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + # 'checkpoint_timeout': '60min', + 'checkpoint_completion_target': '0.9', + 'bgwriter_delay': '10ms', + 'bgwriter_lru_maxpages': '1000', + 'bgwriter_lru_multiplier': '4.0', + 'max_wal_size': '256MB'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # take full backup and restore it as replica + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + # start replica + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.restore_node(backup_dir, 'node', replica, options=['-R']) + self.set_replica(node, replica) + self.add_instance(backup_dir, 'replica', replica) + self.set_archiving(backup_dir, 'replica', replica, replica=True) + + self.set_auto_conf( + replica, + {'port': replica.port, 'restart_after_crash': 'off'}) + + # we need those later + node.safe_psql( + "postgres", + "CREATE EXTENSION plpython3u") + + node.safe_psql( + "postgres", + "CREATE EXTENSION pageinspect") + + replica.slow_start(replica=True) + + # generate some data + node.pgbench_init(scale=10) + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "4", "-T", "20"]) + pgbench.wait() + pgbench.stdout.close() + + # generate some more data and leave it in background + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "4", "-j 4", "-T", "100"]) + + # wait for shared buffer on replica to be filled with dirty data + sleep(20) + + # get pids of replica background 
workers + startup_pid = replica.auxiliary_pids[ProcessType.Startup][0] + checkpointer_pid = replica.auxiliary_pids[ProcessType.Checkpointer][0] + bgwriter_pid = replica.auxiliary_pids[ProcessType.BackgroundWriter][0] + + # break checkpointer on UpdateLastRemovedPtr + gdb_checkpointer = self.gdb_attach(checkpointer_pid) + gdb_checkpointer._execute('handle SIGINT noprint nostop pass') + gdb_checkpointer._execute('handle SIGUSR1 noprint nostop pass') + gdb_checkpointer.set_breakpoint('UpdateLastRemovedPtr') + gdb_checkpointer.continue_execution_until_break() + + # break recovery on UpdateControlFile + gdb_recovery = self.gdb_attach(startup_pid) + gdb_recovery._execute('handle SIGINT noprint nostop pass') + gdb_recovery._execute('handle SIGUSR1 noprint nostop pass') + gdb_recovery.set_breakpoint('UpdateMinRecoveryPoint') + gdb_recovery.continue_execution_until_break() + gdb_recovery.set_breakpoint('UpdateControlFile') + gdb_recovery.continue_execution_until_break() + + # stop data generation + pgbench.wait() + pgbench.stdout.close() + + # kill someone, we need a crash + os.kill(int(bgwriter_pid), 9) + gdb_recovery._execute('detach') + gdb_checkpointer._execute('detach') + + # just to be sure + try: + replica.stop(['-m', 'immediate', '-D', replica.data_dir]) + except: + pass + + # MinRecLSN = replica.get_control_data()['Minimum recovery ending location'] + + # Promote replica with 'immediate' target action + if self.get_version(replica) >= self.version_to_num('12.0'): + recovery_config = 'postgresql.auto.conf' + else: + recovery_config = 'recovery.conf' + + replica.append_conf( + recovery_config, "recovery_target = 'immediate'") + replica.append_conf( + recovery_config, "recovery_target_action = 'pause'") + replica.slow_start(replica=True) + + if self.get_version(node) < 100000: + script = ''' +DO +$$ +relations = plpy.execute("select class.oid from pg_class class WHERE class.relkind IN ('r', 'i', 't', 'm') and class.relpersistence = 'p'") +current_xlog_lsn = plpy.execute("SELECT min_recovery_end_location as lsn FROM pg_control_recovery()")[0]['lsn'] +plpy.notice('CURRENT LSN: {0}'.format(current_xlog_lsn)) +found_corruption = False +for relation in relations: + pages_from_future = plpy.execute("with number_of_blocks as (select blknum from generate_series(0, pg_relation_size({0}) / 8192 -1) as blknum) select blknum, lsn, checksum, flags, lower, upper, special, pagesize, version, prune_xid from number_of_blocks, page_header(get_raw_page('{0}'::oid::regclass::text, number_of_blocks.blknum::int)) where lsn > '{1}'::pg_lsn".format(relation['oid'], current_xlog_lsn)) + + if pages_from_future.nrows() == 0: + continue + + for page in pages_from_future: + plpy.notice('Found page from future. 
OID: {0}, BLKNUM: {1}, LSN: {2}'.format(relation['oid'], page['blknum'], page['lsn'])) + found_corruption = True +if found_corruption: + plpy.error('Found Corruption') +$$ LANGUAGE plpython3u; +''' + else: + script = ''' +DO +$$ +relations = plpy.execute("select class.oid from pg_class class WHERE class.relkind IN ('r', 'i', 't', 'm') and class.relpersistence = 'p'") +current_xlog_lsn = plpy.execute("select pg_last_wal_replay_lsn() as lsn")[0]['lsn'] +plpy.notice('CURRENT LSN: {0}'.format(current_xlog_lsn)) +found_corruption = False +for relation in relations: + pages_from_future = plpy.execute("with number_of_blocks as (select blknum from generate_series(0, pg_relation_size({0}) / 8192 -1) as blknum) select blknum, lsn, checksum, flags, lower, upper, special, pagesize, version, prune_xid from number_of_blocks, page_header(get_raw_page('{0}'::oid::regclass::text, number_of_blocks.blknum::int)) where lsn > '{1}'::pg_lsn".format(relation['oid'], current_xlog_lsn)) + + if pages_from_future.nrows() == 0: + continue + + for page in pages_from_future: + plpy.notice('Found page from future. OID: {0}, BLKNUM: {1}, LSN: {2}'.format(relation['oid'], page['blknum'], page['lsn'])) + found_corruption = True +if found_corruption: + plpy.error('Found Corruption') +$$ LANGUAGE plpython3u; +''' + + # Find blocks from future + replica.safe_psql( + 'postgres', + script) + + # error is expected if version < 10.6 + # gdb_backup.continue_execution_until_exit() + + # do basebackup + + # do pg_probackup, expect error diff --git a/tests/pgpro560_test.py b/tests/pgpro560_test.py new file mode 100644 index 000000000..b665fd200 --- /dev/null +++ b/tests/pgpro560_test.py @@ -0,0 +1,123 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from datetime import datetime, timedelta +import subprocess +from time import sleep + + +class CheckSystemID(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_pgpro560_control_file_loss(self): + """ + https://jira.postgrespro.ru/browse/PGPRO-560 + make node with stream support, delete control file + make backup + check that backup failed + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + file = os.path.join(node.base_dir, 'data', 'global', 'pg_control') + # Not delete this file permanently + os.rename(file, os.path.join(node.base_dir, 'data', 'global', 'pg_control_copy')) + + try: + self.backup_node(backup_dir, 'node', node, options=['--stream']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because pg_control was deleted.\n " + "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'ERROR: Could not open file' in e.message and + 'pg_control' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # Return this file to avoid Postger fail + os.rename(os.path.join(node.base_dir, 'data', 'global', 'pg_control_copy'), file) + + def test_pgpro560_systemid_mismatch(self): + """ + https://jira.postgrespro.ru/browse/PGPRO-560 + make node1 and node2 + feed to backup PGDATA from 
node1 and PGPORT from node2 + check that backup failed + """ + node1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node1'), + set_replication=True, + initdb_params=['--data-checksums']) + + node1.slow_start() + node2 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node2'), + set_replication=True, + initdb_params=['--data-checksums']) + + node2.slow_start() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node1', node1) + + try: + self.backup_node(backup_dir, 'node1', node2, options=['--stream']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of SYSTEM ID mismatch.\n " + "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) + except ProbackupException as e: + if self.get_version(node1) > 90600: + self.assertTrue( + 'ERROR: Backup data directory was ' + 'initialized for system id' in e.message and + 'but connected instance system id is' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + else: + self.assertIn( + 'ERROR: System identifier mismatch. ' + 'Connected PostgreSQL instance has system id', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + sleep(1) + + try: + self.backup_node( + backup_dir, 'node1', node2, + data_dir=node1.data_dir, options=['--stream']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of of SYSTEM ID mismatch.\n " + "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) + except ProbackupException as e: + if self.get_version(node1) > 90600: + self.assertTrue( + 'ERROR: Backup data directory was initialized ' + 'for system id' in e.message and + 'but connected instance system id is' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + else: + self.assertIn( + 'ERROR: System identifier mismatch. 
' + 'Connected PostgreSQL instance has system id', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) diff --git a/tests/pgpro589_test.py b/tests/pgpro589_test.py new file mode 100644 index 000000000..8ce8e1f56 --- /dev/null +++ b/tests/pgpro589_test.py @@ -0,0 +1,72 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack +from datetime import datetime, timedelta +import subprocess + + +class ArchiveCheck(ProbackupTest, unittest.TestCase): + + def test_pgpro589(self): + """ + https://jira.postgrespro.ru/browse/PGPRO-589 + make node without archive support, make backup which should fail + check that backup status equal to ERROR + check that no files where copied to backup catalogue + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + + # make erroneous archive_command + self.set_auto_conf(node, {'archive_command': 'exit 0'}) + node.slow_start() + + node.pgbench_init(scale=5) + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "4", "-T", "10"] + ) + pgbench.wait() + pgbench.stdout.close() + path = node.safe_psql( + "postgres", + "select pg_relation_filepath('pgbench_accounts')").rstrip().decode( + "utf-8") + + try: + self.backup_node( + backup_dir, 'node', node, + options=['--archive-timeout=10']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of missing archive wal " + "segment with start_lsn.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'INFO: Wait for WAL segment' in e.message and + 'ERROR: WAL segment' in e.message and + 'could not be archived in 10 seconds' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + backup_id = self.show_pb(backup_dir, 'node')[0]['id'] + self.assertEqual( + 'ERROR', self.show_pb(backup_dir, 'node', backup_id)['status'], + 'Backup should have ERROR status') + file = os.path.join( + backup_dir, 'backups', 'node', + backup_id, 'database', path) + self.assertFalse( + os.path.isfile(file), + "\n Start LSN was not found in archive but datafiles where " + "copied to backup catalogue.\n For example: {0}\n " + "It is not optimal".format(file)) diff --git a/tests/ptrack_test.py b/tests/ptrack_test.py new file mode 100644 index 000000000..6e5786f8c --- /dev/null +++ b/tests/ptrack_test.py @@ -0,0 +1,4407 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack +from datetime import datetime, timedelta +import subprocess +from testgres import QueryException, StartNodeException +import shutil +import sys +from time import sleep +from threading import Thread + + +class PtrackTest(ProbackupTest, unittest.TestCase): + def setUp(self): + if self.pg_config_version < self.version_to_num('11.0'): + self.skipTest('You need PostgreSQL >= 11 for this test') + self.fname = self.id().split('.')[3] + + # @unittest.skip("skip") + def test_drop_rel_during_backup_ptrack(self): + """ + drop relation during ptrack backup + """ + self._check_gdb_flag_or_skip_test() + + 
backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + node.safe_psql( + "postgres", + "create table t_heap as select i" + " as id from generate_series(0,100) i") + + relative_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + + absolute_path = os.path.join(node.data_dir, relative_path) + + # FULL backup + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + # PTRACK backup + gdb = self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', + gdb=True, options=['--log-level-file=LOG']) + + gdb.set_breakpoint('backup_files') + gdb.run_until_break() + + # REMOVE file + os.remove(absolute_path) + + # File removed, we can proceed with backup + gdb.continue_execution_until_exit() + + pgdata = self.pgdata_content(node.data_dir) + + with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: + log_content = f.read() + self.assertTrue( + 'LOG: File not found: "{0}"'.format(absolute_path) in log_content, + 'File "{0}" should be deleted but it`s not'.format(absolute_path)) + + node.cleanup() + self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) + + # Physical comparison + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_ptrack_without_full(self): + """ptrack backup without validated full backup""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + ptrack_enable=True) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + try: + self.backup_node(backup_dir, 'node', node, backup_type="ptrack") + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because page backup should not be possible " + "without valid full backup.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + "WARNING: Valid full backup on current timeline 1 is not found" in e.message and + "ERROR: Create new full backup before an incremental one" in e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertEqual( + self.show_pb(backup_dir, 'node')[0]['status'], + "ERROR") + + # @unittest.skip("skip") + def test_ptrack_threads(self): + """ptrack multi thread backup mode""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + ptrack_enable=True) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.backup_node( + 
backup_dir, 'node', node, + backup_type="full", options=["-j", "4"]) + self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") + + self.backup_node( + backup_dir, 'node', node, + backup_type="ptrack", options=["-j", "4"]) + self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") + + # @unittest.skip("skip") + def test_ptrack_stop_pg(self): + """ + create node, take full backup, + restart node, check that ptrack backup + can be taken + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + node.pgbench_init(scale=1) + + # FULL backup + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + node.stop() + node.slow_start() + + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=['--stream']) + + # @unittest.skip("skip") + def test_ptrack_multi_timeline_backup(self): + """ + t2 /------P2 + t1 ------F---*-----P1 + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + node.pgbench_init(scale=5) + + # FULL backup + full_id = self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) + sleep(15) + + xid = node.safe_psql( + 'postgres', + 'SELECT txid_current()').decode('utf-8').rstrip() + pgbench.wait() + + self.backup_node(backup_dir, 'node', node, backup_type='ptrack') + + node.cleanup() + + # Restore from full backup to create Timeline 2 + print(self.restore_node( + backup_dir, 'node', node, + options=[ + '--recovery-target-xid={0}'.format(xid), + '--recovery-target-action=promote'])) + + node.slow_start() + + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + self.backup_node(backup_dir, 'node', node, backup_type='ptrack') + + pgdata = self.pgdata_content(node.data_dir) + + node.cleanup() + + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + node.slow_start() + + balance = node.safe_psql( + 'postgres', + 'select (select sum(tbalance) from pgbench_tellers) - ' + '( select sum(bbalance) from pgbench_branches) + ' + '( select sum(abalance) from pgbench_accounts ) - ' + '(select sum(delta) from pgbench_history) as must_be_zero').decode('utf-8').rstrip() + + self.assertEqual('0', balance) + + # @unittest.skip("skip") + def test_ptrack_multi_timeline_backup_1(self): + """ + t2 /------ + t1 ---F---P1---* + + # delete P1 + t2 /------P2 + t1 ---F--------* + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + 
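+        # Standard catalog setup: register the instance and enable WAL
+        # archiving before building the backup chain across two timelines.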
self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + node.pgbench_init(scale=5) + + # FULL backup + full_id = self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + ptrack_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack') + node.cleanup() + + self.restore_node(backup_dir, 'node', node) + + node.slow_start() + + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # delete old PTRACK backup + self.delete_pb(backup_dir, 'node', backup_id=ptrack_id) + + # take new PTRACK backup + self.backup_node(backup_dir, 'node', node, backup_type='ptrack') + + pgdata = self.pgdata_content(node.data_dir) + + node.cleanup() + + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + node.slow_start() + + balance = node.safe_psql( + 'postgres', + 'select (select sum(tbalance) from pgbench_tellers) - ' + '( select sum(bbalance) from pgbench_branches) + ' + '( select sum(abalance) from pgbench_accounts ) - ' + '(select sum(delta) from pgbench_history) as must_be_zero').\ + decode('utf-8').rstrip() + + self.assertEqual('0', balance) + + # @unittest.skip("skip") + def test_ptrack_eat_my_data(self): + """ + PGPRO-4051 + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + node.pgbench_init(scale=50) + + self.backup_node(backup_dir, 'node', node) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + pgbench = node.pgbench(options=['-T', '300', '-c', '1', '--no-vacuum']) + + for i in range(10): + print("Iteration: {0}".format(i)) + + sleep(2) + + self.backup_node(backup_dir, 'node', node, backup_type='ptrack') +# pgdata = self.pgdata_content(node.data_dir) +# +# node_restored.cleanup() +# +# self.restore_node(backup_dir, 'node', node_restored) +# pgdata_restored = self.pgdata_content(node_restored.data_dir) +# +# self.compare_pgdata(pgdata, pgdata_restored) + + pgbench.terminate() + pgbench.wait() + + self.switch_wal_segment(node) + + result = node.safe_psql("postgres", "SELECT * FROM pgbench_accounts") + + node_restored.cleanup() + self.restore_node(backup_dir, 'node', node_restored) + self.set_auto_conf( + node_restored, {'port': node_restored.port}) + + node_restored.slow_start() + + balance = node_restored.safe_psql( + 'postgres', + 'select (select sum(tbalance) from pgbench_tellers) - ' + '( select sum(bbalance) from pgbench_branches) + ' + '( select sum(abalance) from pgbench_accounts ) - ' + '(select sum(delta) from pgbench_history) as must_be_zero').decode('utf-8').rstrip() + + self.assertEqual('0', balance) + + # Logical comparison + self.assertEqual( + result, + node_restored.safe_psql( + 'postgres', + 'SELECT * FROM pgbench_accounts'), + 'Data loss') + + # @unittest.skip("skip") + def test_ptrack_simple(self): + """make node, make full and ptrack stream backups," + " 
restore them and check data correctness""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + node.safe_psql( + "postgres", + "create table t_heap as select i" + " as id from generate_series(0,1) i") + + self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', + options=['--stream']) + + node.safe_psql( + "postgres", + "update t_heap set id = 100500") + + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=['--stream']) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + result = node.safe_psql("postgres", "SELECT * FROM t_heap") + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, options=["-j", "4"]) + + # Physical comparison + if self.paranoia: + pgdata_restored = self.pgdata_content( + node_restored.data_dir, ignore_ptrack=False) + self.compare_pgdata(pgdata, pgdata_restored) + + self.set_auto_conf( + node_restored, {'port': node_restored.port}) + + node_restored.slow_start() + + # Logical comparison + self.assertEqual( + result, + node_restored.safe_psql("postgres", "SELECT * FROM t_heap")) + + # @unittest.skip("skip") + def test_ptrack_unprivileged(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + # self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE DATABASE backupdb") + + # PG 9.5 + if self.get_version(node) < 90600: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT 
EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + # PG 9.6 + elif self.get_version(node) > 90600 and self.get_version(node) < 100000: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE 
ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # >= 15 + else: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + 
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + + node.safe_psql( + "backupdb", + "CREATE SCHEMA ptrack") + node.safe_psql( + "backupdb", + "CREATE EXTENSION ptrack WITH SCHEMA ptrack") + node.safe_psql( + "backupdb", + "GRANT USAGE ON SCHEMA ptrack TO backup") + + node.safe_psql( + "backupdb", + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup") + + if ProbackupTest.enterprise: + node.safe_psql( + "backupdb", + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " + 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;') + + self.backup_node( + backup_dir, 'node', node, + datname='backupdb', options=['--stream', "-U", "backup"]) + + self.backup_node( + backup_dir, 'node', node, datname='backupdb', + backup_type='ptrack', options=['--stream', "-U", "backup"]) + + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_ptrack_enable(self): + """make ptrack without full backup, should result in error""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s', + 'shared_preload_libraries': 'ptrack'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + # PTRACK BACKUP + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=["--stream"] + ) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because ptrack disabled.\n" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd + ) + ) + except ProbackupException as e: + self.assertIn( + 'ERROR: Ptrack is disabled\n', + e.message, + '\n Unexpected Error Message: {0}\n' + ' CMD: {1}'.format(repr(e.message), self.cmd) + ) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_ptrack_disable(self): + """ + Take full backup, disable ptrack restart postgresql, + enable ptrack, restart postgresql, take ptrack backup + which should fail + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums'], + pg_options={'checkpoint_timeout': '30s'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + # FULL BACKUP + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + # DISABLE PTRACK + node.safe_psql('postgres', "alter system set ptrack.map_size to 0") + node.stop() + node.slow_start() + + # ENABLE PTRACK + node.safe_psql('postgres', "alter system set ptrack.map_size to '128'") + node.safe_psql('postgres', "alter system set shared_preload_libraries to 'ptrack'") + node.stop() + node.slow_start() + + # PTRACK BACKUP + try: + 
self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=["--stream"] + ) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because ptrack_enable was set to OFF at some" + " point after previous backup.\n" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd + ) + ) + except ProbackupException as e: + self.assertIn( + 'ERROR: LSN from ptrack_control', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd + ) + ) + + # @unittest.skip("skip") + def test_ptrack_uncommitted_xact(self): + """make ptrack backup while there is uncommitted open transaction""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums'], + pg_options={ + 'wal_level': 'replica'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + con = node.connect("postgres") + con.execute( + "create table t_heap as select i" + " as id from generate_series(0,1) i") + + self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', + options=['--stream']) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, + node_restored.data_dir, options=["-j", "4"]) + + if self.paranoia: + pgdata_restored = self.pgdata_content( + node_restored.data_dir, ignore_ptrack=False) + + self.set_auto_conf( + node_restored, {'port': node_restored.port}) + + node_restored.slow_start() + + # Physical comparison + if self.paranoia: + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_ptrack_vacuum_full(self): + """make node, make full and ptrack stream backups, + restore them and check data correctness""" + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'somedata') + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + node.safe_psql( + "postgres", + "create table t_heap tablespace somedata as select i" + " as id from generate_series(0,1000000) i" + ) + + pg_connect = node.connect("postgres", autocommit=True) + + gdb = self.gdb_attach(pg_connect.pid) + gdb.set_breakpoint('reform_and_rewrite_tuple') + + gdb.continue_execution_until_running() + + process = Thread( + target=pg_connect.execute, args=["VACUUM FULL t_heap"]) + process.start() + + while not gdb.stopped_in_breakpoint: + sleep(1) + + gdb.continue_execution_until_break(20) + + self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) + + self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) + + if self.paranoia: 
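+            # Snapshot the primary's data directory so it can be physically
+            # compared with the restored node below (paranoia mode only).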
+ pgdata = self.pgdata_content(node.data_dir) + + gdb.remove_all_breakpoints() + gdb._execute('detach') + process.join() + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + old_tablespace = self.get_tblspace_path(node, 'somedata') + new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') + + self.restore_node( + backup_dir, 'node', node_restored, + options=["-j", "4", "-T", "{0}={1}".format( + old_tablespace, new_tablespace)] + ) + + # Physical comparison + if self.paranoia: + pgdata_restored = self.pgdata_content( + node_restored.data_dir, ignore_ptrack=False) + self.compare_pgdata(pgdata, pgdata_restored) + + self.set_auto_conf( + node_restored, {'port': node_restored.port}) + + node_restored.slow_start() + + # @unittest.skip("skip") + def test_ptrack_vacuum_truncate(self): + """make node, create table, take full backup, + delete last 3 pages, vacuum relation, + take ptrack backup, take second ptrack backup, + restore last ptrack backup and check data correctness""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'somedata') + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + node.safe_psql( + "postgres", + "create sequence t_seq; " + "create table t_heap tablespace somedata as select i as id, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1024) i;") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + node.safe_psql( + "postgres", + "delete from t_heap where ctid >= '(11,0)'") + + node.safe_psql( + "postgres", + "vacuum t_heap") + + self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) + + self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + old_tablespace = self.get_tblspace_path(node, 'somedata') + new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') + + self.restore_node( + backup_dir, 'node', node_restored, + options=["-j", "4", "-T", "{0}={1}".format( + old_tablespace, new_tablespace)] + ) + + # Physical comparison + if self.paranoia: + pgdata_restored = self.pgdata_content( + node_restored.data_dir, + ignore_ptrack=False + ) + self.compare_pgdata(pgdata, pgdata_restored) + + self.set_auto_conf( + node_restored, {'port': node_restored.port}) + + node_restored.slow_start() + + # @unittest.skip("skip") + def test_ptrack_get_block(self): + """ + make node, make full and ptrack stream backups, + restore them and check data correctness + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 
'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + node.safe_psql( + "postgres", + "create table t_heap as select i" + " as id from generate_series(0,1) i") + + self.backup_node(backup_dir, 'node', node, options=['--stream']) + gdb = self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', + options=['--stream'], + gdb=True) + + gdb.set_breakpoint('make_pagemap_from_ptrack_2') + gdb.run_until_break() + + node.safe_psql( + "postgres", + "update t_heap set id = 100500") + + gdb.continue_execution_until_exit() + + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=['--stream']) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + result = node.safe_psql("postgres", "SELECT * FROM t_heap") + node.cleanup() + self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) + + # Physical comparison + if self.paranoia: + pgdata_restored = self.pgdata_content( + node.data_dir, ignore_ptrack=False) + self.compare_pgdata(pgdata, pgdata_restored) + + node.slow_start() + # Logical comparison + self.assertEqual( + result, + node.safe_psql("postgres", "SELECT * FROM t_heap")) + + # @unittest.skip("skip") + def test_ptrack_stream(self): + """make node, make full and ptrack stream backups, + restore them and check data correctness""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + # FULL BACKUP + node.safe_psql("postgres", "create sequence t_seq") + node.safe_psql( + "postgres", + "create table t_heap as select i as id, nextval('t_seq')" + " as t_seq, md5(i::text) as text, md5(i::text)::tsvector" + " as tsvector from generate_series(0,100) i") + + full_result = node.safe_psql("postgres", "SELECT * FROM t_heap") + full_backup_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + # PTRACK BACKUP + node.safe_psql( + "postgres", + "insert into t_heap select i as id, nextval('t_seq') as t_seq," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(100,200) i") + + ptrack_result = node.safe_psql("postgres", "SELECT * FROM t_heap") + ptrack_backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=['--stream']) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + # Drop Node + node.cleanup() + + # Restore and check full backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(full_backup_id), + self.restore_node( + backup_dir, 'node', node, + backup_id=full_backup_id, options=["-j", "4"] + ), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd) + ) + node.slow_start() + full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") + self.assertEqual(full_result, full_result_new) + node.cleanup() + + # Restore and check ptrack backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(ptrack_backup_id), + self.restore_node( + backup_dir, 'node', node, + backup_id=ptrack_backup_id, options=["-j", "4"] + ), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + if self.paranoia: + 
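+            # Physically compare the restored data directory with the
+            # snapshot taken right after the ptrack backup.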
pgdata_restored = self.pgdata_content( + node.data_dir, ignore_ptrack=False) + self.compare_pgdata(pgdata, pgdata_restored) + + node.slow_start() + ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") + self.assertEqual(ptrack_result, ptrack_result_new) + + # @unittest.skip("skip") + def test_ptrack_archive(self): + """make archive node, make full and ptrack backups, + check data correctness in restored instance""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + # FULL BACKUP + node.safe_psql( + "postgres", + "create table t_heap as" + " select i as id," + " md5(i::text) as text," + " md5(i::text)::tsvector as tsvector" + " from generate_series(0,100) i") + + full_result = node.safe_psql("postgres", "SELECT * FROM t_heap") + full_backup_id = self.backup_node(backup_dir, 'node', node) + full_target_time = self.show_pb( + backup_dir, 'node', full_backup_id)['recovery-time'] + + # PTRACK BACKUP + node.safe_psql( + "postgres", + "insert into t_heap select i as id," + " md5(i::text) as text," + " md5(i::text)::tsvector as tsvector" + " from generate_series(100,200) i") + + ptrack_result = node.safe_psql("postgres", "SELECT * FROM t_heap") + ptrack_backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='ptrack') + ptrack_target_time = self.show_pb( + backup_dir, 'node', ptrack_backup_id)['recovery-time'] + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + node.safe_psql( + "postgres", + "insert into t_heap select i as id," + " md5(i::text) as text," + " md5(i::text)::tsvector as tsvector" + " from generate_series(200, 300) i") + + # Drop Node + node.cleanup() + + # Check full backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(full_backup_id), + self.restore_node( + backup_dir, 'node', node, + backup_id=full_backup_id, + options=[ + "-j", "4", "--recovery-target-action=promote", + "--time={0}".format(full_target_time)] + ), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd) + ) + node.slow_start() + + full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") + self.assertEqual(full_result, full_result_new) + node.cleanup() + + # Check ptrack backup + self.assertIn( + "INFO: Restore of backup {0} completed.".format(ptrack_backup_id), + self.restore_node( + backup_dir, 'node', node, + backup_id=ptrack_backup_id, + options=[ + "-j", "4", + "--time={0}".format(ptrack_target_time), + "--recovery-target-action=promote"] + ), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd) + ) + + if self.paranoia: + pgdata_restored = self.pgdata_content( + node.data_dir, ignore_ptrack=False) + self.compare_pgdata(pgdata, pgdata_restored) + + node.slow_start() + ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") + self.assertEqual(ptrack_result, ptrack_result_new) + + node.cleanup() + + @unittest.skip("skip") + def test_ptrack_pgpro417(self): + """ + Make node, take full backup, take ptrack backup, + delete ptrack backup. Try to take ptrack backup, + which should fail. 
Relevant only for PTRACK 1.x
+        """
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            set_replication=True,
+            ptrack_enable=True,
+            initdb_params=['--data-checksums'],
+            pg_options={
+                'checkpoint_timeout': '30s'})
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        node.slow_start()
+
+        # FULL BACKUP
+        node.safe_psql(
+            "postgres",
+            "create table t_heap as select i as id, md5(i::text) as text, "
+            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
+        node.safe_psql(
+            "postgres",
+            "SELECT * FROM t_heap")
+
+        backup_id = self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='full', options=["--stream"])
+
+        start_lsn_full = self.show_pb(
+            backup_dir, 'node', backup_id)['start-lsn']
+
+        # PTRACK BACKUP
+        node.safe_psql(
+            "postgres",
+            "insert into t_heap select i as id, md5(i::text) as text, "
+            "md5(i::text)::tsvector as tsvector "
+            "from generate_series(100,200) i")
+        node.safe_psql("postgres", "SELECT * FROM t_heap")
+        backup_id = self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='ptrack', options=["--stream"])
+
+        start_lsn_ptrack = self.show_pb(
+            backup_dir, 'node', backup_id)['start-lsn']
+
+        self.delete_pb(backup_dir, 'node', backup_id)
+
+        # SECOND PTRACK BACKUP
+        node.safe_psql(
+            "postgres",
+            "insert into t_heap select i as id, md5(i::text) as text, "
+            "md5(i::text)::tsvector as tsvector "
+            "from generate_series(200,300) i")
+
+        try:
+            self.backup_node(
+                backup_dir, 'node', node,
+                backup_type='ptrack', options=["--stream"])
+            # we should die here because exception is what we expect to happen
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because of LSN mismatch from ptrack_control "
+                "and previous backup start_lsn.\n"
+                " Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd))
+        except ProbackupException as e:
+            self.assertTrue(
+                'ERROR: LSN from ptrack_control' in e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))
+
+    @unittest.skip("skip")
+    def test_page_pgpro417(self):
+        """
+        Make archive node, take full backup, take page backup,
+        delete page backup. Try to take ptrack backup, which should fail.
+        Relevant only for PTRACK 1.x
+        """
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            set_replication=True,
+            ptrack_enable=True,
+            initdb_params=['--data-checksums'],
+            pg_options={
+                'checkpoint_timeout': '30s'})
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # FULL BACKUP
+        node.safe_psql(
+            "postgres",
+            "create table t_heap as select i as id, md5(i::text) as text, "
+            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
+        node.safe_psql("postgres", "SELECT * FROM t_heap")
+
+        # PAGE BACKUP
+        node.safe_psql(
+            "postgres",
+            "insert into t_heap select i as id, md5(i::text) as text, "
+            "md5(i::text)::tsvector as tsvector "
+            "from generate_series(100,200) i")
+        node.safe_psql("postgres", "SELECT * FROM t_heap")
+        backup_id = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        self.delete_pb(backup_dir, 'node', backup_id)
+# sys.exit(1)
+
+        # PTRACK BACKUP
+        node.safe_psql(
+            "postgres",
+            "insert into t_heap select i as id, md5(i::text) as text, "
+            "md5(i::text)::tsvector as tsvector "
+            "from generate_series(200,300) i")
+
+        try:
+            self.backup_node(backup_dir, 'node', node, backup_type='ptrack')
+            # we should die here because exception is what we expect to happen
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because of LSN mismatch from ptrack_control "
+                "and previous backup start_lsn.\n "
+                "Output: {0}\n CMD: {1}".format(
+                    repr(self.output), self.cmd))
+        except ProbackupException as e:
+            self.assertTrue(
+                'ERROR: LSN from ptrack_control' in e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))
+
+    @unittest.skip("skip")
+    def test_full_pgpro417(self):
+        """
+        Make node, take two full backups, delete the second full backup.
+        Try to take ptrack backup, which should fail.
+        Relevant only for PTRACK 1.x
+        """
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            set_replication=True,
+            ptrack_enable=True,
+            initdb_params=['--data-checksums'],
+            pg_options={
+                'checkpoint_timeout': '30s'})
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        node.slow_start()
+
+        # FULL BACKUP
+        node.safe_psql(
+            "postgres",
+            "create table t_heap as select i as id, md5(i::text) as text,"
+            " md5(i::text)::tsvector as tsvector "
+            " from generate_series(0,100) i"
+        )
+        node.safe_psql("postgres", "SELECT * FROM t_heap")
+        self.backup_node(backup_dir, 'node', node, options=["--stream"])
+
+        # SECOND FULL BACKUP
+        node.safe_psql(
+            "postgres",
+            "insert into t_heap select i as id, md5(i::text) as text,"
+            " md5(i::text)::tsvector as tsvector"
+            " from generate_series(100,200) i"
+        )
+        node.safe_psql("postgres", "SELECT * FROM t_heap")
+        backup_id = self.backup_node(
+            backup_dir, 'node', node, options=["--stream"])
+
+        self.delete_pb(backup_dir, 'node', backup_id)
+
+        # PTRACK BACKUP
+        node.safe_psql(
+            "postgres",
+            "insert into t_heap select i as id, md5(i::text) as text, "
+            "md5(i::text)::tsvector as tsvector "
+            "from generate_series(200,300) i")
+        try:
+            self.backup_node(
+                backup_dir, 'node', node,
+                backup_type='ptrack', options=["--stream"])
+            # we should die here because exception is what we expect to happen
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because of LSN mismatch from ptrack_control "
+                "and previous backup start_lsn.\n "
+                "Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd)
+            )
+        except ProbackupException as e:
+            self.assertTrue(
+                "ERROR: LSN from ptrack_control" in e.message and
+                "Create new full backup before "
+                "an incremental one" in e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))
+
+    # @unittest.skip("skip")
+    def test_create_db(self):
+        """
+        Make node, take full backup, create database db1, take ptrack backup,
+        restore database and check its presence
+        """
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            set_replication=True,
+            ptrack_enable=True,
+            initdb_params=['--data-checksums'],
+            pg_options={
+                'max_wal_size': '10GB'})
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        node.slow_start()
+
+        node.safe_psql(
+            "postgres",
+            "CREATE EXTENSION ptrack")
+
+        # FULL BACKUP
+        node.safe_psql(
+            "postgres",
+            "create table t_heap as select i as id, md5(i::text) as text, "
+            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
+
+        node.safe_psql("postgres", "SELECT * FROM t_heap")
+        self.backup_node(
+            backup_dir, 'node', node,
+            options=["--stream"])
+
+        # CREATE DATABASE DB1
+        node.safe_psql("postgres", "create database db1")
+        node.safe_psql(
+            "db1",
+            "create table t_heap as select i as id, md5(i::text) as text, "
+            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
+
+        # PTRACK BACKUP
+        backup_id = self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='ptrack', options=["--stream"])
+
+        if self.paranoia:
+            pgdata = self.pgdata_content(node.data_dir)
+
+        # RESTORE
+        node_restored = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
+
+        node_restored.cleanup()
+        self.restore_node(
+            backup_dir, 'node', node_restored,
+            backup_id=backup_id, options=["-j", "4"])
+
+        # COMPARE PHYSICAL CONTENT
+        if self.paranoia:
+            pgdata_restored = self.pgdata_content(
+                node_restored.data_dir, ignore_ptrack=False)
+            self.compare_pgdata(pgdata, pgdata_restored)
+
+        # START RESTORED NODE
+        self.set_auto_conf(
+            node_restored, {'port': node_restored.port})
+        node_restored.slow_start()
+
+        # DROP DATABASE DB1
+        node.safe_psql(
+            "postgres", "drop database db1")
+        # SECOND PTRACK BACKUP
+        backup_id = self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='ptrack', options=["--stream"]
+        )
+
+        if self.paranoia:
+            pgdata = self.pgdata_content(node.data_dir)
+
+        # RESTORE SECOND PTRACK BACKUP
+        node_restored.cleanup()
+        self.restore_node(
+            backup_dir, 'node', node_restored,
+            backup_id=backup_id, options=["-j", "4"])
+
+        # COMPARE PHYSICAL CONTENT
+        if self.paranoia:
+            pgdata_restored = self.pgdata_content(
+                node_restored.data_dir, ignore_ptrack=False)
+            self.compare_pgdata(pgdata, pgdata_restored)
+
+        # START RESTORED NODE
+        self.set_auto_conf(
+            node_restored, {'port': node_restored.port})
+        node_restored.slow_start()
+
+        try:
+            node_restored.safe_psql('db1', 'select 1')
+            # we should die here because exception is what we expect to happen
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because we are connecting to deleted database"
+                "\n Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd))
+        except QueryException as e:
+            self.assertTrue(
+                'FATAL: database "db1" does not exist' in e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))
+
+    # @unittest.skip("skip")
+    def test_create_db_on_replica(self):
+        """
+        Make node, take full backup, create replica from it,
+        take full backup from replica,
+        create database db1, take ptrack backup from replica,
+        restore database and check its presence
+        """
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            set_replication=True,
+            ptrack_enable=True,
+            initdb_params=['--data-checksums'],
+            pg_options={
+                'checkpoint_timeout': '30s'})
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        node.slow_start()
+
+        node.safe_psql(
+            "postgres",
+            "CREATE EXTENSION ptrack")
+
+        # FULL BACKUP
+        node.safe_psql(
+            "postgres",
+            "create table t_heap as select i as id, md5(i::text) as text, "
+            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
+
+        replica = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
+        replica.cleanup()
+
+        self.backup_node(
+            backup_dir, 'node', node, options=['-j10', '--stream'])
+
+        self.restore_node(backup_dir, 'node', replica)
+
+        # Add replica
+        self.add_instance(backup_dir, 'replica', replica)
+        self.set_replica(node, replica, 'replica', synchronous=True)
+        replica.slow_start(replica=True)
+
+        self.backup_node(
+            backup_dir, 'replica', replica,
+            options=[
+                '-j10',
+                '--master-host=localhost',
+                '--master-db=postgres',
+                '--master-port={0}'.format(node.port),
+                '--stream'
+                ]
+            )
+
+        # CREATE DATABASE DB1
+        node.safe_psql("postgres", "create database db1")
+        node.safe_psql(
+            "db1",
+            "create table t_heap as select i as id, md5(i::text) as text, "
+            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
+
+        # Wait until replica catches up with master
+        self.wait_until_replica_catch_with_master(node, replica)
+        replica.safe_psql('postgres', 'checkpoint')
+
+        # PTRACK BACKUP
+ backup_id = self.backup_node( + backup_dir, 'replica', + replica, backup_type='ptrack', + options=[ + '-j10', + '--stream', + '--master-host=localhost', + '--master-db=postgres', + '--master-port={0}'.format(node.port) + ] + ) + + if self.paranoia: + pgdata = self.pgdata_content(replica.data_dir) + + # RESTORE + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node( + backup_dir, 'replica', node_restored, + backup_id=backup_id, options=["-j", "4"]) + + # COMPARE PHYSICAL CONTENT + if self.paranoia: + pgdata_restored = self.pgdata_content( + node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_alter_table_set_tablespace_ptrack(self): + """Make node, create tablespace with table, take full backup, + alter tablespace location, take ptrack backup, restore database.""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + # FULL BACKUP + self.create_tblspace_in_node(node, 'somedata') + node.safe_psql( + "postgres", + "create table t_heap tablespace somedata as select i as id," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(0,100) i") + # FULL backup + self.backup_node(backup_dir, 'node', node, options=["--stream"]) + + # ALTER TABLESPACE + self.create_tblspace_in_node(node, 'somedata_new') + node.safe_psql( + "postgres", + "alter table t_heap set tablespace somedata_new") + + # sys.exit(1) + # PTRACK BACKUP + #result = node.safe_psql( + # "postgres", "select * from t_heap") + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', + options=["--stream"] + ) + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + # node.stop() + # node.cleanup() + + # RESTORE + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + self.get_tblspace_path(node, 'somedata'), + self.get_tblspace_path(node_restored, 'somedata') + ), + "-T", "{0}={1}".format( + self.get_tblspace_path(node, 'somedata_new'), + self.get_tblspace_path(node_restored, 'somedata_new') + ) + ] + ) + + # GET RESTORED PGDATA AND COMPARE + if self.paranoia: + pgdata_restored = self.pgdata_content( + node_restored.data_dir, ignore_ptrack=False) + self.compare_pgdata(pgdata, pgdata_restored) + + # START RESTORED NODE + self.set_auto_conf( + node_restored, {'port': node_restored.port}) + node_restored.slow_start() + +# result_new = node_restored.safe_psql( +# "postgres", "select * from t_heap") +# +# self.assertEqual(result, result_new, 'lost some data after restore') + + # @unittest.skip("skip") + def test_alter_database_set_tablespace_ptrack(self): + """Make node, create tablespace with database," + " take full backup, alter tablespace location," + " take ptrack backup, restore database.""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + # FULL BACKUP + self.backup_node(backup_dir, 'node', node, options=["--stream"]) + + # CREATE TABLESPACE + self.create_tblspace_in_node(node, 'somedata') + + # ALTER DATABASE + node.safe_psql( + "template1", + "alter database postgres set tablespace somedata") + + # PTRACK BACKUP + self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', + options=["--stream"]) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + node.stop() + + # RESTORE + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + self.restore_node( + backup_dir, 'node', + node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + self.get_tblspace_path(node, 'somedata'), + self.get_tblspace_path(node_restored, 'somedata'))]) + + # GET PHYSICAL CONTENT and COMPARE PHYSICAL CONTENT + if self.paranoia: + pgdata_restored = self.pgdata_content( + node_restored.data_dir, ignore_ptrack=False) + self.compare_pgdata(pgdata, pgdata_restored) + + # START RESTORED NODE + node_restored.port = node.port + node_restored.slow_start() + + # @unittest.skip("skip") + def test_drop_tablespace(self): + """ + Make node, create table, alter table tablespace, take ptrack backup, + move table from tablespace, take ptrack backup + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.create_tblspace_in_node(node, 'somedata') + + # CREATE TABLE + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") + + result = node.safe_psql("postgres", "select * from t_heap") + # FULL BACKUP + self.backup_node(backup_dir, 'node', node, options=["--stream"]) + + # Move table to tablespace 'somedata' + node.safe_psql( + "postgres", "alter table t_heap set tablespace somedata") + # PTRACK BACKUP + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=["--stream"]) + + # Move table back to default tablespace + node.safe_psql( + "postgres", "alter table t_heap set tablespace pg_default") + # SECOND PTRACK BACKUP + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=["--stream"]) + + # DROP TABLESPACE 'somedata' + node.safe_psql( + "postgres", "drop tablespace somedata") + # THIRD PTRACK BACKUP + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=["--stream"]) + + if self.paranoia: + pgdata = self.pgdata_content( + node.data_dir, ignore_ptrack=True) + + tblspace = self.get_tblspace_path(node, 'somedata') + node.cleanup() + shutil.rmtree(tblspace, ignore_errors=True) + self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) + + if self.paranoia: + pgdata_restored = self.pgdata_content( + 
node.data_dir, ignore_ptrack=True)
+
+        node.slow_start()
+
+        tblspc_exist = node.safe_psql(
+            "postgres",
+            "select exists(select 1 from "
+            "pg_tablespace where spcname = 'somedata')")
+
+        if tblspc_exist.rstrip() == 't':
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because "
+                "tablespace 'somedata' should not be present")
+
+        result_new = node.safe_psql("postgres", "select * from t_heap")
+        self.assertEqual(result, result_new)
+
+        if self.paranoia:
+            self.compare_pgdata(pgdata, pgdata_restored)
+
+    # @unittest.skip("skip")
+    def test_ptrack_alter_tablespace(self):
+        """
+        Make node, create table, alter table tablespace, take ptrack backup,
+        move table from tablespace, take ptrack backup
+        """
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            set_replication=True,
+            ptrack_enable=True,
+            initdb_params=['--data-checksums'],
+            pg_options={
+                'checkpoint_timeout': '30s'})
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        node.slow_start()
+
+        node.safe_psql(
+            "postgres",
+            "CREATE EXTENSION ptrack")
+
+        self.create_tblspace_in_node(node, 'somedata')
+        tblspc_path = self.get_tblspace_path(node, 'somedata')
+
+        # CREATE TABLE
+        node.safe_psql(
+            "postgres",
+            "create table t_heap as select i as id, md5(i::text) as text, "
+            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
+
+        result = node.safe_psql("postgres", "select * from t_heap")
+        # FULL BACKUP
+        self.backup_node(backup_dir, 'node', node, options=["--stream"])
+
+        # Move table to separate tablespace
+        node.safe_psql(
+            "postgres",
+            "alter table t_heap set tablespace somedata")
+        # GET LOGICAL CONTENT FROM NODE
+        result = node.safe_psql("postgres", "select * from t_heap")
+
+        # FIRST PTRACK BACKUP
+        self.backup_node(
+            backup_dir, 'node', node, backup_type='ptrack',
+            options=["--stream"])
+
+        # GET PHYSICAL CONTENT FROM NODE
+        if self.paranoia:
+            pgdata = self.pgdata_content(node.data_dir)
+
+        # Restore ptrack backup
+        restored_node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'restored_node'))
+        restored_node.cleanup()
+        tblspc_path_new = self.get_tblspace_path(
+            restored_node, 'somedata_restored')
+        self.restore_node(backup_dir, 'node', restored_node, options=[
+            "-j", "4", "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)])
+
+        # GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT
+        if self.paranoia:
+            pgdata_restored = self.pgdata_content(
+                restored_node.data_dir, ignore_ptrack=False)
+            self.compare_pgdata(pgdata, pgdata_restored)
+
+        # START RESTORED NODE
+        self.set_auto_conf(
+            restored_node, {'port': restored_node.port})
+        restored_node.slow_start()
+
+        # COMPARE LOGICAL CONTENT
+        result_new = restored_node.safe_psql(
+            "postgres", "select * from t_heap")
+        self.assertEqual(result, result_new)
+
+        restored_node.cleanup()
+        shutil.rmtree(tblspc_path_new, ignore_errors=True)
+
+        # Move table to default tablespace
+        node.safe_psql(
+            "postgres", "alter table t_heap set tablespace pg_default")
+        # SECOND PTRACK BACKUP
+        self.backup_node(
+            backup_dir, 'node', node, backup_type='ptrack',
+            options=["--stream"])
+
+        if self.paranoia:
+            pgdata = self.pgdata_content(node.data_dir)
+
+        # Restore second ptrack backup and check table consistency
+        self.restore_node(
+            backup_dir, 'node', restored_node,
+            options=[
+                "-j", "4", "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)])
+
+        # GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT
+        if self.paranoia:
+            pgdata_restored = self.pgdata_content(
+                restored_node.data_dir, ignore_ptrack=False)
+            self.compare_pgdata(pgdata, pgdata_restored)
+
+        # START RESTORED NODE
+        self.set_auto_conf(
+            restored_node, {'port': restored_node.port})
+        restored_node.slow_start()
+
+        result_new = restored_node.safe_psql(
+            "postgres", "select * from t_heap")
+        self.assertEqual(result, result_new)
+
+    # @unittest.skip("skip")
+    def test_ptrack_multiple_segments(self):
+        """
+        Make node, create a pgbench table large enough to span multiple segments,
+        take full and ptrack backups, restore and check data correctness
+        """
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            set_replication=True,
+            ptrack_enable=True,
+            initdb_params=['--data-checksums'],
+            pg_options={
+                'full_page_writes': 'off'})
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        node.slow_start()
+
+        node.safe_psql(
+            "postgres",
+            "CREATE EXTENSION ptrack")
+
+        self.create_tblspace_in_node(node, 'somedata')
+
+        # CREATE TABLE
+        node.pgbench_init(scale=100, options=['--tablespace=somedata'])
+        # FULL BACKUP
+        self.backup_node(backup_dir, 'node', node, options=['--stream'])
+
+        # PTRACK STUFF
+        if node.major_version < 11:
+            idx_ptrack = {'type': 'heap'}
+            idx_ptrack['path'] = self.get_fork_path(node, 'pgbench_accounts')
+            idx_ptrack['old_size'] = self.get_fork_size(node, 'pgbench_accounts')
+            idx_ptrack['old_pages'] = self.get_md5_per_page_for_fork(
+                idx_ptrack['path'], idx_ptrack['old_size'])
+
+        pgbench = node.pgbench(
+            options=['-T', '30', '-c', '1', '--no-vacuum'])
+        pgbench.wait()
+
+        node.safe_psql("postgres", "checkpoint")
+
+        if node.major_version < 11:
+            idx_ptrack['new_size'] = self.get_fork_size(
+                node,
+                'pgbench_accounts')
+
+            idx_ptrack['new_pages'] = self.get_md5_per_page_for_fork(
+                idx_ptrack['path'],
+                idx_ptrack['new_size'])
+
+            idx_ptrack['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
+                node,
+                idx_ptrack['path'])
+
+            if not self.check_ptrack_sanity(idx_ptrack):
+                self.assertTrue(
+                    False, 'Ptrack has failed to register changes in data files')
+
+        # GET LOGICAL CONTENT FROM NODE
+        # it's redundant here, because hint bits are ignored by ptrack
+        result = node.safe_psql("postgres", "select * from pgbench_accounts")
+        # FIRST PTRACK BACKUP
+        self.backup_node(
+            backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])
+
+        # GET PHYSICAL CONTENT FROM NODE
+        pgdata = self.pgdata_content(node.data_dir)
+
+        # RESTORE NODE
+        restored_node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'restored_node'))
+        restored_node.cleanup()
+        tblspc_path = self.get_tblspace_path(node, 'somedata')
+        tblspc_path_new = self.get_tblspace_path(
+            restored_node,
+            'somedata_restored')
+
+        self.restore_node(
+            backup_dir, 'node', restored_node,
+            options=[
+                "-j", "4", "-T", "{0}={1}".format(
+                    tblspc_path, tblspc_path_new)])
+
+        # GET PHYSICAL CONTENT FROM NODE_RESTORED
+        if self.paranoia:
+            pgdata_restored = self.pgdata_content(
+                restored_node.data_dir, ignore_ptrack=False)
+
+        # START RESTORED NODE
+        self.set_auto_conf(
+            restored_node, {'port': restored_node.port})
+        restored_node.slow_start()
+
+        result_new = restored_node.safe_psql(
+            "postgres",
+            "select * from pgbench_accounts")
+
+        # COMPARE RESTORED FILES
+        self.assertEqual(result, result_new, 'data is lost')
+
+        if self.paranoia:
self.compare_pgdata(pgdata, pgdata_restored) + + @unittest.skip("skip") + def test_atexit_fail(self): + """ + Take backups of every available types and check that PTRACK is clean. + Relevant only for PTRACK 1.x + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_connections': '15'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # Take FULL backup to clean every ptrack + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + try: + self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', + options=["--stream", "-j 30"]) + + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because we are opening too many connections" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd) + ) + except ProbackupException as e: + self.assertIn( + 'setting its status to ERROR', + e.message, + '\n Unexpected Error Message: {0}\n' + ' CMD: {1}'.format(repr(e.message), self.cmd) + ) + + self.assertEqual( + node.safe_psql( + "postgres", + "select * from pg_is_in_backup()").rstrip(), + "f") + + @unittest.skip("skip") + # @unittest.expectedFailure + def test_ptrack_clean(self): + """ + Take backups of every available types and check that PTRACK is clean + Relevant only for PTRACK 1.x + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'somedata') + + # Create table and indexes + node.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap tablespace somedata " + "as select i as id, nextval('t_seq') as t_seq, " + "md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,2560) i") + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + node.safe_psql( + "postgres", + "create index {0} on {1} using {2}({3}) " + "tablespace somedata".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], + idx_ptrack[i]['column'])) + + # Take FULL backup to clean every ptrack + self.backup_node( + backup_dir, 'node', node, + options=['-j10', '--stream']) + node.safe_psql('postgres', 'checkpoint') + + for i in idx_ptrack: + # get fork size and calculate it in pages + idx_ptrack[i]['size'] = self.get_fork_size(node, i) + # get path to heap and index files + idx_ptrack[i]['path'] = self.get_fork_path(node, i) + # get ptrack for every idx + idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( + node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) + self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) + + # Update everything and vacuum it + node.safe_psql( + 'postgres', + "update t_heap set t_seq = nextval('t_seq'), " + "text = md5(text), " + "tsvector = md5(repeat(tsvector::text, 10))::tsvector;") + node.safe_psql('postgres', 'vacuum t_heap') + + # Take PTRACK backup to clean every ptrack + backup_id = self.backup_node( + backup_dir, 'node', node, 
backup_type='ptrack', options=['-j10', '--stream']) + + node.safe_psql('postgres', 'checkpoint') + + for i in idx_ptrack: + # get new size of heap and indexes and calculate it in pages + idx_ptrack[i]['size'] = self.get_fork_size(node, i) + # update path to heap and index files in case they`ve changed + idx_ptrack[i]['path'] = self.get_fork_path(node, i) + # # get ptrack for every idx + idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( + node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) + # check that ptrack bits are cleaned + self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) + + # Update everything and vacuum it + node.safe_psql( + 'postgres', + "update t_heap set t_seq = nextval('t_seq'), " + "text = md5(text), " + "tsvector = md5(repeat(tsvector::text, 10))::tsvector;") + node.safe_psql('postgres', 'vacuum t_heap') + + # Take PAGE backup to clean every ptrack + self.backup_node( + backup_dir, 'node', node, + backup_type='page', options=['-j10', '--stream']) + node.safe_psql('postgres', 'checkpoint') + + for i in idx_ptrack: + # get new size of heap and indexes and calculate it in pages + idx_ptrack[i]['size'] = self.get_fork_size(node, i) + # update path to heap and index files in case they`ve changed + idx_ptrack[i]['path'] = self.get_fork_path(node, i) + # # get ptrack for every idx + idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( + node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) + # check that ptrack bits are cleaned + self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) + + @unittest.skip("skip") + def test_ptrack_clean_replica(self): + """ + Take backups of every available types from + master and check that PTRACK on replica is clean. + Relevant only for PTRACK 1.x + """ + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums'], + pg_options={ + 'archive_timeout': '30s'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + master.slow_start() + + self.backup_node(backup_dir, 'master', master, options=['--stream']) + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.restore_node(backup_dir, 'master', replica) + + self.add_instance(backup_dir, 'replica', replica) + self.set_replica(master, replica, synchronous=True) + replica.slow_start(replica=True) + + # Create table and indexes + master.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap as select i as id, " + "nextval('t_seq') as t_seq, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,2560) i") + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + master.safe_psql( + "postgres", + "create index {0} on {1} using {2}({3})".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], + idx_ptrack[i]['column'])) + + # Take FULL backup to clean every ptrack + self.backup_node( + backup_dir, + 'replica', + replica, + options=[ + '-j10', '--stream', + '--master-host=localhost', + '--master-db=postgres', + '--master-port={0}'.format(master.port)]) + master.safe_psql('postgres', 'checkpoint') + + for i in idx_ptrack: + # get fork size and calculate it in pages + idx_ptrack[i]['size'] = self.get_fork_size(replica, i) + 
# get path to heap and index files + idx_ptrack[i]['path'] = self.get_fork_path(replica, i) + # get ptrack for every idx + idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( + replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) + self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) + + # Update everything and vacuum it + master.safe_psql( + 'postgres', + "update t_heap set t_seq = nextval('t_seq'), " + "text = md5(text), " + "tsvector = md5(repeat(tsvector::text, 10))::tsvector;") + master.safe_psql('postgres', 'vacuum t_heap') + + # Take PTRACK backup to clean every ptrack + backup_id = self.backup_node( + backup_dir, + 'replica', + replica, + backup_type='ptrack', + options=[ + '-j10', '--stream', + '--master-host=localhost', + '--master-db=postgres', + '--master-port={0}'.format(master.port)]) + master.safe_psql('postgres', 'checkpoint') + + for i in idx_ptrack: + # get new size of heap and indexes and calculate it in pages + idx_ptrack[i]['size'] = self.get_fork_size(replica, i) + # update path to heap and index files in case they`ve changed + idx_ptrack[i]['path'] = self.get_fork_path(replica, i) + # # get ptrack for every idx + idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( + replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) + # check that ptrack bits are cleaned + self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) + + # Update everything and vacuum it + master.safe_psql( + 'postgres', + "update t_heap set t_seq = nextval('t_seq'), text = md5(text), " + "tsvector = md5(repeat(tsvector::text, 10))::tsvector;") + master.safe_psql('postgres', 'vacuum t_heap') + master.safe_psql('postgres', 'checkpoint') + + # Take PAGE backup to clean every ptrack + self.backup_node( + backup_dir, + 'replica', + replica, + backup_type='page', + options=[ + '-j10', '--master-host=localhost', + '--master-db=postgres', + '--master-port={0}'.format(master.port), + '--stream']) + master.safe_psql('postgres', 'checkpoint') + + for i in idx_ptrack: + # get new size of heap and indexes and calculate it in pages + idx_ptrack[i]['size'] = self.get_fork_size(replica, i) + # update path to heap and index files in case they`ve changed + idx_ptrack[i]['path'] = self.get_fork_path(replica, i) + # # get ptrack for every idx + idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( + replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) + # check that ptrack bits are cleaned + self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_ptrack_cluster_on_btree(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.create_tblspace_in_node(node, 'somedata') + + # Create table and indexes + node.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap tablespace somedata " + "as select i as id, nextval('t_seq') as t_seq, " + "md5(i::text) as text, md5(repeat(i::text,10))::tsvector " + "as tsvector from generate_series(0,2560) i") + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + node.safe_psql( + "postgres", + 
"create index {0} on {1} using {2}({3}) " + "tablespace somedata".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], idx_ptrack[i]['column'])) + + node.safe_psql('postgres', 'vacuum t_heap') + node.safe_psql('postgres', 'checkpoint') + + if node.major_version < 11: + for i in idx_ptrack: + # get size of heap and indexes. size calculated in pages + idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) + # get path to heap and index files + idx_ptrack[i]['path'] = self.get_fork_path(node, i) + # calculate md5sums of pages + idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( + idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) + + self.backup_node( + backup_dir, 'node', node, options=['-j10', '--stream']) + + node.safe_psql('postgres', 'delete from t_heap where id%2 = 1') + node.safe_psql('postgres', 'cluster t_heap using t_btree') + node.safe_psql('postgres', 'checkpoint') + + # CHECK PTRACK SANITY + if node.major_version < 11: + self.check_ptrack_map_sanity(node, idx_ptrack) + + # @unittest.skip("skip") + def test_ptrack_cluster_on_gist(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + # Create table and indexes + node.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap as select i as id, " + "nextval('t_seq') as t_seq, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,2560) i") + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + node.safe_psql( + "postgres", + "create index {0} on {1} using {2}({3})".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], idx_ptrack[i]['column'])) + + node.safe_psql('postgres', 'vacuum t_heap') + node.safe_psql('postgres', 'checkpoint') + + for i in idx_ptrack: + # get size of heap and indexes. 
size calculated in pages + idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) + # get path to heap and index files + idx_ptrack[i]['path'] = self.get_fork_path(node, i) + # calculate md5sums of pages + idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( + idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) + + self.backup_node( + backup_dir, 'node', node, options=['-j10', '--stream']) + + node.safe_psql('postgres', 'delete from t_heap where id%2 = 1') + node.safe_psql('postgres', 'cluster t_heap using t_gist') + node.safe_psql('postgres', 'checkpoint') + + # CHECK PTRACK SANITY + if node.major_version < 11: + self.check_ptrack_map_sanity(node, idx_ptrack) + + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=['-j10', '--stream']) + + pgdata = self.pgdata_content(node.data_dir) + node.cleanup() + + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_ptrack_cluster_on_btree_replica(self): + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + master.slow_start() + + if master.major_version >= 11: + master.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.backup_node(backup_dir, 'master', master, options=['--stream']) + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.restore_node(backup_dir, 'master', replica) + + self.add_instance(backup_dir, 'replica', replica) + self.set_replica(master, replica, synchronous=True) + replica.slow_start(replica=True) + + # Create table and indexes + master.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap as select i as id, " + "nextval('t_seq') as t_seq, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,2560) i") + + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + master.safe_psql( + "postgres", + "create index {0} on {1} using {2}({3})".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], + idx_ptrack[i]['column'])) + + master.safe_psql('postgres', 'vacuum t_heap') + master.safe_psql('postgres', 'checkpoint') + + self.backup_node( + backup_dir, 'replica', replica, options=[ + '-j10', '--stream', '--master-host=localhost', + '--master-db=postgres', '--master-port={0}'.format( + master.port)]) + + for i in idx_ptrack: + # get size of heap and indexes. 
size calculated in pages + idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i) + # get path to heap and index files + idx_ptrack[i]['path'] = self.get_fork_path(replica, i) + # calculate md5sums of pages + idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( + idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) + + master.safe_psql('postgres', 'delete from t_heap where id%2 = 1') + master.safe_psql('postgres', 'cluster t_heap using t_btree') + master.safe_psql('postgres', 'checkpoint') + + # Sync master and replica + self.wait_until_replica_catch_with_master(master, replica) + replica.safe_psql('postgres', 'checkpoint') + + # CHECK PTRACK SANITY + if master.major_version < 11: + self.check_ptrack_map_sanity(replica, idx_ptrack) + + self.backup_node( + backup_dir, 'replica', replica, + backup_type='ptrack', options=['-j10', '--stream']) + + pgdata = self.pgdata_content(replica.data_dir) + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node')) + node.cleanup() + + self.restore_node(backup_dir, 'replica', node) + + pgdata_restored = self.pgdata_content(replica.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_ptrack_cluster_on_gist_replica(self): + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + ptrack_enable=True) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + master.slow_start() + + if master.major_version >= 11: + master.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.backup_node(backup_dir, 'master', master, options=['--stream']) + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.restore_node(backup_dir, 'master', replica) + + self.add_instance(backup_dir, 'replica', replica) + self.set_replica(master, replica, 'replica', synchronous=True) + replica.slow_start(replica=True) + + # Create table and indexes + master.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap as select i as id, " + "nextval('t_seq') as t_seq, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,2560) i") + + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + master.safe_psql( + "postgres", + "create index {0} on {1} using {2}({3})".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], + idx_ptrack[i]['column'])) + + master.safe_psql('postgres', 'vacuum t_heap') + master.safe_psql('postgres', 'checkpoint') + + # Sync master and replica + self.wait_until_replica_catch_with_master(master, replica) + replica.safe_psql('postgres', 'checkpoint') + + self.backup_node( + backup_dir, 'replica', replica, options=[ + '-j10', '--stream', '--master-host=localhost', + '--master-db=postgres', '--master-port={0}'.format( + master.port)]) + + for i in idx_ptrack: + # get size of heap and indexes. 
size calculated in pages + idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i) + # get path to heap and index files + idx_ptrack[i]['path'] = self.get_fork_path(replica, i) + # calculate md5sums of pages + idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( + idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) + + master.safe_psql('postgres', 'DELETE FROM t_heap WHERE id%2 = 1') + master.safe_psql('postgres', 'CLUSTER t_heap USING t_gist') + + if master.major_version < 11: + master.safe_psql('postgres', 'CHECKPOINT') + + # Sync master and replica + self.wait_until_replica_catch_with_master(master, replica) + + if master.major_version < 11: + replica.safe_psql('postgres', 'CHECKPOINT') + self.check_ptrack_map_sanity(replica, idx_ptrack) + + self.backup_node( + backup_dir, 'replica', replica, + backup_type='ptrack', options=['-j10', '--stream']) + + if self.paranoia: + pgdata = self.pgdata_content(replica.data_dir) + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node')) + node.cleanup() + + self.restore_node(backup_dir, 'replica', node) + + if self.paranoia: + pgdata_restored = self.pgdata_content(replica.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_ptrack_empty(self): + """Take backups of every available types and check that PTRACK is clean""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.create_tblspace_in_node(node, 'somedata') + + # Create table + node.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap " + "(id int DEFAULT nextval('t_seq'), text text, tsvector tsvector) " + "tablespace somedata") + + # Take FULL backup to clean every ptrack + self.backup_node( + backup_dir, 'node', node, + options=['-j10', '--stream']) + + # Create indexes + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + node.safe_psql( + "postgres", + "create index {0} on {1} using {2}({3}) " + "tablespace somedata".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], + idx_ptrack[i]['column'])) + + node.safe_psql('postgres', 'checkpoint') + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + tblspace1 = self.get_tblspace_path(node, 'somedata') + tblspace2 = self.get_tblspace_path(node_restored, 'somedata') + + # Take PTRACK backup + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', + options=['-j10', '--stream']) + + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + self.restore_node( + backup_dir, 'node', node_restored, + backup_id=backup_id, + options=[ + "-j", "4", + "-T{0}={1}".format(tblspace1, tblspace2)]) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_ptrack_empty_replica(self): + """ + Take backups of every available types from master + and check that PTRACK on replica is clean + """ + master = 
self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums'], + ptrack_enable=True) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + master.slow_start() + + if master.major_version >= 11: + master.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.backup_node(backup_dir, 'master', master, options=['--stream']) + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.restore_node(backup_dir, 'master', replica) + + self.add_instance(backup_dir, 'replica', replica) + self.set_replica(master, replica, synchronous=True) + replica.slow_start(replica=True) + + # Create table + master.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap " + "(id int DEFAULT nextval('t_seq'), text text, tsvector tsvector)") + self.wait_until_replica_catch_with_master(master, replica) + + # Take FULL backup + self.backup_node( + backup_dir, + 'replica', + replica, + options=[ + '-j10', '--stream', + '--master-host=localhost', + '--master-db=postgres', + '--master-port={0}'.format(master.port)]) + + # Create indexes + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + master.safe_psql( + "postgres", + "create index {0} on {1} using {2}({3})".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], + idx_ptrack[i]['column'])) + + self.wait_until_replica_catch_with_master(master, replica) + + # Take PTRACK backup + backup_id = self.backup_node( + backup_dir, + 'replica', + replica, + backup_type='ptrack', + options=[ + '-j1', '--stream', + '--master-host=localhost', + '--master-db=postgres', + '--master-port={0}'.format(master.port)]) + + if self.paranoia: + pgdata = self.pgdata_content(replica.data_dir) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node( + backup_dir, 'replica', node_restored, + backup_id=backup_id, options=["-j", "4"]) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_ptrack_truncate(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.create_tblspace_in_node(node, 'somedata') + + # Create table and indexes + node.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap tablespace somedata " + "as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,2560) i") + + if node.major_version < 11: + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + node.safe_psql( + "postgres", + "create index {0} on {1} using {2}({3}) " + "tablespace somedata".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], idx_ptrack[i]['column'])) + + 
self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + node.safe_psql('postgres', 'truncate t_heap') + node.safe_psql('postgres', 'checkpoint') + + if node.major_version < 11: + for i in idx_ptrack: + # get fork size and calculate it in pages + idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) + # get path to heap and index files + idx_ptrack[i]['path'] = self.get_fork_path(node, i) + # calculate md5sums for every page of this fork + idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( + idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) + + # Make backup to clean every ptrack + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=['-j10', '--stream']) + + pgdata = self.pgdata_content(node.data_dir) + + if node.major_version < 11: + for i in idx_ptrack: + idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( + node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']]) + self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size']) + + node.cleanup() + shutil.rmtree( + self.get_tblspace_path(node, 'somedata'), + ignore_errors=True) + + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_basic_ptrack_truncate_replica(self): + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_size': '32MB', + 'archive_timeout': '10s', + 'checkpoint_timeout': '5min'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + master.slow_start() + + if master.major_version >= 11: + master.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.backup_node(backup_dir, 'master', master, options=['--stream']) + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.restore_node(backup_dir, 'master', replica) + + self.add_instance(backup_dir, 'replica', replica) + self.set_replica(master, replica, 'replica', synchronous=True) + replica.slow_start(replica=True) + + # Create table and indexes + master.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap " + "as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,2560) i") + + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + master.safe_psql( + "postgres", + "create index {0} on {1} using {2}({3}) ".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], idx_ptrack[i]['column'])) + + # Sync master and replica + self.wait_until_replica_catch_with_master(master, replica) + replica.safe_psql('postgres', 'checkpoint') + + if replica.major_version < 11: + for i in idx_ptrack: + # get fork size and calculate it in pages + idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i) + # get path to heap and index files + idx_ptrack[i]['path'] = self.get_fork_path(replica, i) + # calculate md5sums for every page of this fork + idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( + idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) + + # Make backup to clean every ptrack + self.backup_node( + backup_dir, 'replica', replica, + options=[ + '-j10', + '--stream', + 
'--master-host=localhost', + '--master-db=postgres', + '--master-port={0}'.format(master.port)]) + + if replica.major_version < 11: + for i in idx_ptrack: + idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( + replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']]) + self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size']) + + master.safe_psql('postgres', 'truncate t_heap') + + # Sync master and replica + self.wait_until_replica_catch_with_master(master, replica) + + if replica.major_version < 10: + replica.safe_psql( + "postgres", + "select pg_xlog_replay_pause()") + else: + replica.safe_psql( + "postgres", + "select pg_wal_replay_pause()") + + self.backup_node( + backup_dir, 'replica', replica, backup_type='ptrack', + options=[ + '-j10', + '--stream', + '--master-host=localhost', + '--master-db=postgres', + '--master-port={0}'.format(master.port)]) + + pgdata = self.pgdata_content(replica.data_dir) + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node')) + node.cleanup() + + self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir) + + pgdata_restored = self.pgdata_content(node.data_dir) + + if self.paranoia: + self.compare_pgdata(pgdata, pgdata_restored) + + self.set_auto_conf(node, {'port': node.port}) + + node.slow_start() + + node.safe_psql( + 'postgres', + 'select 1') + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_ptrack_vacuum(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.create_tblspace_in_node(node, 'somedata') + + # Create table and indexes + node.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap tablespace somedata " + "as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,2560) i") + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + node.safe_psql( + "postgres", + "create index {0} on {1} using {2}({3}) " + "tablespace somedata".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], + idx_ptrack[i]['column'])) + + comparision_exclusion = self.get_known_bugs_comparision_exclusion_dict(node) + + node.safe_psql('postgres', 'vacuum t_heap') + node.safe_psql('postgres', 'checkpoint') + + # Make full backup to clean every ptrack + self.backup_node( + backup_dir, 'node', node, options=['-j10', '--stream']) + + if node.major_version < 11: + for i in idx_ptrack: + # get fork size and calculate it in pages + idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) + # get path to heap and index files + idx_ptrack[i]['path'] = self.get_fork_path(node, i) + # calculate md5sums for every page of this fork + idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( + idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) + idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( + node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']]) + self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size']) + + # Delete some rows, vacuum it and make checkpoint + node.safe_psql('postgres', 'delete from t_heap where id%2 = 1') + 
node.safe_psql('postgres', 'vacuum t_heap') + node.safe_psql('postgres', 'checkpoint') + + # CHECK PTRACK SANITY + if node.major_version < 11: + self.check_ptrack_map_sanity(node, idx_ptrack) + + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=['-j10', '--stream']) + + pgdata = self.pgdata_content(node.data_dir) + node.cleanup() + + shutil.rmtree( + self.get_tblspace_path(node, 'somedata'), + ignore_errors=True) + + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion) + + # @unittest.skip("skip") + def test_ptrack_vacuum_replica(self): + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + master.slow_start() + + if master.major_version >= 11: + master.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.backup_node(backup_dir, 'master', master, options=['--stream']) + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.restore_node(backup_dir, 'master', replica) + + self.add_instance(backup_dir, 'replica', replica) + self.set_replica(master, replica, 'replica', synchronous=True) + replica.slow_start(replica=True) + + # Create table and indexes + master.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap as select i as id, " + "md5(i::text) as text, md5(repeat(i::text,10))::tsvector " + "as tsvector from generate_series(0,2560) i") + + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + master.safe_psql( + "postgres", + "create index {0} on {1} using {2}({3})".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], idx_ptrack[i]['column'])) + + master.safe_psql('postgres', 'vacuum t_heap') + master.safe_psql('postgres', 'checkpoint') + + # Sync master and replica + self.wait_until_replica_catch_with_master(master, replica) + replica.safe_psql('postgres', 'checkpoint') + + # Make FULL backup to clean every ptrack + self.backup_node( + backup_dir, 'replica', replica, options=[ + '-j10', '--master-host=localhost', + '--master-db=postgres', + '--master-port={0}'.format(master.port), + '--stream']) + + if replica.major_version < 11: + for i in idx_ptrack: + # get fork size and calculate it in pages + idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i) + # get path to heap and index files + idx_ptrack[i]['path'] = self.get_fork_path(replica, i) + # calculate md5sums for every page of this fork + idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( + idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) + idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( + replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']]) + self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size']) + + # Delete some rows, vacuum it and make checkpoint + master.safe_psql('postgres', 'delete from t_heap where id%2 = 1') + master.safe_psql('postgres', 'vacuum t_heap') + master.safe_psql('postgres', 'checkpoint') + + # Sync master and replica + self.wait_until_replica_catch_with_master(master, replica) + 
replica.safe_psql('postgres', 'checkpoint') + + # CHECK PTRACK SANITY + if replica.major_version < 11: + self.check_ptrack_map_sanity(master, idx_ptrack) + + self.backup_node( + backup_dir, 'replica', replica, + backup_type='ptrack', options=['-j10', '--stream']) + + pgdata = self.pgdata_content(replica.data_dir) + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node')) + node.cleanup() + + self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_ptrack_vacuum_bits_frozen(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.create_tblspace_in_node(node, 'somedata') + + # Create table and indexes + res = node.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap tablespace somedata " + "as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,2560) i") + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + node.safe_psql( + "postgres", + "create index {0} on {1} using {2}({3}) " + "tablespace somedata".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], + idx_ptrack[i]['column'])) + + comparision_exclusion = self.get_known_bugs_comparision_exclusion_dict(node) + node.safe_psql('postgres', 'checkpoint') + + self.backup_node( + backup_dir, 'node', node, options=['-j10', '--stream']) + + node.safe_psql('postgres', 'vacuum freeze t_heap') + node.safe_psql('postgres', 'checkpoint') + + if node.major_version < 11: + for i in idx_ptrack: + # get size of heap and indexes. 
size calculated in pages + idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) + # get path to heap and index files + idx_ptrack[i]['path'] = self.get_fork_path(node, i) + # calculate md5sums of pages + idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( + idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) + + # CHECK PTRACK SANITY + if node.major_version < 11: + self.check_ptrack_map_sanity(node, idx_ptrack) + + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=['-j10', '--stream']) + + pgdata = self.pgdata_content(node.data_dir) + node.cleanup() + shutil.rmtree( + self.get_tblspace_path(node, 'somedata'), + ignore_errors=True) + + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion) + + # @unittest.skip("skip") + def test_ptrack_vacuum_bits_frozen_replica(self): + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + master.slow_start() + + if master.major_version >= 11: + master.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.backup_node(backup_dir, 'master', master, options=['--stream']) + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.restore_node(backup_dir, 'master', replica) + + self.add_instance(backup_dir, 'replica', replica) + self.set_replica(master, replica, synchronous=True) + replica.slow_start(replica=True) + + # Create table and indexes + master.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap as select i as id, " + "md5(i::text) as text, md5(repeat(i::text,10))::tsvector " + "as tsvector from generate_series(0,2560) i") + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + master.safe_psql( + "postgres", + "create index {0} on {1} using {2}({3})".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], + idx_ptrack[i]['column'])) + + master.safe_psql('postgres', 'checkpoint') + + # Sync master and replica + self.wait_until_replica_catch_with_master(master, replica) + replica.safe_psql('postgres', 'checkpoint') + + # Take backup to clean every ptrack + self.backup_node( + backup_dir, 'replica', replica, + options=[ + '-j10', + '--master-host=localhost', + '--master-db=postgres', + '--master-port={0}'.format(master.port), + '--stream']) + + if replica.major_version < 11: + for i in idx_ptrack: + # get size of heap and indexes. 
size calculated in pages + idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i) + # get path to heap and index files + idx_ptrack[i]['path'] = self.get_fork_path(replica, i) + # calculate md5sums of pages + idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( + idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) + + master.safe_psql('postgres', 'vacuum freeze t_heap') + master.safe_psql('postgres', 'checkpoint') + + # Sync master and replica + self.wait_until_replica_catch_with_master(master, replica) + replica.safe_psql('postgres', 'checkpoint') + + # CHECK PTRACK SANITY + if replica.major_version < 11: + self.check_ptrack_map_sanity(master, idx_ptrack) + + self.backup_node( + backup_dir, 'replica', replica, backup_type='ptrack', + options=['-j10', '--stream']) + + pgdata = self.pgdata_content(replica.data_dir) + replica.cleanup() + + self.restore_node(backup_dir, 'replica', replica) + + pgdata_restored = self.pgdata_content(replica.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_ptrack_vacuum_bits_visibility(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.create_tblspace_in_node(node, 'somedata') + + # Create table and indexes + res = node.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap tablespace somedata " + "as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,2560) i") + + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + node.safe_psql( + "postgres", + "create index {0} on {1} using {2}({3}) " + "tablespace somedata".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], idx_ptrack[i]['column'])) + + comparision_exclusion = self.get_known_bugs_comparision_exclusion_dict(node) + node.safe_psql('postgres', 'checkpoint') + + self.backup_node( + backup_dir, 'node', node, options=['-j10', '--stream']) + + if node.major_version < 11: + for i in idx_ptrack: + # get size of heap and indexes. 
size calculated in pages + idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) + # get path to heap and index files + idx_ptrack[i]['path'] = self.get_fork_path(node, i) + # calculate md5sums of pages + idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( + idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) + + node.safe_psql('postgres', 'vacuum t_heap') + node.safe_psql('postgres', 'checkpoint') + + # CHECK PTRACK SANITY + if node.major_version < 11: + self.check_ptrack_map_sanity(node, idx_ptrack) + + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=['-j10', '--stream']) + + pgdata = self.pgdata_content(node.data_dir) + node.cleanup() + shutil.rmtree( + self.get_tblspace_path(node, 'somedata'), + ignore_errors=True) + + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_ptrack_vacuum_full_2(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + pg_options={ 'wal_log_hints': 'on' }) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.create_tblspace_in_node(node, 'somedata') + + # Create table and indexes + res = node.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap tablespace somedata " + "as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,2560) i") + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + node.safe_psql( + "postgres", "create index {0} on {1} " + "using {2}({3}) tablespace somedata".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], idx_ptrack[i]['column'])) + + node.safe_psql('postgres', 'vacuum t_heap') + node.safe_psql('postgres', 'checkpoint') + + self.backup_node( + backup_dir, 'node', node, options=['-j10', '--stream']) + + if node.major_version < 11: + for i in idx_ptrack: + # get size of heap and indexes. 
size calculated in pages + idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) + # get path to heap and index files + idx_ptrack[i]['path'] = self.get_fork_path(node, i) + # calculate md5sums of pages + idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( + idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) + + node.safe_psql('postgres', 'delete from t_heap where id%2 = 1') + node.safe_psql('postgres', 'vacuum full t_heap') + node.safe_psql('postgres', 'checkpoint') + + if node.major_version < 11: + self.check_ptrack_map_sanity(node, idx_ptrack) + + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=['-j10', '--stream']) + + pgdata = self.pgdata_content(node.data_dir) + node.cleanup() + + shutil.rmtree( + self.get_tblspace_path(node, 'somedata'), + ignore_errors=True) + + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_ptrack_vacuum_full_replica(self): + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + master.slow_start() + + if master.major_version >= 11: + master.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.backup_node(backup_dir, 'master', master, options=['--stream']) + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.restore_node(backup_dir, 'master', replica) + + self.add_instance(backup_dir, 'replica', replica) + self.set_replica(master, replica, 'replica', synchronous=True) + replica.slow_start(replica=True) + + # Create table and indexes + master.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap as select i as id, " + "md5(i::text) as text, md5(repeat(i::text,10))::tsvector as " + "tsvector from generate_series(0,256000) i") + + if master.major_version < 11: + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + master.safe_psql( + "postgres", + "create index {0} on {1} using {2}({3})".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], + idx_ptrack[i]['column'])) + + master.safe_psql('postgres', 'vacuum t_heap') + master.safe_psql('postgres', 'checkpoint') + + # Sync master and replica + self.wait_until_replica_catch_with_master(master, replica) + replica.safe_psql('postgres', 'checkpoint') + + # Take FULL backup to clean every ptrack + self.backup_node( + backup_dir, 'replica', replica, + options=[ + '-j10', + '--master-host=localhost', + '--master-db=postgres', + '--master-port={0}'.format(master.port), + '--stream']) + + if replica.major_version < 11: + for i in idx_ptrack: + # get size of heap and indexes. 
size calculated in pages + idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i) + # get path to heap and index files + idx_ptrack[i]['path'] = self.get_fork_path(replica, i) + # calculate md5sums of pages + idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( + idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) + + master.safe_psql('postgres', 'delete from t_heap where id%2 = 1') + master.safe_psql('postgres', 'vacuum full t_heap') + master.safe_psql('postgres', 'checkpoint') + + # Sync master and replica + self.wait_until_replica_catch_with_master(master, replica) + replica.safe_psql('postgres', 'checkpoint') + + if replica.major_version < 11: + self.check_ptrack_map_sanity(master, idx_ptrack) + + self.backup_node( + backup_dir, 'replica', replica, + backup_type='ptrack', options=['-j10', '--stream']) + + pgdata = self.pgdata_content(replica.data_dir) + replica.cleanup() + + self.restore_node(backup_dir, 'replica', replica) + + pgdata_restored = self.pgdata_content(replica.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_ptrack_vacuum_truncate_2(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + # Create table and indexes + res = node.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap " + "as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,2560) i") + + if node.major_version < 11: + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + node.safe_psql( + "postgres", "create index {0} on {1} using {2}({3})".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], idx_ptrack[i]['column'])) + + node.safe_psql('postgres', 'VACUUM t_heap') + + self.backup_node( + backup_dir, 'node', node, options=['-j10', '--stream']) + + if node.major_version < 11: + for i in idx_ptrack: + # get size of heap and indexes. 
size calculated in pages + idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) + # get path to heap and index files + idx_ptrack[i]['path'] = self.get_fork_path(node, i) + # calculate md5sums of pages + idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( + idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) + + node.safe_psql('postgres', 'DELETE FROM t_heap WHERE id > 128') + node.safe_psql('postgres', 'VACUUM t_heap') + node.safe_psql('postgres', 'CHECKPOINT') + + # CHECK PTRACK SANITY + if node.major_version < 11: + self.check_ptrack_map_sanity(node, idx_ptrack) + + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=['--stream']) + + pgdata = self.pgdata_content(node.data_dir) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node(backup_dir, 'node', node_restored) + + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_ptrack_vacuum_truncate_replica(self): + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + master.slow_start() + + if master.major_version >= 11: + master.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.backup_node(backup_dir, 'master', master, options=['--stream']) + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.restore_node(backup_dir, 'master', replica) + + self.add_instance(backup_dir, 'replica', replica) + self.set_replica(master, replica, 'replica', synchronous=True) + replica.slow_start(replica=True) + + # Create table and indexes + master.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap as select i as id, " + "md5(i::text) as text, md5(repeat(i::text,10))::tsvector " + "as tsvector from generate_series(0,2560) i") + + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + master.safe_psql( + "postgres", "create index {0} on {1} " + "using {2}({3})".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], idx_ptrack[i]['column'])) + + master.safe_psql('postgres', 'vacuum t_heap') + master.safe_psql('postgres', 'checkpoint') + + # Take FULL backup to clean every ptrack + self.backup_node( + backup_dir, 'replica', replica, + options=[ + '-j10', + '--stream', + '--master-host=localhost', + '--master-db=postgres', + '--master-port={0}'.format(master.port) + ] + ) + + if master.major_version < 11: + for i in idx_ptrack: + # get size of heap and indexes. 
size calculated in pages + idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i) + # get path to heap and index files + idx_ptrack[i]['path'] = self.get_fork_path(replica, i) + # calculate md5sums of pages + idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( + idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) + + master.safe_psql('postgres', 'DELETE FROM t_heap WHERE id > 128;') + master.safe_psql('postgres', 'VACUUM t_heap') + master.safe_psql('postgres', 'CHECKPOINT') + + # Sync master and replica + self.wait_until_replica_catch_with_master(master, replica) + replica.safe_psql('postgres', 'CHECKPOINT') + + # CHECK PTRACK SANITY + if master.major_version < 11: + self.check_ptrack_map_sanity(master, idx_ptrack) + + self.backup_node( + backup_dir, 'replica', replica, backup_type='ptrack', + options=[ + '--stream', + '--log-level-file=INFO', + '--archive-timeout=30']) + + pgdata = self.pgdata_content(replica.data_dir) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node(backup_dir, 'replica', node_restored) + + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + @unittest.skip("skip") + def test_ptrack_recovery(self): + """ + Check that ptrack map contain correct bits after recovery. + Actual only for PTRACK 1.x + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'somedata') + + # Create table + node.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap tablespace somedata " + "as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,2560) i") + + # Create indexes + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + node.safe_psql( + "postgres", "create index {0} on {1} using {2}({3}) " + "tablespace somedata".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], idx_ptrack[i]['column'])) + + # get size of heap and indexes. size calculated in pages + idx_ptrack[i]['size'] = int(self.get_fork_size(node, i)) + # get path to heap and index files + idx_ptrack[i]['path'] = self.get_fork_path(node, i) + + if self.verbose: + print('Killing postmaster. Losing Ptrack changes') + node.stop(['-m', 'immediate', '-D', node.data_dir]) + if not node.status(): + node.slow_start() + else: + print("Die! Die! Why won't you die?... 
Why won't you die?") + exit(1) + + for i in idx_ptrack: + # get ptrack for every idx + idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( + node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) + # check that ptrack has correct bits after recovery + self.check_ptrack_recovery(idx_ptrack[i]) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_ptrack_recovery_1(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums'], + pg_options={ + 'shared_buffers': '512MB', + 'max_wal_size': '3GB'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + # Create table + node.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap " + "as select nextval('t_seq')::int as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " +# "from generate_series(0,25600) i") + "from generate_series(0,2560) i") + + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + # Create indexes + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + node.safe_psql( + "postgres", + "CREATE INDEX {0} ON {1} USING {2}({3})".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], idx_ptrack[i]['column'])) + + node.safe_psql( + 'postgres', + "update t_heap set id = nextval('t_seq'), text = md5(text), " + "tsvector = md5(repeat(tsvector::text, 10))::tsvector") + + node.safe_psql( + 'postgres', + "create extension pg_buffercache") + + #print(node.safe_psql( + # 'postgres', + # "SELECT count(*) FROM pg_buffercache WHERE isdirty")) + + if self.verbose: + print('Killing postmaster. Losing Ptrack changes') + node.stop(['-m', 'immediate', '-D', node.data_dir]) + + if not node.status(): + node.slow_start() + else: + print("Die! Die! Why won't you die?... 
Why won't you die?") + exit(1) + + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=['--stream']) + + pgdata = self.pgdata_content(node.data_dir) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored) + + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_ptrack_zero_changes(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + # Create table + node.safe_psql( + "postgres", + "create table t_heap " + "as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,2560) i") + + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=['--stream']) + + pgdata = self.pgdata_content(node.data_dir) + node.cleanup() + + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_ptrack_pg_resetxlog(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums'], + pg_options={ + 'shared_buffers': '512MB', + 'max_wal_size': '3GB'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + # Create table + node.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap " + "as select nextval('t_seq')::int as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " +# "from generate_series(0,25600) i") + "from generate_series(0,2560) i") + + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + # Create indexes + for i in idx_ptrack: + if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': + node.safe_psql( + "postgres", + "CREATE INDEX {0} ON {1} USING {2}({3})".format( + i, idx_ptrack[i]['relation'], + idx_ptrack[i]['type'], idx_ptrack[i]['column'])) + + node.safe_psql( + 'postgres', + "update t_heap set id = nextval('t_seq'), text = md5(text), " + "tsvector = md5(repeat(tsvector::text, 10))::tsvector") + +# node.safe_psql( +# 'postgres', +# "create extension pg_buffercache") +# +# print(node.safe_psql( +# 'postgres', +# "SELECT count(*) FROM pg_buffercache WHERE isdirty")) + + # kill the bastard + if self.verbose: + print('Killing postmaster. 
Losing Ptrack changes') + node.stop(['-m', 'immediate', '-D', node.data_dir]) + + # now smack it with sledgehammer + if node.major_version >= 10: + pg_resetxlog_path = self.get_bin_path('pg_resetwal') + wal_dir = 'pg_wal' + else: + pg_resetxlog_path = self.get_bin_path('pg_resetxlog') + wal_dir = 'pg_xlog' + + self.run_binary( + [ + pg_resetxlog_path, + '-D', + node.data_dir, + '-o 42', + '-f' + ], + asynchronous=False) + + if not node.status(): + node.slow_start() + else: + print("Die! Die! Why won't you die?... Why won't you die?") + exit(1) + + # take ptrack backup +# self.backup_node( +# backup_dir, 'node', node, +# backup_type='ptrack', options=['--stream']) + + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=['--stream']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because instance was brutalized by pg_resetxlog" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd) + ) + except ProbackupException as e: + self.assertTrue( + 'ERROR: LSN from ptrack_control ' in e.message and + 'is greater than Start LSN of previous backup' in e.message, + '\n Unexpected Error Message: {0}\n' + ' CMD: {1}'.format(repr(e.message), self.cmd)) + +# pgdata = self.pgdata_content(node.data_dir) +# +# node_restored = self.make_simple_node( +# base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) +# node_restored.cleanup() +# +# self.restore_node( +# backup_dir, 'node', node_restored) +# +# pgdata_restored = self.pgdata_content(node_restored.data_dir) +# self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_corrupt_ptrack_map(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + ptrack_version = self.get_ptrack_version(node) + + # Create table + node.safe_psql( + "postgres", + "create extension bloom; create sequence t_seq; " + "create table t_heap " + "as select nextval('t_seq')::int as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,2560) i") + + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + node.safe_psql( + 'postgres', + "update t_heap set id = nextval('t_seq'), text = md5(text), " + "tsvector = md5(repeat(tsvector::text, 10))::tsvector") + + # kill the bastard + if self.verbose: + print('Killing postmaster. Losing Ptrack changes') + + node.stop(['-m', 'immediate', '-D', node.data_dir]) + + ptrack_map = os.path.join(node.data_dir, 'global', 'ptrack.map') + + # Let`s do index corruption. 
ptrack.map + with open(ptrack_map, "rb+", 0) as f: + f.seek(42) + f.write(b"blablahblahs") + f.flush() + f.close + +# os.remove(os.path.join(node.logs_dir, node.pg_log_name)) + + if self.verbose: + print('Ptrack version:', ptrack_version) + if ptrack_version >= self.version_to_num("2.3"): + node.slow_start() + + log_file = os.path.join(node.logs_dir, 'postgresql.log') + with open(log_file, 'r') as f: + log_content = f.read() + + self.assertIn( + 'WARNING: ptrack read map: incorrect checksum of file "{0}"'.format(ptrack_map), + log_content) + + node.stop(['-D', node.data_dir]) + else: + try: + node.slow_start() + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because ptrack.map is corrupted" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except StartNodeException as e: + self.assertIn( + 'Cannot start node', + e.message, + '\n Unexpected Error Message: {0}\n' + ' CMD: {1}'.format(repr(e.message), self.cmd)) + + log_file = os.path.join(node.logs_dir, 'postgresql.log') + with open(log_file, 'r') as f: + log_content = f.read() + + self.assertIn( + 'FATAL: ptrack init: incorrect checksum of file "{0}"'.format(ptrack_map), + log_content) + + self.set_auto_conf(node, {'ptrack.map_size': '0'}) + node.slow_start() + + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=['--stream']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because instance ptrack is disabled" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Ptrack is disabled', + e.message, + '\n Unexpected Error Message: {0}\n' + ' CMD: {1}'.format(repr(e.message), self.cmd)) + + node.safe_psql( + 'postgres', + "update t_heap set id = nextval('t_seq'), text = md5(text), " + "tsvector = md5(repeat(tsvector::text, 10))::tsvector") + + node.stop(['-m', 'immediate', '-D', node.data_dir]) + + self.set_auto_conf(node, {'ptrack.map_size': '32', 'shared_preload_libraries': 'ptrack'}) + node.slow_start() + + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=['--stream']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because ptrack map is from future" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: LSN from ptrack_control', + e.message, + '\n Unexpected Error Message: {0}\n' + ' CMD: {1}'.format(repr(e.message), self.cmd)) + + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['--stream']) + + node.safe_psql( + 'postgres', + "update t_heap set id = nextval('t_seq'), text = md5(text), " + "tsvector = md5(repeat(tsvector::text, 10))::tsvector") + + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=['--stream']) + + pgdata = self.pgdata_content(node.data_dir) + + node.cleanup() + + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_horizon_lsn_ptrack(self): + """ + https://github.com/postgrespro/pg_probackup/pull/386 + """ + if not self.probackup_old_path: + self.skipTest("You must specify PGPROBACKUPBIN_OLD" + " for run this test") + self.assertLessEqual( + 
self.version_to_num(self.old_probackup_version), + self.version_to_num('2.4.15'), + 'You need pg_probackup old_binary =< 2.4.15 for this test') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + self.assertGreaterEqual( + self.get_ptrack_version(node), + self.version_to_num("2.1"), + "You need ptrack >=2.1 for this test") + + # set map_size to a minimal value + self.set_auto_conf(node, {'ptrack.map_size': '1'}) + node.restart() + + node.pgbench_init(scale=100) + + # FULL backup + full_id = self.backup_node(backup_dir, 'node', node, options=['--stream'], old_binary=True) + + # enable archiving so the WAL size to do interfere with data bytes comparison later + self.set_archiving(backup_dir, 'node', node) + node.restart() + + # change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # DELTA is exemplar + delta_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta') + delta_bytes = self.show_pb(backup_dir, 'node', backup_id=delta_id)["data-bytes"] + self.delete_pb(backup_dir, 'node', backup_id=delta_id) + + # PTRACK with current binary + ptrack_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack') + ptrack_bytes = self.show_pb(backup_dir, 'node', backup_id=ptrack_id)["data-bytes"] + + # make sure that backup size is exactly the same + self.assertEqual(delta_bytes, ptrack_bytes) diff --git a/tests/remote_test.py b/tests/remote_test.py new file mode 100644 index 000000000..2d36d7346 --- /dev/null +++ b/tests/remote_test.py @@ -0,0 +1,43 @@ +import unittest +import os +from time import sleep +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.cfs_helpers import find_by_name + + +class RemoteTest(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_remote_sanity(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + output = self.backup_node( + backup_dir, 'node', node, + options=['--stream'], no_remote=True, return_id=False) + self.assertIn('remote: false', output) + + # try: + # self.backup_node( + # backup_dir, 'node', + # node, options=['--remote-proto=ssh', '--stream'], no_remote=True) + # # we should die here because exception is what we expect to happen + # self.assertEqual( + # 1, 0, + # "Expecting Error because remote-host option is missing." 
+ # "\n Output: {0} \n CMD: {1}".format( + # repr(self.output), self.cmd)) + # except ProbackupException as e: + # self.assertIn( + # "Insert correct error", + # e.message, + # "\n Unexpected Error Message: {0}\n CMD: {1}".format( + # repr(e.message), self.cmd)) diff --git a/tests/replica_test.py b/tests/replica_test.py new file mode 100644 index 000000000..9c68de366 --- /dev/null +++ b/tests/replica_test.py @@ -0,0 +1,1654 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack +from datetime import datetime, timedelta +import subprocess +import time +from distutils.dir_util import copy_tree +from testgres import ProcessType +from time import sleep + + +class ReplicaTest(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_replica_switchover(self): + """ + check that archiving on replica works correctly + over the course of several switchovers + https://www.postgresql.org/message-id/54b059d4-2b48-13a4-6f43-95a087c92367%40postgrespro.ru + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node1'), + set_replication=True, + initdb_params=['--data-checksums']) + + if self.get_version(node1) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node1', node1) + + node1.slow_start() + + # take full backup and restore it + self.backup_node(backup_dir, 'node1', node1, options=['--stream']) + node2 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node2')) + node2.cleanup() + + # create replica + self.restore_node(backup_dir, 'node1', node2) + + # setup replica + self.add_instance(backup_dir, 'node2', node2) + self.set_archiving(backup_dir, 'node2', node2, replica=True) + self.set_replica(node1, node2, synchronous=False) + self.set_auto_conf(node2, {'port': node2.port}) + + node2.slow_start(replica=True) + + # generate some data + node1.pgbench_init(scale=5) + + # take full backup on replica + self.backup_node(backup_dir, 'node2', node2, options=['--stream']) + + # first switchover + node1.stop() + node2.promote() + + self.set_replica(node2, node1, synchronous=False) + node2.reload() + node1.slow_start(replica=True) + + # take incremental backup from new master + self.backup_node( + backup_dir, 'node2', node2, + backup_type='delta', options=['--stream']) + + # second switchover + node2.stop() + node1.promote() + self.set_replica(node1, node2, synchronous=False) + node1.reload() + node2.slow_start(replica=True) + + # generate some more data + node1.pgbench_init(scale=5) + + # take incremental backup from replica + self.backup_node( + backup_dir, 'node2', node2, + backup_type='delta', options=['--stream']) + + # https://github.com/postgrespro/pg_probackup/issues/251 + self.validate_pb(backup_dir) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_replica_stream_ptrack_backup(self): + """ + make node, take full backup, restore it and make replica from it, + take full stream backup from replica + """ + if not self.ptrack: + self.skipTest('Skipped because ptrack support is disabled') + + if self.pg_config_version > self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + backup_dir = 
os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + + master.slow_start() + + if master.major_version >= 12: + master.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + # CREATE TABLE + master.psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,256) i") + before = master.safe_psql("postgres", "SELECT * FROM t_heap") + + # take full backup and restore it + self.backup_node(backup_dir, 'master', master, options=['--stream']) + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + self.restore_node(backup_dir, 'master', replica) + self.set_replica(master, replica) + + # Check data correctness on replica + replica.slow_start(replica=True) + after = replica.safe_psql("postgres", "SELECT * FROM t_heap") + self.assertEqual(before, after) + + # Change data on master, take FULL backup from replica, + # restore taken backup and check that restored data equal + # to original data + master.psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(256,512) i") + before = master.safe_psql("postgres", "SELECT * FROM t_heap") + self.add_instance(backup_dir, 'replica', replica) + + backup_id = self.backup_node( + backup_dir, 'replica', replica, + options=[ + '--stream', + '--master-host=localhost', + '--master-db=postgres', + '--master-port={0}'.format(master.port)]) + self.validate_pb(backup_dir, 'replica') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) + + # RESTORE FULL BACKUP TAKEN FROM PREVIOUS STEP + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node')) + node.cleanup() + self.restore_node(backup_dir, 'replica', data_dir=node.data_dir) + + self.set_auto_conf(node, {'port': node.port}) + + node.slow_start() + + # CHECK DATA CORRECTNESS + after = node.safe_psql("postgres", "SELECT * FROM t_heap") + self.assertEqual(before, after) + + # Change data on master, take PTRACK backup from replica, + # restore taken backup and check that restored data equal + # to original data + master.psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(512,768) i") + + before = master.safe_psql("postgres", "SELECT * FROM t_heap") + + backup_id = self.backup_node( + backup_dir, 'replica', replica, backup_type='ptrack', + options=[ + '--stream', + '--master-host=localhost', + '--master-db=postgres', + '--master-port={0}'.format(master.port)]) + self.validate_pb(backup_dir, 'replica') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) + + # RESTORE PTRACK BACKUP TAKEN FROM replica + node.cleanup() + self.restore_node( + backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id) + + self.set_auto_conf(node, {'port': node.port}) + + node.slow_start() + + # CHECK DATA CORRECTNESS + after = node.safe_psql("postgres", "SELECT * FROM t_heap") + self.assertEqual(before, after) + + # @unittest.skip("skip") + def test_replica_archive_page_backup(self): + """ + 
make archive master, take full and page archive backups from master, + set replica, make archive backup from replica + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'archive_timeout': '10s', + 'checkpoint_timeout': '30s', + 'max_wal_size': '32MB'}) + + if self.get_version(master) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + self.set_archiving(backup_dir, 'master', master) + master.slow_start() + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.backup_node(backup_dir, 'master', master) + + master.psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,2560) i") + + before = master.safe_psql("postgres", "SELECT * FROM t_heap") + + backup_id = self.backup_node( + backup_dir, 'master', master, backup_type='page') + self.restore_node(backup_dir, 'master', replica) + + # Settings for Replica + self.add_instance(backup_dir, 'replica', replica) + self.set_replica(master, replica, synchronous=True) + self.set_archiving(backup_dir, 'replica', replica, replica=True) + + replica.slow_start(replica=True) + + # Check data correctness on replica + after = replica.safe_psql("postgres", "SELECT * FROM t_heap") + self.assertEqual(before, after) + + # Change data on master, take FULL backup from replica, + # restore taken backup and check that restored data + # equal to original data + master.psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(256,25120) i") + + before = master.safe_psql("postgres", "SELECT * FROM t_heap") + + self.wait_until_replica_catch_with_master(master, replica) + + backup_id = self.backup_node( + backup_dir, 'replica', replica, + options=[ + '--archive-timeout=60', + '--master-host=localhost', + '--master-db=postgres', + '--master-port={0}'.format(master.port)]) + + self.validate_pb(backup_dir, 'replica') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) + + # RESTORE FULL BACKUP TAKEN FROM replica + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node')) + node.cleanup() + self.restore_node(backup_dir, 'replica', data_dir=node.data_dir) + + self.set_auto_conf(node, {'port': node.port, 'archive_mode': 'off'}) + + node.slow_start() + + # CHECK DATA CORRECTNESS + after = node.safe_psql("postgres", "SELECT * FROM t_heap") + self.assertEqual(before, after) + node.cleanup() + + # Change data on master, make PAGE backup from replica, + # restore taken backup and check that restored data equal + # to original data + master.pgbench_init(scale=5) + + pgbench = master.pgbench( + options=['-T', '30', '-c', '2', '--no-vacuum']) + + backup_id = self.backup_node( + backup_dir, 'replica', + replica, backup_type='page', + options=[ + '--archive-timeout=60', + '--master-host=localhost', + '--master-db=postgres', + '--master-port={0}'.format(master.port)]) + + pgbench.wait() + + self.switch_wal_segment(master) + + before = master.safe_psql("postgres", "SELECT * FROM 
pgbench_accounts") + + self.validate_pb(backup_dir, 'replica') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) + + # RESTORE PAGE BACKUP TAKEN FROM replica + self.restore_node( + backup_dir, 'replica', data_dir=node.data_dir, + backup_id=backup_id) + + self.set_auto_conf(node, {'port': node.port, 'archive_mode': 'off'}) + + node.slow_start() + + # CHECK DATA CORRECTNESS + after = node.safe_psql("postgres", "SELECT * FROM pgbench_accounts") + self.assertEqual( + before, after, 'Restored data is not equal to original') + + self.add_instance(backup_dir, 'node', node) + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + # @unittest.skip("skip") + def test_basic_make_replica_via_restore(self): + """ + make archive master, take full and page archive backups from master, + set replica, make archive backup from replica + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'archive_timeout': '10s'}) + + if self.get_version(master) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + self.set_archiving(backup_dir, 'master', master) + master.slow_start() + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.backup_node(backup_dir, 'master', master) + + master.psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,8192) i") + + before = master.safe_psql("postgres", "SELECT * FROM t_heap") + + backup_id = self.backup_node( + backup_dir, 'master', master, backup_type='page') + self.restore_node( + backup_dir, 'master', replica, options=['-R']) + + # Settings for Replica + self.add_instance(backup_dir, 'replica', replica) + self.set_archiving(backup_dir, 'replica', replica, replica=True) + self.set_replica(master, replica, synchronous=True) + + replica.slow_start(replica=True) + + self.backup_node( + backup_dir, 'replica', replica, + options=['--archive-timeout=30s', '--stream']) + + # @unittest.skip("skip") + def test_take_backup_from_delayed_replica(self): + """ + make archive master, take full backups from master, + restore full backup as delayed replica, launch pgbench, + take FULL, PAGE and DELTA backups from replica + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={'archive_timeout': '10s'}) + + if self.get_version(master) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + self.set_archiving(backup_dir, 'master', master) + master.slow_start() + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.backup_node(backup_dir, 'master', master) + + master.psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as 
tsvector " + "from generate_series(0,165000) i") + + master.psql( + "postgres", + "create table t_heap_1 as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,165000) i") + + self.restore_node( + backup_dir, 'master', replica, options=['-R']) + + # Settings for Replica + self.add_instance(backup_dir, 'replica', replica) + self.set_archiving(backup_dir, 'replica', replica, replica=True) + + self.set_auto_conf(replica, {'port': replica.port}) + + replica.slow_start(replica=True) + + self.wait_until_replica_catch_with_master(master, replica) + + if self.get_version(master) >= self.version_to_num('12.0'): + self.set_auto_conf( + replica, {'recovery_min_apply_delay': '300s'}) + else: + replica.append_conf( + 'recovery.conf', + 'recovery_min_apply_delay = 300s') + + replica.stop() + replica.slow_start(replica=True) + + master.pgbench_init(scale=10) + + pgbench = master.pgbench( + options=['-T', '60', '-c', '2', '--no-vacuum']) + + self.backup_node( + backup_dir, 'replica', + replica, options=['--archive-timeout=60s']) + + self.backup_node( + backup_dir, 'replica', replica, + data_dir=replica.data_dir, + backup_type='page', options=['--archive-timeout=60s']) + + sleep(1) + + self.backup_node( + backup_dir, 'replica', replica, + backup_type='delta', options=['--archive-timeout=60s']) + + pgbench.wait() + + pgbench = master.pgbench( + options=['-T', '30', '-c', '2', '--no-vacuum']) + + self.backup_node( + backup_dir, 'replica', replica, + options=['--stream']) + + self.backup_node( + backup_dir, 'replica', replica, + backup_type='page', options=['--stream']) + + self.backup_node( + backup_dir, 'replica', replica, + backup_type='delta', options=['--stream']) + + pgbench.wait() + + # @unittest.skip("skip") + def test_replica_promote(self): + """ + start backup from replica, during backup promote replica + check that backup is failed + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'archive_timeout': '10s', + 'checkpoint_timeout': '30s', + 'max_wal_size': '32MB'}) + + if self.get_version(master) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + self.set_archiving(backup_dir, 'master', master) + master.slow_start() + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + self.backup_node(backup_dir, 'master', master) + + master.psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,165000) i") + + self.restore_node( + backup_dir, 'master', replica, options=['-R']) + + # Settings for Replica + self.add_instance(backup_dir, 'replica', replica) + self.set_archiving(backup_dir, 'replica', replica, replica=True) + self.set_replica( + master, replica, replica_name='replica', synchronous=True) + + replica.slow_start(replica=True) + + master.psql( + "postgres", + "create table t_heap_1 as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,165000) i") + + self.wait_until_replica_catch_with_master(master, 
replica) + + # start backup from replica + gdb = self.backup_node( + backup_dir, 'replica', replica, gdb=True, + options=['--log-level-file=verbose']) + + gdb.set_breakpoint('backup_data_file') + gdb.run_until_break() + gdb.continue_execution_until_break(20) + + replica.promote() + + gdb.remove_all_breakpoints() + gdb.continue_execution_until_exit() + + backup_id = self.show_pb( + backup_dir, 'replica')[0]["id"] + + # read log file content + with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: + log_content = f.read() + f.close + + self.assertIn( + 'ERROR: the standby was promoted during online backup', + log_content) + + self.assertIn( + 'WARNING: Backup {0} is running, ' + 'setting its status to ERROR'.format(backup_id), + log_content) + + # @unittest.skip("skip") + def test_replica_stop_lsn_null_offset(self): + """ + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '1h', + 'wal_level': 'replica'}) + + if self.get_version(master) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', master) + self.set_archiving(backup_dir, 'node', master) + master.slow_start() + + # freeze bgwriter to get rid of RUNNING XACTS records + bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] + gdb_checkpointer = self.gdb_attach(bgwriter_pid) + + self.backup_node(backup_dir, 'node', master) + + # Create replica + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + self.restore_node(backup_dir, 'node', replica) + + # Settings for Replica + self.set_replica(master, replica, synchronous=True) + self.set_archiving(backup_dir, 'node', replica, replica=True) + + replica.slow_start(replica=True) + + self.switch_wal_segment(master) + self.switch_wal_segment(master) + + output = self.backup_node( + backup_dir, 'node', replica, replica.data_dir, + options=[ + '--archive-timeout=30', + '--log-level-console=LOG', + '--no-validate', + '--stream'], + return_id=False) + + self.assertIn( + 'LOG: Invalid offset in stop_lsn value 0/4000000', + output) + + self.assertIn( + 'WARNING: WAL segment 000000010000000000000004 could not be streamed in 30 seconds', + output) + + self.assertIn( + 'WARNING: Failed to get next WAL record after 0/4000000, looking for previous WAL record', + output) + + self.assertIn( + 'LOG: Looking for LSN 0/4000000 in segment: 000000010000000000000003', + output) + + self.assertIn( + 'has endpoint 0/4000000 which is ' + 'equal or greater than requested LSN 0/4000000', + output) + + self.assertIn( + 'LOG: Found prior LSN:', + output) + + # Clean after yourself + gdb_checkpointer.kill() + + # @unittest.skip("skip") + def test_replica_stop_lsn_null_offset_next_record(self): + """ + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '1h', + 'wal_level': 'replica'}) + + if self.get_version(master) < self.version_to_num('9.6.0'): + 
self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + self.set_archiving(backup_dir, 'master', master) + master.slow_start() + + # freeze bgwriter to get rid of RUNNING XACTS records + bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] + + self.backup_node(backup_dir, 'master', master) + + # Create replica + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + self.restore_node(backup_dir, 'master', replica) + + # Settings for Replica + self.add_instance(backup_dir, 'replica', replica) + self.set_replica(master, replica, synchronous=True) + self.set_archiving(backup_dir, 'replica', replica, replica=True) + + copy_tree( + os.path.join(backup_dir, 'wal', 'master'), + os.path.join(backup_dir, 'wal', 'replica')) + + replica.slow_start(replica=True) + + self.switch_wal_segment(master) + self.switch_wal_segment(master) + + # open connection to master + conn = master.connect() + + gdb = self.backup_node( + backup_dir, 'replica', replica, + options=[ + '--archive-timeout=40', + '--log-level-file=LOG', + '--no-validate', + '--stream'], + gdb=True) + + # Attention! this breakpoint is set to a probackup internal function, not a postgres core one + gdb.set_breakpoint('pg_stop_backup') + gdb.run_until_break() + gdb.remove_all_breakpoints() + gdb.continue_execution_until_running() + + sleep(5) + + conn.execute("create table t1()") + conn.commit() + + while 'RUNNING' in self.show_pb(backup_dir, 'replica')[0]['status']: + sleep(5) + + file = os.path.join(backup_dir, 'log', 'pg_probackup.log') + + with open(file) as f: + log_content = f.read() + + self.assertIn( + 'LOG: Invalid offset in stop_lsn value 0/4000000', + log_content) + + self.assertIn( + 'LOG: Looking for segment: 000000010000000000000004', + log_content) + + self.assertIn( + 'LOG: First record in WAL segment "000000010000000000000004": 0/4000028', + log_content) + + self.assertIn( + 'INFO: stop_lsn: 0/4000000', + log_content) + + self.assertTrue(self.show_pb(backup_dir, 'replica')[0]['status'] == 'DONE') + + # @unittest.skip("skip") + def test_archive_replica_null_offset(self): + """ + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '1h', + 'wal_level': 'replica'}) + + if self.get_version(master) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', master) + self.set_archiving(backup_dir, 'node', master) + master.slow_start() + + self.backup_node(backup_dir, 'node', master) + + # Create replica + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + self.restore_node(backup_dir, 'node', replica) + + # Settings for Replica + self.set_replica(master, replica, synchronous=True) + self.set_archiving(backup_dir, 'node', replica, replica=True) + + # freeze bgwriter to get rid of RUNNING XACTS records + bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] + gdb_checkpointer = self.gdb_attach(bgwriter_pid) + + replica.slow_start(replica=True) + + 
self.switch_wal_segment(master) + self.switch_wal_segment(master) + + # take backup from replica + output = self.backup_node( + backup_dir, 'node', replica, replica.data_dir, + options=[ + '--archive-timeout=30', + '--log-level-console=LOG', + '--no-validate'], + return_id=False) + + self.assertIn( + 'LOG: Invalid offset in stop_lsn value 0/4000000', + output) + + self.assertIn( + 'WARNING: WAL segment 000000010000000000000004 could not be archived in 30 seconds', + output) + + self.assertIn( + 'WARNING: Failed to get next WAL record after 0/4000000, looking for previous WAL record', + output) + + self.assertIn( + 'LOG: Looking for LSN 0/4000000 in segment: 000000010000000000000003', + output) + + self.assertIn( + 'has endpoint 0/4000000 which is ' + 'equal or greater than requested LSN 0/4000000', + output) + + self.assertIn( + 'LOG: Found prior LSN:', + output) + + print(output) + + # @unittest.skip("skip") + def test_archive_replica_not_null_offset(self): + """ + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '1h', + 'wal_level': 'replica'}) + + if self.get_version(master) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', master) + self.set_archiving(backup_dir, 'node', master) + master.slow_start() + + self.backup_node(backup_dir, 'node', master) + + # Create replica + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + self.restore_node(backup_dir, 'node', replica) + + # Settings for Replica + self.set_replica(master, replica, synchronous=True) + self.set_archiving(backup_dir, 'node', replica, replica=True) + + replica.slow_start(replica=True) + + # take backup from replica + self.backup_node( + backup_dir, 'node', replica, replica.data_dir, + options=[ + '--archive-timeout=30', + '--log-level-console=LOG', + '--no-validate'], + return_id=False) + + try: + self.backup_node( + backup_dir, 'node', replica, replica.data_dir, + options=[ + '--archive-timeout=30', + '--log-level-console=LOG', + '--no-validate']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of archive timeout. 
" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + # vanilla -- 0/4000060 + # pgproee -- 0/4000078 + self.assertRegex( + e.message, + r'LOG: Looking for LSN (0/4000060|0/4000078) in segment: 000000010000000000000004', + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertRegex( + e.message, + r'INFO: Wait for LSN (0/4000060|0/4000078) in archived WAL segment', + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertIn( + 'ERROR: WAL segment 000000010000000000000004 could not be archived in 30 seconds', + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_replica_toast(self): + """ + make archive master, take full and page archive backups from master, + set replica, make archive backup from replica + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '1h', + 'wal_level': 'replica', + 'shared_buffers': '128MB'}) + + if self.get_version(master) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + self.set_archiving(backup_dir, 'master', master) + master.slow_start() + + # freeze bgwriter to get rid of RUNNING XACTS records + bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] + gdb_checkpointer = self.gdb_attach(bgwriter_pid) + + self.backup_node(backup_dir, 'master', master) + + # Create replica + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + self.restore_node(backup_dir, 'master', replica) + + # Settings for Replica + self.add_instance(backup_dir, 'replica', replica) + self.set_replica(master, replica, synchronous=True) + self.set_archiving(backup_dir, 'replica', replica, replica=True) + + copy_tree( + os.path.join(backup_dir, 'wal', 'master'), + os.path.join(backup_dir, 'wal', 'replica')) + + replica.slow_start(replica=True) + + self.switch_wal_segment(master) + self.switch_wal_segment(master) + + master.safe_psql( + 'postgres', + 'CREATE TABLE t1 AS ' + 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' + 'FROM generate_series(0,10) i') + + self.wait_until_replica_catch_with_master(master, replica) + + output = self.backup_node( + backup_dir, 'replica', replica, + options=[ + '--archive-timeout=30', + '--log-level-console=LOG', + '--no-validate', + '--stream'], + return_id=False) + + pgdata = self.pgdata_content(replica.data_dir) + + self.assertIn( + 'WARNING: Could not read WAL record at', + output) + + self.assertIn( + 'LOG: Found prior LSN:', + output) + + res1 = replica.safe_psql( + 'postgres', + 'select md5(fat_attr) from t1') + + replica.cleanup() + + self.restore_node(backup_dir, 'replica', replica) + pgdata_restored = self.pgdata_content(replica.data_dir) + + replica.slow_start() + + res2 = replica.safe_psql( + 'postgres', + 'select md5(fat_attr) from t1') + + self.assertEqual(res1, res2) + + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + gdb_checkpointer.kill() + + # @unittest.skip("skip") + def 
test_start_stop_lsn_in_the_same_segno(self): + """ + """ + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '1h', + 'wal_level': 'replica', + 'shared_buffers': '128MB'}) + + if self.get_version(master) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + master.slow_start() + + # freeze bgwriter to get rid of RUNNING XACTS records + bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] + + self.backup_node(backup_dir, 'master', master, options=['--stream']) + + # Create replica + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + self.restore_node(backup_dir, 'master', replica) + + # Settings for Replica + self.add_instance(backup_dir, 'replica', replica) + self.set_replica(master, replica, synchronous=True) + + replica.slow_start(replica=True) + + self.switch_wal_segment(master) + self.switch_wal_segment(master) + + master.safe_psql( + 'postgres', + 'CREATE TABLE t1 AS ' + 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' + 'FROM generate_series(0,10) i') + + master.safe_psql( + 'postgres', + 'CHECKPOINT') + + self.wait_until_replica_catch_with_master(master, replica) + + sleep(60) + + self.backup_node( + backup_dir, 'replica', replica, + options=[ + '--archive-timeout=30', + '--log-level-console=LOG', + '--no-validate', + '--stream'], + return_id=False) + + self.backup_node( + backup_dir, 'replica', replica, + options=[ + '--archive-timeout=30', + '--log-level-console=LOG', + '--no-validate', + '--stream'], + return_id=False) + + @unittest.skip("skip") + def test_replica_promote_1(self): + """ + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '1h', + 'wal_level': 'replica'}) + + if self.get_version(master) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + # set replica True, so archive_mode 'always' is used. 
+ self.set_archiving(backup_dir, 'master', master, replica=True) + master.slow_start() + + self.backup_node(backup_dir, 'master', master) + + # Create replica + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + self.restore_node(backup_dir, 'master', replica) + + # Settings for Replica + self.set_replica(master, replica) + + replica.slow_start(replica=True) + + master.safe_psql( + 'postgres', + 'CREATE TABLE t1 AS ' + 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' + 'FROM generate_series(0,10) i') + + self.wait_until_replica_catch_with_master(master, replica) + + wal_file = os.path.join( + backup_dir, 'wal', 'master', '000000010000000000000004') + + wal_file_partial = os.path.join( + backup_dir, 'wal', 'master', '000000010000000000000004.partial') + + self.assertFalse(os.path.exists(wal_file)) + + replica.promote() + + while not os.path.exists(wal_file_partial): + sleep(1) + + self.switch_wal_segment(master) + + # sleep to be sure, that any partial timeout is expired + sleep(70) + + self.assertTrue( + os.path.exists(wal_file_partial), + "File {0} disappeared".format(wal_file)) + + self.assertTrue( + os.path.exists(wal_file_partial), + "File {0} disappeared".format(wal_file_partial)) + + # @unittest.skip("skip") + def test_replica_promote_2(self): + """ + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + # set replica True, so archive_mode 'always' is used. + self.set_archiving( + backup_dir, 'master', master, replica=True) + master.slow_start() + + self.backup_node(backup_dir, 'master', master) + + # Create replica + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + self.restore_node(backup_dir, 'master', replica) + + # Settings for Replica + self.set_replica(master, replica) + self.set_auto_conf(replica, {'port': replica.port}) + + replica.slow_start(replica=True) + + master.safe_psql( + 'postgres', + 'CREATE TABLE t1 AS ' + 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' + 'FROM generate_series(0,1) i') + + self.wait_until_replica_catch_with_master(master, replica) + + replica.promote() + + self.backup_node( + backup_dir, 'master', replica, data_dir=replica.data_dir, + backup_type='page') + + # @unittest.skip("skip") + def test_replica_promote_archive_delta(self): + """ + t3 /---D3--> + t2 /-------> + t1 --F---D1--D2-- + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node1'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s', + 'archive_timeout': '30s'}) + + if self.get_version(node1) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node1) + self.set_config( + backup_dir, 'node', options=['--archive-timeout=60s']) + self.set_archiving(backup_dir, 'node', node1) + + node1.slow_start() + + self.backup_node(backup_dir, 'node', node1, options=['--stream']) + + # Create replica + node2 = self.make_simple_node( + base_dir=os.path.join(self.module_name, 
self.fname, 'node2')) + node2.cleanup() + self.restore_node(backup_dir, 'node', node2, node2.data_dir) + + # Settings for Replica + self.set_replica(node1, node2) + self.set_auto_conf(node2, {'port': node2.port}) + self.set_archiving(backup_dir, 'node', node2, replica=True) + + node2.slow_start(replica=True) + + node1.safe_psql( + 'postgres', + 'CREATE TABLE t1 AS ' + 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' + 'FROM generate_series(0,20) i') + self.wait_until_replica_catch_with_master(node1, node2) + + node1.safe_psql( + 'postgres', + 'CREATE TABLE t2 AS ' + 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' + 'FROM generate_series(0,20) i') + self.wait_until_replica_catch_with_master(node1, node2) + + # delta backup on replica on timeline 1 + delta1_id = self.backup_node( + backup_dir, 'node', node2, node2.data_dir, + 'delta', options=['--stream']) + + # delta backup on replica on timeline 1 + delta2_id = self.backup_node( + backup_dir, 'node', node2, node2.data_dir, 'delta') + + self.change_backup_status( + backup_dir, 'node', delta2_id, 'ERROR') + + # node2 is now master + node2.promote() + + node2.safe_psql( + 'postgres', + 'CREATE TABLE t3 AS ' + 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' + 'FROM generate_series(0,20) i') + + # node1 is now replica + node1.cleanup() + # kludge "backup_id=delta1_id" + self.restore_node( + backup_dir, 'node', node1, node1.data_dir, + backup_id=delta1_id, + options=[ + '--recovery-target-timeline=2', + '--recovery-target=latest']) + + # Settings for Replica + self.set_replica(node2, node1) + self.set_auto_conf(node1, {'port': node1.port}) + self.set_archiving(backup_dir, 'node', node1, replica=True) + + node1.slow_start(replica=True) + + node2.safe_psql( + 'postgres', + 'CREATE TABLE t4 AS ' + 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' + 'FROM generate_series(0,30) i') + self.wait_until_replica_catch_with_master(node2, node1) + + # node1 is back to be a master + node1.promote() + + sleep(5) + + # delta backup on timeline 3 + self.backup_node( + backup_dir, 'node', node1, node1.data_dir, 'delta', + options=['--archive-timeout=60']) + + pgdata = self.pgdata_content(node1.data_dir) + + node1.cleanup() + self.restore_node(backup_dir, 'node', node1, node1.data_dir) + + pgdata_restored = self.pgdata_content(node1.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_replica_promote_archive_page(self): + """ + t3 /---P3--> + t2 /-------> + t1 --F---P1--P2-- + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node1'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30s', + 'archive_timeout': '30s'}) + + if self.get_version(node1) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node1) + self.set_archiving(backup_dir, 'node', node1) + self.set_config( + backup_dir, 'node', options=['--archive-timeout=60s']) + + node1.slow_start() + + self.backup_node(backup_dir, 'node', node1, options=['--stream']) + + # Create replica + node2 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node2')) + node2.cleanup() + self.restore_node(backup_dir, 'node', node2, node2.data_dir) + + # Settings for Replica + self.set_replica(node1, node2) + 
self.set_auto_conf(node2, {'port': node2.port}) + self.set_archiving(backup_dir, 'node', node2, replica=True) + + node2.slow_start(replica=True) + + node1.safe_psql( + 'postgres', + 'CREATE TABLE t1 AS ' + 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' + 'FROM generate_series(0,20) i') + self.wait_until_replica_catch_with_master(node1, node2) + + node1.safe_psql( + 'postgres', + 'CREATE TABLE t2 AS ' + 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' + 'FROM generate_series(0,20) i') + self.wait_until_replica_catch_with_master(node1, node2) + + # page backup on replica on timeline 1 + page1_id = self.backup_node( + backup_dir, 'node', node2, node2.data_dir, + 'page', options=['--stream']) + + # page backup on replica on timeline 1 + page2_id = self.backup_node( + backup_dir, 'node', node2, node2.data_dir, 'page') + + self.change_backup_status( + backup_dir, 'node', page2_id, 'ERROR') + + # node2 is now master + node2.promote() + + node2.safe_psql( + 'postgres', + 'CREATE TABLE t3 AS ' + 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' + 'FROM generate_series(0,20) i') + + # node1 is now replica + node1.cleanup() + # kludge "backup_id=page1_id" + self.restore_node( + backup_dir, 'node', node1, node1.data_dir, + backup_id=page1_id, + options=[ + '--recovery-target-timeline=2', + '--recovery-target=latest']) + + # Settings for Replica + self.set_replica(node2, node1) + self.set_auto_conf(node1, {'port': node1.port}) + self.set_archiving(backup_dir, 'node', node1, replica=True) + + node1.slow_start(replica=True) + + node2.safe_psql( + 'postgres', + 'CREATE TABLE t4 AS ' + 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' + 'FROM generate_series(0,30) i') + self.wait_until_replica_catch_with_master(node2, node1) + + # node1 is back to be a master + node1.promote() + self.switch_wal_segment(node1) + + sleep(5) + + # delta3_id = self.backup_node( + # backup_dir, 'node', node2, node2.data_dir, 'delta') + # page backup on timeline 3 + page3_id = self.backup_node( + backup_dir, 'node', node1, node1.data_dir, 'page', + options=['--archive-timeout=60']) + + pgdata = self.pgdata_content(node1.data_dir) + + node1.cleanup() + self.restore_node(backup_dir, 'node', node1, node1.data_dir) + + pgdata_restored = self.pgdata_content(node1.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_parent_choosing(self): + """ + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + master = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'master'), + set_replication=True, + initdb_params=['--data-checksums']) + + if self.get_version(master) < self.version_to_num('9.6.0'): + self.skipTest( + 'Skipped because backup from replica is not supported in PG 9.5') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'master', master) + + master.slow_start() + + self.backup_node(backup_dir, 'master', master, options=['--stream']) + + # Create replica + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + self.restore_node(backup_dir, 'master', replica) + + # Settings for Replica + self.set_replica(master, replica) + self.set_auto_conf(replica, {'port': replica.port}) + + replica.slow_start(replica=True) + + master.safe_psql( + 'postgres', + 'CREATE TABLE t1 AS ' + 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' + 'FROM generate_series(0,20) i') + self.wait_until_replica_catch_with_master(master, replica) + + 
self.add_instance(backup_dir, 'replica', replica) + + full_id = self.backup_node( + backup_dir, 'replica', + replica, options=['--stream']) + + master.safe_psql( + 'postgres', + 'CREATE TABLE t2 AS ' + 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' + 'FROM generate_series(0,20) i') + self.wait_until_replica_catch_with_master(master, replica) + + self.backup_node( + backup_dir, 'replica', replica, + backup_type='delta', options=['--stream']) + + replica.promote() + + # failing, because without archiving it is impossible to + # take a multi-timeline backup. + self.backup_node( + backup_dir, 'replica', replica, + backup_type='delta', options=['--stream']) + + # @unittest.skip("skip") + def test_instance_from_the_past(self): + """ + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + + node.slow_start() + + full_id = self.backup_node(backup_dir, 'node', node, options=['--stream']) + + node.pgbench_init(scale=10) + self.backup_node(backup_dir, 'node', node, options=['--stream']) + node.cleanup() + + self.restore_node(backup_dir, 'node', node, backup_id=full_id) + node.slow_start() + + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['--stream']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because instance is from the past " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'ERROR: Current START LSN' in e.message and + 'is lower than START LSN' in e.message and + 'It may indicate that we are trying to backup ' + 'PostgreSQL instance from the past' in e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_replica_via_basebackup(self): + """ + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={'hot_standby': 'on'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + + node.slow_start() + + node.pgbench_init(scale=10) + + # FULL backup + full_id = self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench( + options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + node.cleanup() + + self.restore_node( + backup_dir, 'node', node, + options=['--recovery-target=latest', '--recovery-target-action=promote']) + node.slow_start() + + # Timeline 2 + # Take stream full backup from the instance on timeline 2 + self.backup_node( + backup_dir, 'node', node, backup_type='full', + options=['--stream', '--log-level-file=verbose']) + + node.cleanup() + + # restore stream backup + self.restore_node(backup_dir, 'node', node) + + xlog_dir = 'pg_wal' + if self.get_version(node) < 100000: + xlog_dir = 'pg_xlog' + + filepath = os.path.join(node.data_dir, xlog_dir, "00000002.history") + self.assertTrue( + os.path.exists(filepath), + "History file does not exist: {0}".format(filepath)) + + node.slow_start() + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname,
'node_restored')) + node_restored.cleanup() + + pg_basebackup_path = self.get_bin_path('pg_basebackup') + + self.run_binary( + [ + pg_basebackup_path, '-p', str(node.port), '-h', 'localhost', + '-R', '-X', 'stream', '-D', node_restored.data_dir + ]) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.slow_start(replica=True) + +# TODO: +# null offset STOP LSN and latest record in previous segment is conrecord (manual only) +# archiving from promoted delayed replica diff --git a/tests/restore_test.py b/tests/restore_test.py new file mode 100644 index 000000000..2de3ecc0f --- /dev/null +++ b/tests/restore_test.py @@ -0,0 +1,3822 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +import subprocess +import sys +from time import sleep +from datetime import datetime, timedelta, timezone +import hashlib +import shutil +import json +from shutil import copyfile +from testgres import QueryException, StartNodeException +from stat import S_ISDIR + + +class RestoreTest(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_restore_full_to_latest(self): + """recovery to latest from full backup""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=2) + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + before = node.execute("postgres", "SELECT * FROM pgbench_branches") + backup_id = self.backup_node(backup_dir, 'node', node) + + node.stop() + node.cleanup() + + # 1 - Test recovery from latest + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, options=["-j", "4"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + # 2 - Test that recovery.conf was created + # TODO update test + if self.get_version(node) >= self.version_to_num('12.0'): + recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') + with open(recovery_conf, 'r') as f: + print(f.read()) + else: + recovery_conf = os.path.join(node.data_dir, 'recovery.conf') + self.assertEqual(os.path.isfile(recovery_conf), True) + + node.slow_start() + + after = node.execute("postgres", "SELECT * FROM pgbench_branches") + self.assertEqual(before, after) + + # @unittest.skip("skip") + def test_restore_full_page_to_latest(self): + """recovery to latest from full + page backups""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=2) + + self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="page") + + before = node.execute("postgres", "SELECT * FROM pgbench_branches") + + node.stop() + 
node.cleanup() + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, options=["-j", "4"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + node.slow_start() + + after = node.execute("postgres", "SELECT * FROM pgbench_branches") + self.assertEqual(before, after) + + # @unittest.skip("skip") + def test_restore_to_specific_timeline(self): + """recovery to target timeline""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=2) + + before = node.execute("postgres", "SELECT * FROM pgbench_branches") + + backup_id = self.backup_node(backup_dir, 'node', node) + + target_tli = int( + node.get_control_data()["Latest checkpoint's TimeLineID"]) + node.stop() + node.cleanup() + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, options=["-j", "4"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + node.slow_start() + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '2', '--no-vacuum']) + pgbench.wait() + pgbench.stdout.close() + + self.backup_node(backup_dir, 'node', node) + + node.stop() + node.cleanup() + + # Correct backup must be chosen for restore + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--timeline={0}".format(target_tli)] + ), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + recovery_target_timeline = self.get_recovery_conf( + node)["recovery_target_timeline"] + self.assertEqual(int(recovery_target_timeline), target_tli) + + node.slow_start() + after = node.execute("postgres", "SELECT * FROM pgbench_branches") + self.assertEqual(before, after) + + # @unittest.skip("skip") + def test_restore_to_time(self): + """recovery to target time""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + pg_options={'TimeZone': 'GMT'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=2) + before = node.execute("postgres", "SELECT * FROM pgbench_branches") + + backup_id = self.backup_node(backup_dir, 'node', node) + + target_time = node.execute( + "postgres", "SELECT to_char(now(), 'YYYY-MM-DD HH24:MI:SS+00')" + )[0][0] + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + + node.stop() + node.cleanup() + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", '--time={0}'.format(target_time), + "--recovery-target-action=promote" + ] + ), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + node.slow_start() + after = node.execute("postgres", "SELECT * FROM
pgbench_branches") + self.assertEqual(before, after) + + # @unittest.skip("skip") + def test_restore_to_xid_inclusive(self): + """recovery to target xid""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=2) + with node.connect("postgres") as con: + con.execute("CREATE TABLE tbl0005 (a text)") + con.commit() + + backup_id = self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + + before = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") + with node.connect("postgres") as con: + res = con.execute("INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") + con.commit() + target_xid = res[0][0] + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + + node.stop() + node.cleanup() + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", '--xid={0}'.format(target_xid), + "--recovery-target-action=promote"] + ), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + node.slow_start() + after = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") + self.assertEqual(before, after) + self.assertEqual( + len(node.execute("postgres", "SELECT * FROM tbl0005")), 1) + + # @unittest.skip("skip") + def test_restore_to_xid_not_inclusive(self): + """recovery with target inclusive false""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=2) + with node.connect("postgres") as con: + con.execute("CREATE TABLE tbl0005 (a text)") + con.commit() + + backup_id = self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + + before = node.execute("postgres", "SELECT * FROM pgbench_branches") + with node.connect("postgres") as con: + result = con.execute("INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") + con.commit() + target_xid = result[0][0] + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + + node.stop() + node.cleanup() + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", + '--xid={0}'.format(target_xid), + "--inclusive=false", + "--recovery-target-action=promote"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + node.slow_start() + after = node.execute("postgres", "SELECT * FROM pgbench_branches") + self.assertEqual(before, after) + self.assertEqual( + len(node.execute("postgres", "SELECT * FROM tbl0005")), 0) + + # @unittest.skip("skip") + def test_restore_to_lsn_inclusive(self): + 
"""recovery to target lsn""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + if self.get_version(node) < self.version_to_num('10.0'): + return + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=2) + with node.connect("postgres") as con: + con.execute("CREATE TABLE tbl0005 (a int)") + con.commit() + + backup_id = self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + + before = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") + with node.connect("postgres") as con: + con.execute("INSERT INTO tbl0005 VALUES (1)") + con.commit() + res = con.execute("SELECT pg_current_wal_lsn()") + con.commit() + con.execute("INSERT INTO tbl0005 VALUES (2)") + con.commit() + xlogid, xrecoff = res[0][0].split('/') + xrecoff = hex(int(xrecoff, 16) + 1)[2:] + target_lsn = "{0}/{1}".format(xlogid, xrecoff) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + + node.stop() + node.cleanup() + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", '--lsn={0}'.format(target_lsn), + "--recovery-target-action=promote"] + ), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + node.slow_start() + + after = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") + self.assertEqual(before, after) + self.assertEqual( + len(node.execute("postgres", "SELECT * FROM tbl0005")), 2) + + # @unittest.skip("skip") + def test_restore_to_lsn_not_inclusive(self): + """recovery to target lsn""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + if self.get_version(node) < self.version_to_num('10.0'): + return + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=2) + with node.connect("postgres") as con: + con.execute("CREATE TABLE tbl0005 (a int)") + con.commit() + + backup_id = self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + + before = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") + with node.connect("postgres") as con: + con.execute("INSERT INTO tbl0005 VALUES (1)") + con.commit() + res = con.execute("SELECT pg_current_wal_lsn()") + con.commit() + con.execute("INSERT INTO tbl0005 VALUES (2)") + con.commit() + xlogid, xrecoff = res[0][0].split('/') + xrecoff = hex(int(xrecoff, 16) + 1)[2:] + target_lsn = "{0}/{1}".format(xlogid, xrecoff) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + + node.stop() + node.cleanup() + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, + options=[ + "--inclusive=false", + "-j", "4", '--lsn={0}'.format(target_lsn), + 
"--recovery-target-action=promote"] + ), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + node.slow_start() + + after = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") + self.assertEqual(before, after) + self.assertEqual( + len(node.execute("postgres", "SELECT * FROM tbl0005")), 1) + + # @unittest.skip("skip") + def test_restore_full_ptrack_archive(self): + """recovery to latest from archive full+ptrack backups""" + if not self.ptrack: + self.skipTest('Skipped because ptrack support is disabled') + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + ptrack_enable=True) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + node.pgbench_init(scale=2) + + self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="ptrack") + + before = node.execute("postgres", "SELECT * FROM pgbench_branches") + + node.stop() + node.cleanup() + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, + options=["-j", "4"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + node.slow_start() + after = node.execute("postgres", "SELECT * FROM pgbench_branches") + self.assertEqual(before, after) + + # @unittest.skip("skip") + def test_restore_ptrack(self): + """recovery to latest from archive full+ptrack+ptrack backups""" + if not self.ptrack: + self.skipTest('Skipped because ptrack support is disabled') + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + ptrack_enable=True) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + node.pgbench_init(scale=2) + + self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + + self.backup_node(backup_dir, 'node', node, backup_type="ptrack") + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="ptrack") + + before = node.execute("postgres", "SELECT * FROM pgbench_branches") + + node.stop() + node.cleanup() + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, + options=["-j", "4"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + node.slow_start() + after = node.execute("postgres", "SELECT * FROM pgbench_branches") + self.assertEqual(before, after) + + # @unittest.skip("skip") + def test_restore_full_ptrack_stream(self): + """recovery in stream mode to latest from full + ptrack backups""" + if not self.ptrack: + self.skipTest('Skipped 
because ptrack support is disabled') + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + node.pgbench_init(scale=2) + + self.backup_node(backup_dir, 'node', node, options=["--stream"]) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + + backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type="ptrack", options=["--stream"]) + + before = node.execute("postgres", "SELECT * FROM pgbench_branches") + + node.stop() + node.cleanup() + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, options=["-j", "4"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + node.slow_start() + after = node.execute("postgres", "SELECT * FROM pgbench_branches") + self.assertEqual(before, after) + + # @unittest.skip("skip") + def test_restore_full_ptrack_under_load(self): + """ + recovery to latest from full + ptrack backups + with loads when ptrack backup do + """ + if not self.ptrack: + self.skipTest('Skipped because ptrack support is disabled') + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + node.pgbench_init(scale=2) + + self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "4", "-T", "8"] + ) + + backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type="ptrack", options=["--stream"]) + + pgbench.wait() + pgbench.stdout.close() + + bbalance = node.execute( + "postgres", "SELECT sum(bbalance) FROM pgbench_branches") + delta = node.execute( + "postgres", "SELECT sum(delta) FROM pgbench_history") + + self.assertEqual(bbalance, delta) + node.stop() + node.cleanup() + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, options=["-j", "4"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + node.slow_start() + bbalance = node.execute( + "postgres", "SELECT sum(bbalance) FROM pgbench_branches") + delta = node.execute( + "postgres", "SELECT sum(delta) FROM pgbench_history") + self.assertEqual(bbalance, delta) + + # @unittest.skip("skip") + def test_restore_full_under_load_ptrack(self): + """ + recovery to latest from full + page backups + with loads when full backup do + """ + if not self.ptrack: + self.skipTest('Skipped because ptrack support is disabled') + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=True, + initdb_params=['--data-checksums']) + + backup_dir = 
os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + + # wal_segment_size = self.guc_wal_segment_size(node) + node.pgbench_init(scale=2) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=["-c", "4", "-T", "8"] + ) + + self.backup_node(backup_dir, 'node', node) + + pgbench.wait() + pgbench.stdout.close() + + backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type="ptrack", options=["--stream"]) + + bbalance = node.execute( + "postgres", "SELECT sum(bbalance) FROM pgbench_branches") + delta = node.execute( + "postgres", "SELECT sum(delta) FROM pgbench_history") + + self.assertEqual(bbalance, delta) + + node.stop() + node.cleanup() + # self.wrong_wal_clean(node, wal_segment_size) + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, options=["-j", "4"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + bbalance = node.execute( + "postgres", "SELECT sum(bbalance) FROM pgbench_branches") + delta = node.execute( + "postgres", "SELECT sum(delta) FROM pgbench_history") + self.assertEqual(bbalance, delta) + + # @unittest.skip("skip") + def test_restore_with_tablespace_mapping_1(self): + """recovery using tablespace-mapping option""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Create tablespace + tblspc_path = os.path.join(node.base_dir, "tblspc") + os.makedirs(tblspc_path) + with node.connect("postgres") as con: + con.connection.autocommit = True + con.execute("CREATE TABLESPACE tblspc LOCATION '%s'" % tblspc_path) + con.connection.autocommit = False + con.execute("CREATE TABLE test (id int) TABLESPACE tblspc") + con.execute("INSERT INTO test VALUES (1)") + con.commit() + + backup_id = self.backup_node(backup_dir, 'node', node) + self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") + + # 1 - Try to restore to existing directory + node.stop() + try: + self.restore_node(backup_dir, 'node', node) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because restore destination is not empty.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Restore destination is not empty:', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # 2 - Try to restore to existing tablespace directory + tblspc_path_tmp = os.path.join(node.base_dir, "tblspc_tmp") + os.rename(tblspc_path, tblspc_path_tmp) + node.cleanup() + os.rename(tblspc_path_tmp, tblspc_path) + try: + self.restore_node(backup_dir, 'node', node) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because restore tablespace destination is " + "not empty.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + 
self.assertIn( + 'ERROR: Restore tablespace destination is not empty:', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # 3 - Restore using tablespace-mapping to not empty directory + tblspc_path_temp = os.path.join(node.base_dir, "tblspc_temp") + os.mkdir(tblspc_path_temp) + with open(os.path.join(tblspc_path_temp, 'file'), 'w+') as f: + f.close() + + try: + self.restore_node( + backup_dir, 'node', node, + options=["-T", "%s=%s" % (tblspc_path, tblspc_path_temp)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because restore tablespace destination is " + "not empty.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Restore tablespace destination is not empty:', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # 4 - Restore using tablespace-mapping + tblspc_path_new = os.path.join(node.base_dir, "tblspc_new") + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, + options=[ + "-T", "%s=%s" % (tblspc_path, tblspc_path_new)] + ), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + node.slow_start() + + result = node.execute("postgres", "SELECT id FROM test") + self.assertEqual(result[0][0], 1) + + # 4 - Restore using tablespace-mapping using page backup + self.backup_node(backup_dir, 'node', node) + with node.connect("postgres") as con: + con.execute("INSERT INTO test VALUES (2)") + con.commit() + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="page") + + show_pb = self.show_pb(backup_dir, 'node') + self.assertEqual(show_pb[1]['status'], "OK") + self.assertEqual(show_pb[2]['status'], "OK") + + node.stop() + node.cleanup() + tblspc_path_page = os.path.join(node.base_dir, "tblspc_page") + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, + options=[ + "-T", "%s=%s" % (tblspc_path_new, tblspc_path_page)]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + node.slow_start() + result = node.execute("postgres", "SELECT id FROM test OFFSET 1") + self.assertEqual(result[0][0], 2) + + # @unittest.skip("skip") + def test_restore_with_tablespace_mapping_2(self): + """recovery using tablespace-mapping option and page backup""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Full backup + self.backup_node(backup_dir, 'node', node) + self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") + + # Create tablespace + tblspc_path = os.path.join(node.base_dir, "tblspc") + os.makedirs(tblspc_path) + with node.connect("postgres") as con: + con.connection.autocommit = True + con.execute("CREATE TABLESPACE tblspc LOCATION '%s'" % tblspc_path) + con.connection.autocommit = False + con.execute( + "CREATE TABLE tbl AS SELECT * " + "FROM generate_series(0,3) AS integer") + con.commit() + + # First page backup + self.backup_node(backup_dir, 'node', node, backup_type="page") + 
self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK") + self.assertEqual( + self.show_pb(backup_dir, 'node')[1]['backup-mode'], "PAGE") + + # Create tablespace table + with node.connect("postgres") as con: +# con.connection.autocommit = True +# con.execute("CHECKPOINT") +# con.connection.autocommit = False + con.execute("CREATE TABLE tbl1 (a int) TABLESPACE tblspc") + con.execute( + "INSERT INTO tbl1 SELECT * " + "FROM generate_series(0,3) AS integer") + con.commit() + + # Second page backup + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="page") + self.assertEqual(self.show_pb(backup_dir, 'node')[2]['status'], "OK") + self.assertEqual( + self.show_pb(backup_dir, 'node')[2]['backup-mode'], "PAGE") + + node.stop() + node.cleanup() + + tblspc_path_new = os.path.join(node.base_dir, "tblspc_new") + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, + options=[ + "-T", "%s=%s" % (tblspc_path, tblspc_path_new)]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + + count = node.execute("postgres", "SELECT count(*) FROM tbl") + self.assertEqual(count[0][0], 4) + count = node.execute("postgres", "SELECT count(*) FROM tbl1") + self.assertEqual(count[0][0], 4) + + # @unittest.skip("skip") + def test_restore_with_missing_or_corrupted_tablespace_map(self): + """restore backup with missing or corrupted tablespace_map""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Create tablespace + self.create_tblspace_in_node(node, 'tblspace') + node.pgbench_init(scale=1, tablespace='tblspace') + + # Full backup + self.backup_node(backup_dir, 'node', node) + + # Change some data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Page backup + page_id = self.backup_node(backup_dir, 'node', node, backup_type="page") + + pgdata = self.pgdata_content(node.data_dir) + + node2 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node2')) + node2.cleanup() + + olddir = self.get_tblspace_path(node, 'tblspace') + newdir = self.get_tblspace_path(node2, 'tblspace') + + # drop tablespace_map + tablespace_map = os.path.join( + backup_dir, 'backups', 'node', + page_id, 'database', 'tablespace_map') + + tablespace_map_tmp = os.path.join( + backup_dir, 'backups', 'node', + page_id, 'database', 'tablespace_map_tmp') + + os.rename(tablespace_map, tablespace_map_tmp) + + try: + self.restore_node( + backup_dir, 'node', node2, + options=["-T", "{0}={1}".format(olddir, newdir)]) + self.assertEqual( + 1, 0, + "Expecting Error because tablespace_map is missing.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Tablespace map is missing: "{0}", ' + 'probably backup {1} is corrupt, validate it'.format( + tablespace_map, page_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + try: + self.restore_node(backup_dir, 'node', node2) + self.assertEqual( + 1, 0, + "Expecting Error because tablespace_map is missing.\n " + "Output: {0} \n CMD: {1}".format( + 
repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Tablespace map is missing: "{0}", ' + 'probably backup {1} is corrupt, validate it'.format( + tablespace_map, page_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + copyfile(tablespace_map_tmp, tablespace_map) + + with open(tablespace_map, "a") as f: + f.write("HELLO\n") + + try: + self.restore_node( + backup_dir, 'node', node2, + options=["-T", "{0}={1}".format(olddir, newdir)]) + self.assertEqual( + 1, 0, + "Expecting Error because tablespace_map is corrupted.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Invalid CRC of tablespace map file "{0}"'.format(tablespace_map), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + try: + self.restore_node(backup_dir, 'node', node2) + self.assertEqual( + 1, 0, + "Expecting Error because tablespace_map is corrupted.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Invalid CRC of tablespace map file "{0}"'.format(tablespace_map), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # rename it back + os.rename(tablespace_map_tmp, tablespace_map) + + print(self.restore_node( + backup_dir, 'node', node2, + options=["-T", "{0}={1}".format(olddir, newdir)])) + + pgdata_restored = self.pgdata_content(node2.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_archive_node_backup_stream_restore_to_recovery_time(self): + """ + make node with archiving, make stream backup, + make PITR to Recovery Time + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + backup_id = self.backup_node( + backup_dir, 'node', node, options=["--stream"]) + node.safe_psql("postgres", "create table t_heap(a int)") + + node.stop() + node.cleanup() + + recovery_time = self.show_pb( + backup_dir, 'node', backup_id)['recovery-time'] + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", '--time={0}'.format(recovery_time), + "--recovery-target-action=promote" + ] + ), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + node.slow_start() + + result = node.psql("postgres", 'select * from t_heap') + self.assertTrue('does not exist' in result[2].decode("utf-8")) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_archive_node_backup_stream_restore_to_recovery_time_2(self): + """ + make node with archiving, make stream backup, + make PITR to Recovery Time + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + backup_id =
self.backup_node( + backup_dir, 'node', node, options=["--stream"]) + node.safe_psql("postgres", "create table t_heap(a int)") + node.stop() + node.cleanup() + + recovery_time = self.show_pb( + backup_dir, 'node', backup_id)['recovery-time'] + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", '--time={0}'.format(recovery_time), + "--recovery-target-action=promote" + ] + ), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + node.slow_start() + result = node.psql("postgres", 'select * from t_heap') + self.assertTrue('does not exist' in result[2].decode("utf-8")) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_archive_node_backup_stream_pitr(self): + """ + make node with archiving, make stream backup, + create table t_heap, make pitr to Recovery Time, + check that t_heap do not exists + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + backup_id = self.backup_node( + backup_dir, 'node', node, options=["--stream"]) + node.safe_psql("postgres", "create table t_heap(a int)") + node.cleanup() + + recovery_time = self.show_pb( + backup_dir, 'node', backup_id)['recovery-time'] + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", '--time={0}'.format(recovery_time), + "--recovery-target-action=promote" + ] + ), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + node.slow_start() + + result = node.psql("postgres", 'select * from t_heap') + self.assertEqual(True, 'does not exist' in result[2].decode("utf-8")) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_archive_node_backup_archive_pitr_2(self): + """ + make node with archiving, make archive backup, + create table t_heap, make pitr to Recovery Time, + check that t_heap do not exists + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + backup_id = self.backup_node(backup_dir, 'node', node) + if self.paranoia: + pgdata = self.pgdata_content(node.data_dir) + + node.safe_psql("postgres", "create table t_heap(a int)") + node.stop() + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + recovery_time = self.show_pb( + backup_dir, 'node', backup_id)['recovery-time'] + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", '--time={0}'.format(recovery_time), + "--recovery-target-action=promote"] + ), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + if self.paranoia: + pgdata_restored = self.pgdata_content(node_restored.data_dir) + 
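# restored data directory must match the state captured right after the backup, i.e. without t_heap +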
self.compare_pgdata(pgdata, pgdata_restored) + + self.set_auto_conf(node_restored, {'port': node_restored.port}) + + node_restored.slow_start() + + result = node_restored.psql("postgres", 'select * from t_heap') + self.assertTrue('does not exist' in result[2].decode("utf-8")) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_archive_restore_to_restore_point(self): + """ + make node with archiving, make archive backup, + create table t_heap, make pitr to Recovery Time, + check that t_heap do not exists + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "create table t_heap as select generate_series(0,10000)") + result = node.safe_psql( + "postgres", + "select * from t_heap") + node.safe_psql( + "postgres", "select pg_create_restore_point('savepoint')") + node.safe_psql( + "postgres", + "create table t_heap_1 as select generate_series(0,10000)") + node.cleanup() + + self.restore_node( + backup_dir, 'node', node, + options=[ + "--recovery-target-name=savepoint", + "--recovery-target-action=promote"]) + + node.slow_start() + + result_new = node.safe_psql("postgres", "select * from t_heap") + res = node.psql("postgres", "select * from t_heap_1") + self.assertEqual( + res[0], 1, + "Table t_heap_1 should not exist in restored instance") + + self.assertEqual(result, result_new) + + @unittest.skip("skip") + # @unittest.expectedFailure + def test_zags_block_corrupt(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node) + + conn = node.connect() + with node.connect("postgres") as conn: + + conn.execute( + "create table tbl(i int)") + conn.commit() + conn.execute( + "create index idx ON tbl (i)") + conn.commit() + conn.execute( + "insert into tbl select i from generate_series(0,400) as i") + conn.commit() + conn.execute( + "select pg_relation_size('idx')") + conn.commit() + conn.execute( + "delete from tbl where i < 100") + conn.commit() + conn.execute( + "explain analyze select i from tbl order by i") + conn.commit() + conn.execute( + "select i from tbl order by i") + conn.commit() + conn.execute( + "create extension pageinspect") + conn.commit() + print(conn.execute( + "select * from bt_page_stats('idx',1)")) + conn.commit() + conn.execute( + "insert into tbl select i from generate_series(0,100) as i") + conn.commit() + conn.execute( + "insert into tbl select i from generate_series(0,100) as i") + conn.commit() + conn.execute( + "insert into tbl select i from generate_series(0,100) as i") + conn.commit() + conn.execute( + "insert into tbl select i from generate_series(0,100) as i") + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), + initdb_params=['--data-checksums']) + + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored) + + self.set_auto_conf( 
+ node_restored, + {'archive_mode': 'off', 'hot_standby': 'on', 'port': node_restored.port}) + + node_restored.slow_start() + + @unittest.skip("skip") + # @unittest.expectedFailure + def test_zags_block_corrupt_1(self): + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + pg_options={ + 'full_page_writes': 'on'} + ) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node) + + node.safe_psql('postgres', 'create table tbl(i int)') + + node.safe_psql('postgres', 'create index idx ON tbl (i)') + + node.safe_psql( + 'postgres', + 'insert into tbl select i from generate_series(0,100000) as i') + + node.safe_psql( + 'postgres', + 'delete from tbl where i%2 = 0') + + node.safe_psql( + 'postgres', + 'explain analyze select i from tbl order by i') + + node.safe_psql( + 'postgres', + 'select i from tbl order by i') + + node.safe_psql( + 'postgres', + 'create extension pageinspect') + + node.safe_psql( + 'postgres', + 'insert into tbl select i from generate_series(0,100) as i') + + node.safe_psql( + 'postgres', + 'insert into tbl select i from generate_series(0,100) as i') + + node.safe_psql( + 'postgres', + 'insert into tbl select i from generate_series(0,100) as i') + + node.safe_psql( + 'postgres', + 'insert into tbl select i from generate_series(0,100) as i') + + self.switch_wal_segment(node) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), + initdb_params=['--data-checksums']) + + pgdata = self.pgdata_content(node.data_dir) + + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored) + + self.set_auto_conf( + node_restored, + {'archive_mode': 'off', 'hot_standby': 'on', 'port': node_restored.port}) + + node_restored.slow_start() + + while True: + with open(node_restored.pg_log_file, 'r') as f: + if 'selected new timeline ID' in f.read(): + break + + # with open(node_restored.pg_log_file, 'r') as f: + # print(f.read()) + + pgdata_restored = self.pgdata_content(node_restored.data_dir) + + self.compare_pgdata(pgdata, pgdata_restored) + +# pg_xlogdump_path = self.get_bin_path('pg_xlogdump') + +# pg_xlogdump = self.run_binary( +# [ +# pg_xlogdump_path, '-b', +# os.path.join(backup_dir, 'wal', 'node', '000000010000000000000003'), +# ' | ', 'grep', 'Btree', '' +# ], async=False) + + if pg_xlogdump.returncode: + self.assertFalse( + True, + 'Failed to start pg_wal_dump: {0}'.format( + pg_receivexlog.communicate()[1])) + + # @unittest.skip("skip") + def test_restore_chain(self): + """ + make node, take full backup, take several + ERROR delta backups, take valid delta backup, + restore must be successfull + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Take FULL + self.backup_node( + backup_dir, 'node', node) + + # Take DELTA + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + # Take ERROR DELTA + try: + self.backup_node( + backup_dir, 'node', node, + 
backup_type='delta', options=['-U', 'wrong_name']) + except ProbackupException as e: + pass + + # Take ERROR DELTA + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['-U', 'wrong_name']) + except ProbackupException as e: + pass + + # Take DELTA + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + # Take ERROR DELTA + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['-U', 'wrong_name']) + except ProbackupException as e: + pass + + self.assertEqual( + 'OK', + self.show_pb(backup_dir, 'node')[0]['status'], + 'Backup STATUS should be "OK"') + + self.assertEqual( + 'OK', + self.show_pb(backup_dir, 'node')[1]['status'], + 'Backup STATUS should be "OK"') + + self.assertEqual( + 'ERROR', + self.show_pb(backup_dir, 'node')[2]['status'], + 'Backup STATUS should be "ERROR"') + + self.assertEqual( + 'ERROR', + self.show_pb(backup_dir, 'node')[3]['status'], + 'Backup STATUS should be "ERROR"') + + self.assertEqual( + 'OK', + self.show_pb(backup_dir, 'node')[4]['status'], + 'Backup STATUS should be "OK"') + + self.assertEqual( + 'ERROR', + self.show_pb(backup_dir, 'node')[5]['status'], + 'Backup STATUS should be "ERROR"') + + node.cleanup() + + self.restore_node(backup_dir, 'node', node) + + # @unittest.skip("skip") + def test_restore_chain_with_corrupted_backup(self): + """more complex test_restore_chain()""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Take FULL + self.backup_node( + backup_dir, 'node', node) + + # Take DELTA + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Take ERROR DELTA + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='page', options=['-U', 'wrong_name']) + except ProbackupException as e: + pass + + # Take 1 DELTA + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + # Take ERROR DELTA + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['-U', 'wrong_name']) + except ProbackupException as e: + pass + + # Take 2 DELTA + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + # Take ERROR DELTA + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['-U', 'wrong_name']) + except ProbackupException as e: + pass + + # Take 3 DELTA + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + # Corrupted 4 DELTA + corrupt_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + # ORPHAN 5 DELTA + restore_target_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + # ORPHAN 6 DELTA + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + # NEXT FULL BACKUP + self.backup_node( + backup_dir, 'node', node, backup_type='full') + + # Next Delta + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + # do corrupt 6 DELTA backup + file = os.path.join( + backup_dir, 'backups', 'node', + corrupt_id, 'database', 'global', 'pg_control') + + file_new = os.path.join(backup_dir, 'pg_control') + os.rename(file, file_new) + + # RESTORE BACKUP + node.cleanup() + + try: + self.restore_node( + backup_dir, 'node', node, backup_id=restore_target_id) + 
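# reached only if the restore unexpectedly succeeds: restore_target_id is orphaned, since its ancestor chain contains the corrupted backup +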
self.assertEqual( + 1, 0, + "Expecting Error because restore backup is corrupted.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Backup {0} is orphan'.format(restore_target_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'OK', + self.show_pb(backup_dir, 'node')[0]['status'], + 'Backup STATUS should be "OK"') + + self.assertEqual( + 'OK', + self.show_pb(backup_dir, 'node')[1]['status'], + 'Backup STATUS should be "OK"') + + self.assertEqual( + 'ERROR', + self.show_pb(backup_dir, 'node')[2]['status'], + 'Backup STATUS should be "ERROR"') + + self.assertEqual( + 'OK', + self.show_pb(backup_dir, 'node')[3]['status'], + 'Backup STATUS should be "OK"') + + self.assertEqual( + 'ERROR', + self.show_pb(backup_dir, 'node')[4]['status'], + 'Backup STATUS should be "ERROR"') + + self.assertEqual( + 'OK', + self.show_pb(backup_dir, 'node')[5]['status'], + 'Backup STATUS should be "OK"') + + self.assertEqual( + 'ERROR', + self.show_pb(backup_dir, 'node')[6]['status'], + 'Backup STATUS should be "ERROR"') + + self.assertEqual( + 'OK', + self.show_pb(backup_dir, 'node')[7]['status'], + 'Backup STATUS should be "OK"') + + # corruption victim + self.assertEqual( + 'CORRUPT', + self.show_pb(backup_dir, 'node')[8]['status'], + 'Backup STATUS should be "CORRUPT"') + + # orphaned child + self.assertEqual( + 'ORPHAN', + self.show_pb(backup_dir, 'node')[9]['status'], + 'Backup STATUS should be "ORPHAN"') + + # orphaned child + self.assertEqual( + 'ORPHAN', + self.show_pb(backup_dir, 'node')[10]['status'], + 'Backup STATUS should be "ORPHAN"') + + # next FULL + self.assertEqual( + 'OK', + self.show_pb(backup_dir, 'node')[11]['status'], + 'Backup STATUS should be "OK"') + + # next DELTA + self.assertEqual( + 'OK', + self.show_pb(backup_dir, 'node')[12]['status'], + 'Backup STATUS should be "OK"') + + node.cleanup() + + # Skipped, because backups from the future are invalid. + # This cause a "ERROR: Can't assign backup_id, there is already a backup in future" + # now (PBCKP-259). 
We can conduct such a test again when we + # untie 'backup_id' from 'start_time' + @unittest.skip("skip") + def test_restore_backup_from_future(self): + """more complex test_restore_chain()""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Take FULL + self.backup_node(backup_dir, 'node', node) + + node.pgbench_init(scale=5) + # pgbench = node.pgbench(options=['-T', '20', '-c', '2']) + # pgbench.wait() + + # Take PAGE from future + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + with open( + os.path.join( + backup_dir, 'backups', 'node', + backup_id, "backup.control"), "a") as conf: + conf.write("start-time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() + timedelta(days=3))) + + # rename directory + new_id = self.show_pb(backup_dir, 'node')[1]['id'] + + os.rename( + os.path.join(backup_dir, 'backups', 'node', backup_id), + os.path.join(backup_dir, 'backups', 'node', new_id)) + + pgbench = node.pgbench(options=['-T', '7', '-c', '1', '--no-vacuum']) + pgbench.wait() + + backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') + pgdata = self.pgdata_content(node.data_dir) + + node.cleanup() + self.restore_node(backup_dir, 'node', node, backup_id=backup_id) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_restore_target_immediate_stream(self): + """ + correct handling of immediate recovery target + for STREAM backups + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # Take FULL + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + # Take delta + backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['--stream']) + + pgdata = self.pgdata_content(node.data_dir) + + # TODO update test + if self.get_version(node) >= self.version_to_num('12.0'): + recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') + with open(recovery_conf, 'r') as f: + print(f.read()) + else: + recovery_conf = os.path.join(node.data_dir, 'recovery.conf') + + # restore delta backup + node.cleanup() + self.restore_node( + backup_dir, 'node', node, options=['--immediate']) + + self.assertTrue( + os.path.isfile(recovery_conf), + "File {0} do not exists".format(recovery_conf)) + + # restore delta backup + node.cleanup() + self.restore_node( + backup_dir, 'node', node, options=['--recovery-target=immediate']) + + self.assertTrue( + os.path.isfile(recovery_conf), + "File {0} do not exists".format(recovery_conf)) + + # @unittest.skip("skip") + def test_restore_target_immediate_archive(self): + """ + correct handling of immediate recovery target + for ARCHIVE backups + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, 
self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Take FULL + self.backup_node( + backup_dir, 'node', node) + + # Take delta + backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type='delta') + + pgdata = self.pgdata_content(node.data_dir) + + # TODO update test + if self.get_version(node) >= self.version_to_num('12.0'): + recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') + with open(recovery_conf, 'r') as f: + print(f.read()) + else: + recovery_conf = os.path.join(node.data_dir, 'recovery.conf') + + # restore page backup + node.cleanup() + self.restore_node( + backup_dir, 'node', node, options=['--immediate']) + + # For archive backup with immediate recovery target + # recovery.conf is mandatory + with open(recovery_conf, 'r') as f: + self.assertIn("recovery_target = 'immediate'", f.read()) + + # restore page backup + node.cleanup() + self.restore_node( + backup_dir, 'node', node, options=['--recovery-target=immediate']) + + # For archive backup with immediate recovery target + # recovery.conf is mandatory + with open(recovery_conf, 'r') as f: + self.assertIn("recovery_target = 'immediate'", f.read()) + + # @unittest.skip("skip") + def test_restore_target_latest_archive(self): + """ + make sure that recovery_target 'latest' + is default recovery target + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Take FULL + self.backup_node(backup_dir, 'node', node) + + if self.get_version(node) >= self.version_to_num('12.0'): + recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') + else: + recovery_conf = os.path.join(node.data_dir, 'recovery.conf') + + # restore + node.cleanup() + self.restore_node(backup_dir, 'node', node) + + # hash_1 = hashlib.md5( + # open(recovery_conf, 'rb').read()).hexdigest() + + with open(recovery_conf, 'r') as f: + content_1 = '' + while True: + line = f.readline() + + if not line: + break + if line.startswith("#"): + continue + content_1 += line + + node.cleanup() + self.restore_node(backup_dir, 'node', node, options=['--recovery-target=latest']) + + # hash_2 = hashlib.md5( + # open(recovery_conf, 'rb').read()).hexdigest() + + with open(recovery_conf, 'r') as f: + content_2 = '' + while True: + line = f.readline() + + if not line: + break + if line.startswith("#"): + continue + content_2 += line + + self.assertEqual(content_1, content_2) + + # @unittest.skip("skip") + def test_restore_target_new_options(self): + """ + check that new --recovery-target-* + options are working correctly + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Take FULL + self.backup_node(backup_dir, 'node', node) + + # TODO update test + if self.get_version(node) >= self.version_to_num('12.0'): + recovery_conf = os.path.join(node.data_dir, 
'postgresql.auto.conf') + with open(recovery_conf, 'r') as f: + print(f.read()) + else: + recovery_conf = os.path.join(node.data_dir, 'recovery.conf') + + node.pgbench_init(scale=2) + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + + node.safe_psql( + "postgres", + "CREATE TABLE tbl0005 (a text)") + + node.safe_psql( + "postgres", "select pg_create_restore_point('savepoint')") + + target_name = 'savepoint' + + # in python-3.6+ it can be ...now()..astimezone()... + target_time = datetime.utcnow().replace(tzinfo=timezone.utc).astimezone().strftime("%Y-%m-%d %H:%M:%S %z") + with node.connect("postgres") as con: + res = con.execute( + "INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") + con.commit() + target_xid = res[0][0] + + with node.connect("postgres") as con: + con.execute("INSERT INTO tbl0005 VALUES (1)") + con.commit() + if self.get_version(node) > self.version_to_num('10.0'): + res = con.execute("SELECT pg_current_wal_lsn()") + else: + res = con.execute("SELECT pg_current_xlog_location()") + + con.commit() + con.execute("INSERT INTO tbl0005 VALUES (2)") + con.commit() + xlogid, xrecoff = res[0][0].split('/') + xrecoff = hex(int(xrecoff, 16) + 1)[2:] + target_lsn = "{0}/{1}".format(xlogid, xrecoff) + + # Restore with recovery target time + node.cleanup() + self.restore_node( + backup_dir, 'node', node, + options=[ + '--recovery-target-time={0}'.format(target_time), + "--recovery-target-action=promote", + '--recovery-target-timeline=1', + ]) + + with open(recovery_conf, 'r') as f: + recovery_conf_content = f.read() + + self.assertIn( + "recovery_target_time = '{0}'".format(target_time), + recovery_conf_content) + + self.assertIn( + "recovery_target_action = 'promote'", + recovery_conf_content) + + self.assertIn( + "recovery_target_timeline = '1'", + recovery_conf_content) + + node.slow_start() + + # Restore with recovery target xid + node.cleanup() + self.restore_node( + backup_dir, 'node', node, + options=[ + '--recovery-target-xid={0}'.format(target_xid), + "--recovery-target-action=promote", + '--recovery-target-timeline=1', + ]) + + with open(recovery_conf, 'r') as f: + recovery_conf_content = f.read() + + self.assertIn( + "recovery_target_xid = '{0}'".format(target_xid), + recovery_conf_content) + + self.assertIn( + "recovery_target_action = 'promote'", + recovery_conf_content) + + self.assertIn( + "recovery_target_timeline = '1'", + recovery_conf_content) + + node.slow_start() + + # Restore with recovery target name + node.cleanup() + self.restore_node( + backup_dir, 'node', node, + options=[ + '--recovery-target-name={0}'.format(target_name), + "--recovery-target-action=promote", + '--recovery-target-timeline=1', + ]) + + with open(recovery_conf, 'r') as f: + recovery_conf_content = f.read() + + self.assertIn( + "recovery_target_name = '{0}'".format(target_name), + recovery_conf_content) + + self.assertIn( + "recovery_target_action = 'promote'", + recovery_conf_content) + + self.assertIn( + "recovery_target_timeline = '1'", + recovery_conf_content) + + node.slow_start() + + # Restore with recovery target lsn + if self.get_version(node) >= 100000: + + node.cleanup() + self.restore_node( + backup_dir, 'node', node, + options=[ + '--recovery-target-lsn={0}'.format(target_lsn), + "--recovery-target-action=promote", + '--recovery-target-timeline=1', + ]) + + with open(recovery_conf, 'r') as f: + recovery_conf_content = f.read() + + self.assertIn( + "recovery_target_lsn = '{0}'".format(target_lsn), + 
recovery_conf_content) + + self.assertIn( + "recovery_target_action = 'promote'", + recovery_conf_content) + + self.assertIn( + "recovery_target_timeline = '1'", + recovery_conf_content) + + node.slow_start() + + # @unittest.skip("skip") + def test_smart_restore(self): + """ + make node, create database, take full backup, drop database, + take incremental backup and restore it, + make sure that files from dropped database are not + copied during restore + https://github.com/postgrespro/pg_probackup/issues/63 + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # create database + node.safe_psql( + "postgres", + "CREATE DATABASE testdb") + + # take FULL backup + full_id = self.backup_node(backup_dir, 'node', node) + + # drop database + node.safe_psql( + "postgres", + "DROP DATABASE testdb") + + # take PAGE backup + page_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # restore PAGE backup + node.cleanup() + self.restore_node( + backup_dir, 'node', node, backup_id=page_id, + options=['--no-validate', '--log-level-file=VERBOSE']) + + logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log') + with open(logfile, 'r') as f: + logfile_content = f.read() + + # get delta between FULL and PAGE filelists + filelist_full = self.get_backup_filelist( + backup_dir, 'node', full_id) + + filelist_page = self.get_backup_filelist( + backup_dir, 'node', page_id) + + filelist_diff = self.get_backup_filelist_diff( + filelist_full, filelist_page) + + for file in filelist_diff: + self.assertNotIn(file, logfile_content) + + # @unittest.skip("skip") + def test_pg_11_group_access(self): + """ + test group access for PG >= 11 + """ + if self.pg_config_version < self.version_to_num('11.0'): + self.skipTest('You need PostgreSQL >= 11 for this test') + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=[ + '--data-checksums', + '--allow-group-access']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # take FULL backup + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + pgdata = self.pgdata_content(node.data_dir) + + # restore backup + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node( + backup_dir, 'node', node_restored) + + # compare pgdata permissions + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_restore_concurrent_drop_table(self): + """""" + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=1) + + 
# FULL backup + self.backup_node( + backup_dir, 'node', node, + options=['--stream', '--compress']) + + # DELTA backup + gdb = self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--stream', '--compress', '--no-validate'], + gdb=True) + + gdb.set_breakpoint('backup_data_file') + gdb.run_until_break() + + node.safe_psql( + 'postgres', + 'DROP TABLE pgbench_accounts') + + # do checkpoint to guarantee filenode removal + node.safe_psql( + 'postgres', + 'CHECKPOINT') + + gdb.remove_all_breakpoints() + gdb.continue_execution_until_exit() + + pgdata = self.pgdata_content(node.data_dir) + node.cleanup() + + self.restore_node( + backup_dir, 'node', node, options=['--no-validate']) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_lost_non_data_file(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + file = os.path.join( + backup_dir, 'backups', 'node', + backup_id, 'database', 'postgresql.auto.conf') + + os.remove(file) + + node.cleanup() + + try: + self.restore_node( + backup_dir, 'node', node, options=['--no-validate']) + self.assertEqual( + 1, 0, + "Expecting Error because of non-data file dissapearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'No such file or directory', e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'ERROR: Backup files restoring failed', e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + def test_partial_restore_exclude(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + for i in range(1, 10, 1): + node.safe_psql( + 'postgres', + 'CREATE database db{0}'.format(i)) + + db_list_raw = node.safe_psql( + 'postgres', + 'SELECT to_json(a) ' + 'FROM (SELECT oid, datname FROM pg_database) a').decode('utf-8').rstrip() + + db_list_splitted = db_list_raw.splitlines() + + db_list = {} + for line in db_list_splitted: + line = json.loads(line) + db_list[line['datname']] = line['oid'] + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + pgdata = self.pgdata_content(node.data_dir) + + # restore FULL backup + node_restored_1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) + node_restored_1.cleanup() + + try: + self.restore_node( + backup_dir, 'node', + node_restored_1, options=[ + "--db-include=db1", + "--db-exclude=db2"]) + self.assertEqual( + 1, 0, + "Expecting Error because of 'db-exclude' and 'db-include'.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: You cannot specify '--db-include' " + "and '--db-exclude' 
together", e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.restore_node( + backup_dir, 'node', node_restored_1) + + pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) + self.compare_pgdata(pgdata, pgdata_restored_1) + + db1_path = os.path.join( + node_restored_1.data_dir, 'base', db_list['db1']) + db5_path = os.path.join( + node_restored_1.data_dir, 'base', db_list['db5']) + + self.truncate_every_file_in_dir(db1_path) + self.truncate_every_file_in_dir(db5_path) + pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) + + node_restored_2 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_2')) + node_restored_2.cleanup() + + self.restore_node( + backup_dir, 'node', + node_restored_2, options=[ + "--db-exclude=db1", + "--db-exclude=db5"]) + + pgdata_restored_2 = self.pgdata_content(node_restored_2.data_dir) + self.compare_pgdata(pgdata_restored_1, pgdata_restored_2) + + self.set_auto_conf(node_restored_2, {'port': node_restored_2.port}) + + node_restored_2.slow_start() + + node_restored_2.safe_psql( + 'postgres', + 'select 1') + + try: + node_restored_2.safe_psql( + 'db1', + 'select 1') + except QueryException as e: + self.assertIn('FATAL', e.message) + + try: + node_restored_2.safe_psql( + 'db5', + 'select 1') + except QueryException as e: + self.assertIn('FATAL', e.message) + + with open(node_restored_2.pg_log_file, 'r') as f: + output = f.read() + + self.assertNotIn('PANIC', output) + + def test_partial_restore_exclude_tablespace(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + cat_version = node.get_control_data()["Catalog version number"] + version_specific_dir = 'PG_' + node.major_version_str + '_' + cat_version + + # PG_10_201707211 + # pg_tblspc/33172/PG_9.5_201510051/16386/ + + self.create_tblspace_in_node(node, 'somedata') + + node_tablespace = self.get_tblspace_path(node, 'somedata') + + tbl_oid = node.safe_psql( + 'postgres', + "SELECT oid " + "FROM pg_tablespace " + "WHERE spcname = 'somedata'").decode('utf-8').rstrip() + + for i in range(1, 10, 1): + node.safe_psql( + 'postgres', + 'CREATE database db{0} tablespace somedata'.format(i)) + + db_list_raw = node.safe_psql( + 'postgres', + 'SELECT to_json(a) ' + 'FROM (SELECT oid, datname FROM pg_database) a').decode('utf-8').rstrip() + + db_list_splitted = db_list_raw.splitlines() + + db_list = {} + for line in db_list_splitted: + line = json.loads(line) + db_list[line['datname']] = line['oid'] + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + pgdata = self.pgdata_content(node.data_dir) + + # restore FULL backup + node_restored_1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) + node_restored_1.cleanup() + + node1_tablespace = self.get_tblspace_path(node_restored_1, 'somedata') + + self.restore_node( + backup_dir, 'node', + node_restored_1, options=[ + "-T", "{0}={1}".format( + node_tablespace, node1_tablespace)]) + + pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) + self.compare_pgdata(pgdata, pgdata_restored_1) + + # truncate every db + for db in db_list: + # with exception below + if db 
in ['db1', 'db5']: + self.truncate_every_file_in_dir( + os.path.join( + node_restored_1.data_dir, 'pg_tblspc', + tbl_oid, version_specific_dir, db_list[db])) + + pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) + + node_restored_2 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_2')) + node_restored_2.cleanup() + node2_tablespace = self.get_tblspace_path(node_restored_2, 'somedata') + + self.restore_node( + backup_dir, 'node', + node_restored_2, options=[ + "--db-exclude=db1", + "--db-exclude=db5", + "-T", "{0}={1}".format( + node_tablespace, node2_tablespace)]) + + pgdata_restored_2 = self.pgdata_content(node_restored_2.data_dir) + self.compare_pgdata(pgdata_restored_1, pgdata_restored_2) + + self.set_auto_conf(node_restored_2, {'port': node_restored_2.port}) + + node_restored_2.slow_start() + + node_restored_2.safe_psql( + 'postgres', + 'select 1') + + try: + node_restored_2.safe_psql( + 'db1', + 'select 1') + except QueryException as e: + self.assertIn('FATAL', e.message) + + try: + node_restored_2.safe_psql( + 'db5', + 'select 1') + except QueryException as e: + self.assertIn('FATAL', e.message) + + with open(node_restored_2.pg_log_file, 'r') as f: + output = f.read() + + self.assertNotIn('PANIC', output) + + def test_partial_restore_include(self): + """ + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + for i in range(1, 10, 1): + node.safe_psql( + 'postgres', + 'CREATE database db{0}'.format(i)) + + db_list_raw = node.safe_psql( + 'postgres', + 'SELECT to_json(a) ' + 'FROM (SELECT oid, datname FROM pg_database) a').decode('utf-8').rstrip() + + db_list_splitted = db_list_raw.splitlines() + + db_list = {} + for line in db_list_splitted: + line = json.loads(line) + db_list[line['datname']] = line['oid'] + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + pgdata = self.pgdata_content(node.data_dir) + + # restore FULL backup + node_restored_1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) + node_restored_1.cleanup() + + try: + self.restore_node( + backup_dir, 'node', + node_restored_1, options=[ + "--db-include=db1", + "--db-exclude=db2"]) + self.assertEqual( + 1, 0, + "Expecting Error because of 'db-exclude' and 'db-include'.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: You cannot specify '--db-include' " + "and '--db-exclude' together", e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.restore_node( + backup_dir, 'node', node_restored_1) + + pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) + self.compare_pgdata(pgdata, pgdata_restored_1) + + # truncate every db + for db in db_list: + # with exception below + if db in ['template0', 'template1', 'postgres', 'db1', 'db5']: + continue + self.truncate_every_file_in_dir( + os.path.join( + node_restored_1.data_dir, 'base', db_list[db])) + + pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) + + node_restored_2 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_2')) + 
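# partial restore below: databases not listed in --db-include must come back as zero-length files, matching the truncated etalon prepared above +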
node_restored_2.cleanup() + + self.restore_node( + backup_dir, 'node', + node_restored_2, options=[ + "--db-include=db1", + "--db-include=db5", + "--db-include=postgres"]) + + pgdata_restored_2 = self.pgdata_content(node_restored_2.data_dir) + self.compare_pgdata(pgdata_restored_1, pgdata_restored_2) + + self.set_auto_conf(node_restored_2, {'port': node_restored_2.port}) + node_restored_2.slow_start() + + node_restored_2.safe_psql( + 'db1', + 'select 1') + + node_restored_2.safe_psql( + 'db5', + 'select 1') + + node_restored_2.safe_psql( + 'template1', + 'select 1') + + try: + node_restored_2.safe_psql( + 'db2', + 'select 1') + except QueryException as e: + self.assertIn('FATAL', e.message) + + try: + node_restored_2.safe_psql( + 'db10', + 'select 1') + except QueryException as e: + self.assertIn('FATAL', e.message) + + with open(node_restored_2.pg_log_file, 'r') as f: + output = f.read() + + self.assertNotIn('PANIC', output) + + def test_partial_restore_backward_compatibility_1(self): + """ + old binary should be of version < 2.2.0 + """ + if not self.probackup_old_path: + self.skipTest("You must specify PGPROBACKUPBIN_OLD" + " for run this test") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir, old_binary=True) + self.add_instance(backup_dir, 'node', node, old_binary=True) + + node.slow_start() + + # create databases + for i in range(1, 10, 1): + node.safe_psql( + 'postgres', + 'CREATE database db{0}'.format(i)) + + # FULL backup with old binary, without partial restore support + backup_id = self.backup_node( + backup_dir, 'node', node, + old_binary=True, options=['--stream']) + + pgdata = self.pgdata_content(node.data_dir) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + try: + self.restore_node( + backup_dir, 'node', + node_restored, options=[ + "--db-exclude=db5"]) + self.assertEqual( + 1, 0, + "Expecting Error because backup do not support partial restore.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Backup {0} doesn't contain a database_map, " + "partial restore is impossible".format(backup_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.restore_node(backup_dir, 'node', node_restored) + + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # incremental backup with partial restore support + for i in range(11, 15, 1): + node.safe_psql( + 'postgres', + 'CREATE database db{0}'.format(i)) + + # get db list + db_list_raw = node.safe_psql( + 'postgres', + 'SELECT to_json(a) ' + 'FROM (SELECT oid, datname FROM pg_database) a').rstrip() + db_list_splitted = db_list_raw.splitlines() + db_list = {} + for line in db_list_splitted: + line = json.loads(line) + db_list[line['datname']] = line['oid'] + + backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['--stream']) + + # get etalon + node_restored.cleanup() + self.restore_node(backup_dir, 'node', node_restored) + self.truncate_every_file_in_dir( + os.path.join( + node_restored.data_dir, 'base', db_list['db5'])) + self.truncate_every_file_in_dir( + os.path.join( + node_restored.data_dir, 
'base', db_list['db14'])) + pgdata_restored = self.pgdata_content(node_restored.data_dir) + + # get new node + node_restored_1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) + node_restored_1.cleanup() + + self.restore_node( + backup_dir, 'node', + node_restored_1, options=[ + "--db-exclude=db5", + "--db-exclude=db14"]) + + pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) + + self.compare_pgdata(pgdata_restored, pgdata_restored_1) + + def test_partial_restore_backward_compatibility_merge(self): + """ + old binary should be of version < 2.2.0 + """ + if not self.probackup_old_path: + self.skipTest("You must specify PGPROBACKUPBIN_OLD" + " for run this test") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir, old_binary=True) + self.add_instance(backup_dir, 'node', node, old_binary=True) + + node.slow_start() + + # create databases + for i in range(1, 10, 1): + node.safe_psql( + 'postgres', + 'CREATE database db{0}'.format(i)) + + # FULL backup with old binary, without partial restore support + backup_id = self.backup_node( + backup_dir, 'node', node, + old_binary=True, options=['--stream']) + + pgdata = self.pgdata_content(node.data_dir) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + try: + self.restore_node( + backup_dir, 'node', + node_restored, options=[ + "--db-exclude=db5"]) + self.assertEqual( + 1, 0, + "Expecting Error because backup do not support partial restore.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Backup {0} doesn't contain a database_map, " + "partial restore is impossible.".format(backup_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.restore_node(backup_dir, 'node', node_restored) + + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # incremental backup with partial restore support + for i in range(11, 15, 1): + node.safe_psql( + 'postgres', + 'CREATE database db{0}'.format(i)) + + # get db list + db_list_raw = node.safe_psql( + 'postgres', + 'SELECT to_json(a) ' + 'FROM (SELECT oid, datname FROM pg_database) a').rstrip() + db_list_splitted = db_list_raw.splitlines() + db_list = {} + for line in db_list_splitted: + line = json.loads(line) + db_list[line['datname']] = line['oid'] + + backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['--stream']) + + # get etalon + node_restored.cleanup() + self.restore_node(backup_dir, 'node', node_restored) + self.truncate_every_file_in_dir( + os.path.join( + node_restored.data_dir, 'base', db_list['db5'])) + self.truncate_every_file_in_dir( + os.path.join( + node_restored.data_dir, 'base', db_list['db14'])) + pgdata_restored = self.pgdata_content(node_restored.data_dir) + + # get new node + node_restored_1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) + node_restored_1.cleanup() + + # merge + self.merge_backup(backup_dir, 'node', backup_id=backup_id) + + self.restore_node( + backup_dir, 'node', + node_restored_1, options=[ + "--db-exclude=db5", + 
"--db-exclude=db14"]) + pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) + + self.compare_pgdata(pgdata_restored, pgdata_restored_1) + + def test_empty_and_mangled_database_map(self): + """ + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + + node.slow_start() + + # create databases + for i in range(1, 10, 1): + node.safe_psql( + 'postgres', + 'CREATE database db{0}'.format(i)) + + # FULL backup with database_map + backup_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + pgdata = self.pgdata_content(node.data_dir) + + # truncate database_map + path = os.path.join( + backup_dir, 'backups', 'node', + backup_id, 'database', 'database_map') + with open(path, "w") as f: + f.close() + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + try: + self.restore_node( + backup_dir, 'node', node_restored, + options=["--db-include=db1", '--no-validate']) + self.assertEqual( + 1, 0, + "Expecting Error because database_map is empty.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Backup {0} has empty or mangled database_map, " + "partial restore is impossible".format(backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + try: + self.restore_node( + backup_dir, 'node', node_restored, + options=["--db-exclude=db1", '--no-validate']) + self.assertEqual( + 1, 0, + "Expecting Error because database_map is empty.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Backup {0} has empty or mangled database_map, " + "partial restore is impossible".format(backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # mangle database_map + with open(path, "w") as f: + f.write("42") + f.close() + + try: + self.restore_node( + backup_dir, 'node', node_restored, + options=["--db-include=db1", '--no-validate']) + self.assertEqual( + 1, 0, + "Expecting Error because database_map is empty.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: field "dbOid" is not found in the line 42 of ' + 'the file backup_content.control', e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + try: + self.restore_node( + backup_dir, 'node', node_restored, + options=["--db-exclude=db1", '--no-validate']) + self.assertEqual( + 1, 0, + "Expecting Error because database_map is empty.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: field "dbOid" is not found in the line 42 of ' + 'the file backup_content.control', e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # check that simple restore is still possible + self.restore_node( + backup_dir, 'node', node_restored, options=['--no-validate']) + + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + def test_missing_database_map(self): 
+ """ + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + + node.slow_start() + + # create databases + for i in range(1, 10, 1): + node.safe_psql( + 'postgres', + 'CREATE database db{0}'.format(i)) + + node.safe_psql( + "postgres", + "CREATE DATABASE backupdb") + + # PG 9.5 + if self.get_version(node) < 90600: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") + # PG 9.6 + elif self.get_version(node) > 90600 and self.get_version(node) < 100000: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON 
ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + 
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # >= 15 + else: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + + if self.ptrack: + # TODO why backup works without these grants ? 
+ # 'pg_ptrack_get_pagemapset(pg_lsn)', + # 'pg_ptrack_control_lsn()', + # because PUBLIC + node.safe_psql( + "backupdb", + "CREATE SCHEMA ptrack; " + "GRANT USAGE ON SCHEMA ptrack TO backup; " + "CREATE EXTENSION ptrack WITH SCHEMA ptrack") + + if ProbackupTest.enterprise: + + node.safe_psql( + "backupdb", + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") + + # FULL backup without database_map + backup_id = self.backup_node( + backup_dir, 'node', node, datname='backupdb', + options=['--stream', "-U", "backup", '--log-level-file=verbose']) + + pgdata = self.pgdata_content(node.data_dir) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + # backup has missing database_map and that is legal + try: + self.restore_node( + backup_dir, 'node', node_restored, + options=["--db-exclude=db5", "--db-exclude=db9"]) + self.assertEqual( + 1, 0, + "Expecting Error because user does not have pg_database access.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Backup {0} doesn't contain a database_map, " + "partial restore is impossible.".format( + backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + try: + self.restore_node( + backup_dir, 'node', node_restored, + options=["--db-include=db1"]) + self.assertEqual( + 1, 0, + "Expecting Error because user does not have pg_database access.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Backup {0} doesn't contain a database_map, " + "partial restore is impossible.".format( + backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # check that simple restore is still possible + self.restore_node(backup_dir, 'node', node_restored) + + pgdata_restored = self.pgdata_content(node_restored.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_stream_restore_command_option(self): + """ + correct handling of restore command options + when restoring STREAM backup + + 1. Restore STREAM backup with --restore-command only + parameter, check that PostgreSQL recovery uses + restore_command to obtain WAL from archive. + + 2. Restore STREAM backup with --restore-command + as replica, check that PostgreSQL recovery uses + restore_command to obtain WAL from archive. 
+ """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={'max_wal_size': '32MB'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # TODO update test + if self.get_version(node) >= self.version_to_num('12.0'): + recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') + with open(recovery_conf, 'r') as f: + print(f.read()) + else: + recovery_conf = os.path.join(node.data_dir, 'recovery.conf') + + # Take FULL + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + node.pgbench_init(scale=5) + + node.safe_psql( + 'postgres', + 'create table t1()') + + # restore backup + node.cleanup() + shutil.rmtree(os.path.join(node.logs_dir)) + + restore_cmd = self.get_restore_command(backup_dir, 'node', node) + + self.restore_node( + backup_dir, 'node', node, + options=[ + '--restore-command={0}'.format(restore_cmd)]) + + self.assertTrue( + os.path.isfile(recovery_conf), + "File '{0}' does not exist".format(recovery_conf)) + + if self.get_version(node) >= self.version_to_num('12.0'): + recovery_signal = os.path.join(node.data_dir, 'recovery.signal') + self.assertTrue( + os.path.isfile(recovery_signal), + "File '{0}' does not exist".format(recovery_signal)) + + node.slow_start() + + node.safe_psql( + 'postgres', + 'select * from t1') + + timeline_id = node.safe_psql( + 'postgres', + 'select timeline_id from pg_control_checkpoint()').decode('utf-8').rstrip() + + self.assertEqual('2', timeline_id) + + # @unittest.skip("skip") + def test_restore_primary_conninfo(self): + """ + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # Take FULL + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + node.pgbench_init(scale=1) + + #primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass' + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + str_conninfo='host=192.168.1.50 port=5432 user=foo password=foopass' + + self.restore_node( + backup_dir, 'node', replica, + options=['-R', '--primary-conninfo={0}'.format(str_conninfo)]) + + if self.get_version(node) >= self.version_to_num('12.0'): + standby_signal = os.path.join(replica.data_dir, 'standby.signal') + self.assertTrue( + os.path.isfile(standby_signal), + "File '{0}' does not exist".format(standby_signal)) + + # TODO update test + if self.get_version(node) >= self.version_to_num('12.0'): + recovery_conf = os.path.join(replica.data_dir, 'postgresql.auto.conf') + with open(recovery_conf, 'r') as f: + print(f.read()) + else: + recovery_conf = os.path.join(replica.data_dir, 'recovery.conf') + + with open(os.path.join(replica.data_dir, recovery_conf), 'r') as f: + recovery_conf_content = f.read() + + self.assertIn(str_conninfo, recovery_conf_content) + + # @unittest.skip("skip") + def test_restore_primary_slot_info(self): + """ + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, 
initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # Take FULL + self.backup_node(backup_dir, 'node', node, options=['--stream']) + + node.pgbench_init(scale=1) + + replica = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica.cleanup() + + node.safe_psql( + "SELECT pg_create_physical_replication_slot('master_slot')") + + self.restore_node( + backup_dir, 'node', replica, + options=['-R', '--primary-slot-name=master_slot']) + + self.set_auto_conf(replica, {'port': replica.port}) + self.set_auto_conf(replica, {'hot_standby': 'on'}) + + if self.get_version(node) >= self.version_to_num('12.0'): + standby_signal = os.path.join(replica.data_dir, 'standby.signal') + self.assertTrue( + os.path.isfile(standby_signal), + "File '{0}' does not exist".format(standby_signal)) + + replica.slow_start(replica=True) + + def test_issue_249(self): + """ + https://github.com/postgrespro/pg_probackup/issues/249 + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + 'postgres', + 'CREATE database db1') + + node.pgbench_init(scale=5) + + node.safe_psql( + 'postgres', + 'CREATE TABLE t1 as SELECT * from pgbench_accounts where aid > 200000 and aid < 450000') + + node.safe_psql( + 'postgres', + 'DELETE from pgbench_accounts where aid > 200000 and aid < 450000') + + node.safe_psql( + 'postgres', + 'select * from pgbench_accounts') + + # FULL backup + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + 'postgres', + 'INSERT INTO pgbench_accounts SELECT * FROM t1') + + # restore FULL backup + node_restored_1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) + node_restored_1.cleanup() + + self.restore_node( + backup_dir, 'node', + node_restored_1, options=["--db-include=db1"]) + + self.set_auto_conf( + node_restored_1, + {'port': node_restored_1.port, 'hot_standby': 'off'}) + + node_restored_1.slow_start() + + node_restored_1.safe_psql( + 'db1', + 'select 1') + + try: + node_restored_1.safe_psql( + 'postgres', + 'select 1') + except QueryException as e: + self.assertIn('FATAL', e.message) + + def test_pg_12_probackup_recovery_conf_compatibility(self): + """ + https://github.com/postgrespro/pg_probackup/issues/249 + + pg_probackup version must be 12 or greater + """ + if not self.probackup_old_path: + self.skipTest("You must specify PGPROBACKUPBIN_OLD" + " to run this test") + if self.pg_config_version < self.version_to_num('12.0'): + self.skipTest('You need PostgreSQL >= 12 for this test') + + if self.version_to_num(self.old_probackup_version) >= self.version_to_num('2.4.5'): + self.assertTrue(False, 'You need pg_probackup < 2.4.5 for this test') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) 
+ node.slow_start() + + # FULL backup + self.backup_node(backup_dir, 'node', node, old_binary=True) + + node.pgbench_init(scale=5) + + node.safe_psql( + 'postgres', + 'CREATE TABLE t1 as SELECT * from pgbench_accounts where aid > 200000 and aid < 450000') + + time = node.safe_psql( + 'SELECT current_timestamp(0)::timestamptz;').decode('utf-8').rstrip() + + node.safe_psql( + 'postgres', + 'DELETE from pgbench_accounts where aid > 200000 and aid < 450000') + + node.cleanup() + + self.restore_node( + backup_dir, 'node',node, + options=[ + "--recovery-target-time={0}".format(time), + "--recovery-target-action=promote"], + old_binary=True) + + node.slow_start() + + self.backup_node(backup_dir, 'node', node, old_binary=True) + + node.pgbench_init(scale=5) + + xid = node.safe_psql( + 'SELECT txid_current()').decode('utf-8').rstrip() + node.pgbench_init(scale=1) + + node.cleanup() + + self.restore_node( + backup_dir, 'node',node, + options=[ + "--recovery-target-xid={0}".format(xid), + "--recovery-target-action=promote"]) + + node.slow_start() + + def test_drop_postgresql_auto_conf(self): + """ + https://github.com/postgrespro/pg_probackup/issues/249 + + pg_probackup version must be 12 or greater + """ + + if self.pg_config_version < self.version_to_num('12.0'): + self.skipTest('You need PostgreSQL >= 12 for this test') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + self.backup_node(backup_dir, 'node', node) + + # drop postgresql.auto.conf + auto_path = os.path.join(node.data_dir, "postgresql.auto.conf") + os.remove(auto_path) + + self.backup_node(backup_dir, 'node', node, backup_type='page') + + node.cleanup() + + self.restore_node( + backup_dir, 'node',node, + options=[ + "--recovery-target=latest", + "--recovery-target-action=promote"]) + + node.slow_start() + + self.assertTrue(os.path.exists(auto_path)) + + def test_truncate_postgresql_auto_conf(self): + """ + https://github.com/postgrespro/pg_probackup/issues/249 + + pg_probackup version must be 12 or greater + """ + + if self.pg_config_version < self.version_to_num('12.0'): + self.skipTest('You need PostgreSQL >= 12 for this test') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + self.backup_node(backup_dir, 'node', node) + + # truncate postgresql.auto.conf + auto_path = os.path.join(node.data_dir, "postgresql.auto.conf") + with open(auto_path, "w+") as f: + f.truncate() + + self.backup_node(backup_dir, 'node', node, backup_type='page') + + node.cleanup() + + self.restore_node( + backup_dir, 'node',node, + options=[ + "--recovery-target=latest", + "--recovery-target-action=promote"]) + node.slow_start() + + self.assertTrue(os.path.exists(auto_path)) + + # @unittest.skip("skip") + def test_concurrent_restore(self): + """""" + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + 
node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=1) + + # FULL backup + self.backup_node( + backup_dir, 'node', node, + options=['--stream', '--compress']) + + pgbench = node.pgbench(options=['-T', '7', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # DELTA backup + self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--stream', '--compress', '--no-validate']) + + pgdata1 = self.pgdata_content(node.data_dir) + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + node.cleanup() + node_restored.cleanup() + + gdb = self.restore_node( + backup_dir, 'node', node, options=['--no-validate'], gdb=True) + + gdb.set_breakpoint('restore_data_file') + gdb.run_until_break() + + self.restore_node( + backup_dir, 'node', node_restored, options=['--no-validate']) + + gdb.remove_all_breakpoints() + gdb.continue_execution_until_exit() + + pgdata2 = self.pgdata_content(node.data_dir) + pgdata3 = self.pgdata_content(node_restored.data_dir) + + self.compare_pgdata(pgdata1, pgdata2) + self.compare_pgdata(pgdata2, pgdata3) + + # skip this test until https://github.com/postgrespro/pg_probackup/pull/399 + @unittest.skip("skip") + def test_restore_issue_313(self): + """ + Check that partially restored PostgreSQL instance cannot be started + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + node.cleanup() + + count = 0 + filelist = self.get_backup_filelist(backup_dir, 'node', backup_id) + for file in filelist: + # count only nondata files + if int(filelist[file]['is_datafile']) == 0 and int(filelist[file]['size']) > 0: + count += 1 + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + self.restore_node(backup_dir, 'node', node_restored) + + gdb = self.restore_node(backup_dir, 'node', node, gdb=True, options=['--progress']) + gdb.verbose = False + gdb.set_breakpoint('restore_non_data_file') + gdb.run_until_break() + gdb.continue_execution_until_break(count - 2) + gdb.quit() + + # emulate the user or HA taking care of PG configuration + for fname in os.listdir(node_restored.data_dir): + if fname.endswith('.conf'): + os.rename( + os.path.join(node_restored.data_dir, fname), + os.path.join(node.data_dir, fname)) + + try: + node.slow_start() + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because backup is not fully restored") + except StartNodeException as e: + self.assertIn( + 'Cannot start node', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_restore_with_waldir(self): + """recovery using tablespace-mapping option and page backup""" + node = self.make_simple_node( + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + + with node.connect("postgres") as con: + con.execute( + "CREATE TABLE tbl AS SELECT * " + "FROM generate_series(0,3) AS integer") + con.commit() + + # Full backup + backup_id = self.backup_node(backup_dir, 'node', node) + + node.stop() + node.cleanup() + + # Create waldir + waldir_path = os.path.join(node.base_dir, "waldir") + os.makedirs(waldir_path) + + # Test recovery from latest + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, + options=[ + "-X", "%s" % (waldir_path)]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + + count = node.execute("postgres", "SELECT count(*) FROM tbl") + self.assertEqual(count[0][0], 4) + + # check pg_wal is symlink + if node.major_version >= 10: + wal_path=os.path.join(node.data_dir, "pg_wal") + else: + wal_path=os.path.join(node.data_dir, "pg_xlog") + + self.assertEqual(os.path.islink(wal_path), True) diff --git a/tests/retention_test.py b/tests/retention_test.py new file mode 100644 index 000000000..88432a00f --- /dev/null +++ b/tests/retention_test.py @@ -0,0 +1,2529 @@ +import os +import unittest +from datetime import datetime, timedelta +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from time import sleep +from distutils.dir_util import copy_tree + + +class RetentionTest(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_retention_redundancy_1(self): + """purge backups using redundancy-based retention policy""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.set_config( + backup_dir, 'node', options=['--retention-redundancy=1']) + + # Make backups to be purged + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type="page") + # Make backups to be keeped + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type="page") + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) + + output_before = self.show_archive(backup_dir, 'node', tli=1) + + # Purge backups + self.delete_expired( + backup_dir, 'node', options=['--expired', '--wal']) + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) + + output_after = self.show_archive(backup_dir, 'node', tli=1) + + self.assertEqual( + output_before['max-segno'], + output_after['max-segno']) + + self.assertNotEqual( + output_before['min-segno'], + output_after['min-segno']) + + # Check that WAL segments were deleted + min_wal = output_after['min-segno'] + max_wal = output_after['max-segno'] + + for wal_name in os.listdir(os.path.join(backup_dir, 'wal', 'node')): + if not wal_name.endswith(".backup"): + + if self.archive_compress: + wal_name = wal_name[-27:] + wal_name = wal_name[:-3] + else: + wal_name = wal_name[-24:] + + self.assertTrue(wal_name >= min_wal) + 
self.assertTrue(wal_name <= max_wal) + + # @unittest.skip("skip") + def test_retention_window_2(self): + """purge backups using window-based retention policy""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + with open( + os.path.join( + backup_dir, + 'backups', + 'node', + "pg_probackup.conf"), "a") as conf: + conf.write("retention-redundancy = 1\n") + conf.write("retention-window = 1\n") + + # Make backups to be purged + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type="page") + # Make backup to be keeped + self.backup_node(backup_dir, 'node', node) + + backups = os.path.join(backup_dir, 'backups', 'node') + days_delta = 5 + for backup in os.listdir(backups): + if backup == 'pg_probackup.conf': + continue + with open( + os.path.join( + backups, backup, "backup.control"), "a") as conf: + conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=days_delta))) + days_delta -= 1 + + # Make backup to be keeped + self.backup_node(backup_dir, 'node', node, backup_type="page") + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) + + # Purge backups + self.delete_expired(backup_dir, 'node', options=['--expired']) + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) + + # @unittest.skip("skip") + def test_retention_window_3(self): + """purge all backups using window-based retention policy""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # take FULL BACKUP + self.backup_node(backup_dir, 'node', node) + + # Take second FULL BACKUP + self.backup_node(backup_dir, 'node', node) + + # Take third FULL BACKUP + self.backup_node(backup_dir, 'node', node) + + backups = os.path.join(backup_dir, 'backups', 'node') + for backup in os.listdir(backups): + if backup == 'pg_probackup.conf': + continue + with open( + os.path.join( + backups, backup, "backup.control"), "a") as conf: + conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3))) + + # Purge backups + self.delete_expired( + backup_dir, 'node', options=['--retention-window=1', '--expired']) + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 0) + + print(self.show_pb( + backup_dir, 'node', as_json=False, as_text=True)) + + # count wal files in ARCHIVE + + # @unittest.skip("skip") + def test_retention_window_4(self): + """purge all backups using window-based retention policy""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # take FULL BACKUPs + self.backup_node(backup_dir, 'node', node) + + backup_id_2 = self.backup_node(backup_dir, 'node', node) + + backup_id_3 = 
self.backup_node(backup_dir, 'node', node) + + backups = os.path.join(backup_dir, 'backups', 'node') + for backup in os.listdir(backups): + if backup == 'pg_probackup.conf': + continue + with open( + os.path.join( + backups, backup, "backup.control"), "a") as conf: + conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3))) + + self.delete_pb(backup_dir, 'node', backup_id_2) + self.delete_pb(backup_dir, 'node', backup_id_3) + + # Purge backups + self.delete_expired( + backup_dir, 'node', + options=['--retention-window=1', '--expired', '--wal']) + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 0) + + print(self.show_pb( + backup_dir, 'node', as_json=False, as_text=True)) + + # count wal files in ARCHIVE + wals_dir = os.path.join(backup_dir, 'wal', 'node') + # n_wals = len(os.listdir(wals_dir)) + + # self.assertTrue(n_wals > 0) + + # self.delete_expired( + # backup_dir, 'node', + # options=['--retention-window=1', '--expired', '--wal']) + + # count again + n_wals = len(os.listdir(wals_dir)) + self.assertTrue(n_wals == 0) + + # @unittest.skip("skip") + def test_window_expire_interleaved_incremental_chains(self): + """complicated case of interleaved backup chains""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # take FULL BACKUPs + backup_id_a = self.backup_node(backup_dir, 'node', node) + backup_id_b = self.backup_node(backup_dir, 'node', node) + + # Change FULLb backup status to ERROR + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + # FULLb ERROR + # FULLa OK + + # Take PAGEa1 backup + page_id_a1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + # Change FULLb backup status to OK + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + # Change PAGEa1 and FULLa to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') + + # PAGEa1 ERROR + # FULLb OK + # FULLa ERROR + + page_id_b1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEb1 OK + # PAGEa1 ERROR + # FULLb OK + # FULLa ERROR + + # Now we start to play with first generation of PAGE backups + # Change PAGEb1 and FULLb to ERROR + self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + # Change PAGEa1 and FULLa to OK + self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') + + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + page_id_a2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEa2 OK + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + # Change PAGEa2 and FULLa to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') + + # Change PAGEb1 and FULLb to OK + self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + # PAGEa2 ERROR + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa 
ERROR + + page_id_b2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Change PAGEa2 and FULla to OK + self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') + + # PAGEb2 OK + # PAGEa2 OK + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa OK + + # Purge backups + backups = os.path.join(backup_dir, 'backups', 'node') + for backup in os.listdir(backups): + if backup not in [page_id_a2, page_id_b2, 'pg_probackup.conf']: + with open( + os.path.join( + backups, backup, "backup.control"), "a") as conf: + conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3))) + + self.delete_expired( + backup_dir, 'node', + options=['--retention-window=1', '--expired']) + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 6) + + print(self.show_pb( + backup_dir, 'node', as_json=False, as_text=True)) + + # @unittest.skip("skip") + def test_redundancy_expire_interleaved_incremental_chains(self): + """complicated case of interleaved backup chains""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # take FULL BACKUPs + backup_id_a = self.backup_node(backup_dir, 'node', node) + backup_id_b = self.backup_node(backup_dir, 'node', node) + + # Change FULL B backup status to ERROR + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + # FULLb ERROR + # FULLa OK + # Take PAGEa1 backup + page_id_a1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + # Change FULLb backup status to OK + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + # Change PAGEa1 and FULLa backup status to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') + + # PAGEa1 ERROR + # FULLb OK + # FULLa ERROR + + page_id_b1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEb1 OK + # PAGEa1 ERROR + # FULLb OK + # FULLa ERROR + + # Now we start to play with first generation of PAGE backups + # Change PAGEb1 status to ERROR + self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + # Change PAGEa1 status to OK + self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') + + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + page_id_a2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEa2 OK + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + # Change PAGEa2 and FULLa status to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') + + # Change PAGEb1 and FULLb status to OK + self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + # PAGEa2 ERROR + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa ERROR + self.backup_node(backup_dir, 'node', node, backup_type='page') + + # Change PAGEa2 and FULLa status to OK + 
self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') + + # PAGEb2 OK + # PAGEa2 OK + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa OK + + self.delete_expired( + backup_dir, 'node', + options=['--retention-redundancy=1', '--expired']) + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 3) + + print(self.show_pb( + backup_dir, 'node', as_json=False, as_text=True)) + + # @unittest.skip("skip") + def test_window_merge_interleaved_incremental_chains(self): + """complicated case of interleaved backup chains""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Take FULL BACKUPs + backup_id_a = self.backup_node(backup_dir, 'node', node) + backup_id_b = self.backup_node(backup_dir, 'node', node) + + # Change FULLb backup status to ERROR + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + # FULLb ERROR + # FULLa OK + + # Take PAGEa1 backup + page_id_a1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + # Change FULLb to OK + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + # Change PAGEa1 backup status to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR') + + # PAGEa1 ERROR + # FULLb OK + # FULLa OK + + page_id_b1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEb1 OK + # PAGEa1 ERROR + # FULLb OK + # FULLa OK + + # Now we start to play with first generation of PAGE backups + # Change PAGEb1 and FULLb to ERROR + self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + # Change PAGEa1 to OK + self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK') + + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + page_id_a2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEa2 OK + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + # Change PAGEa2 and FULLa to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') + + # Change PAGEb1 and FULLb to OK + self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + # PAGEa2 ERROR + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa ERROR + + page_id_b2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Change PAGEa2 and FULLa to OK + self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') + + # PAGEb2 OK + # PAGEa2 OK + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa OK + + # Purge backups + backups = os.path.join(backup_dir, 'backups', 'node') + for backup in os.listdir(backups): + if backup not in [page_id_a2, page_id_b2, 'pg_probackup.conf']: + with open( + os.path.join( + backups, backup, "backup.control"), "a") as conf: + conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3))) + + output = self.delete_expired( + backup_dir, 'node', + options=['--retention-window=1', 
'--expired', '--merge-expired']) + + self.assertIn( + "Merge incremental chain between full backup {0} and backup {1}".format( + backup_id_a, page_id_a2), + output) + + self.assertIn( + "Rename merged full backup {0} to {1}".format( + backup_id_a, page_id_a2), output) + + self.assertIn( + "Merge incremental chain between full backup {0} and backup {1}".format( + backup_id_b, page_id_b2), + output) + + self.assertIn( + "Rename merged full backup {0} to {1}".format( + backup_id_b, page_id_b2), output) + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) + + # @unittest.skip("skip") + def test_window_merge_interleaved_incremental_chains_1(self): + """ + PAGEb3 + PAGEb2 + PAGEb1 + PAGEa1 + FULLb + FULLa + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=5) + + # Take FULL BACKUPs + self.backup_node(backup_dir, 'node', node) + pgbench = node.pgbench(options=['-t', '20', '-c', '1']) + pgbench.wait() + + backup_id_b = self.backup_node(backup_dir, 'node', node) + pgbench = node.pgbench(options=['-t', '20', '-c', '1']) + pgbench.wait() + + # Change FULL B backup status to ERROR + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + page_id_a1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + pgdata_a1 = self.pgdata_content(node.data_dir) + + pgbench = node.pgbench(options=['-t', '20', '-c', '1']) + pgbench.wait() + + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + # Change FULL B backup status to OK + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + # Change PAGEa1 backup status to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR') + + # PAGEa1 ERROR + # FULLb OK + # FULLa OK + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + pgbench = node.pgbench(options=['-t', '20', '-c', '1']) + pgbench.wait() + + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + pgbench = node.pgbench(options=['-t', '20', '-c', '1']) + pgbench.wait() + + page_id_b3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + pgdata_b3 = self.pgdata_content(node.data_dir) + + pgbench = node.pgbench(options=['-t', '20', '-c', '1']) + pgbench.wait() + + # PAGEb3 OK + # PAGEb2 OK + # PAGEb1 OK + # PAGEa1 ERROR + # FULLb OK + # FULLa OK + + # Change PAGEa1 backup status to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK') + + # PAGEb3 OK + # PAGEb2 OK + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa OK + + # Purge backups + backups = os.path.join(backup_dir, 'backups', 'node') + for backup in os.listdir(backups): + if backup in [page_id_a1, page_id_b3, 'pg_probackup.conf']: + continue + + with open( + os.path.join( + backups, backup, "backup.control"), "a") as conf: + conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3))) + + self.delete_expired( + backup_dir, 'node', + options=['--retention-window=1', '--expired', '--merge-expired']) + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) + + self.assertEqual( + self.show_pb(backup_dir, 'node')[1]['id'], + page_id_b3) + + self.assertEqual( + self.show_pb(backup_dir, 'node')[0]['id'], + page_id_a1) + + self.assertEqual( + 
self.show_pb(backup_dir, 'node')[1]['backup-mode'], + 'FULL') + + self.assertEqual( + self.show_pb(backup_dir, 'node')[0]['backup-mode'], + 'FULL') + + node.cleanup() + + # Data correctness of PAGEa3 + self.restore_node(backup_dir, 'node', node, backup_id=page_id_a1) + pgdata_restored_a1 = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata_a1, pgdata_restored_a1) + + node.cleanup() + + # Data correctness of PAGEb3 + self.restore_node(backup_dir, 'node', node, backup_id=page_id_b3) + pgdata_restored_b3 = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata_b3, pgdata_restored_b3) + + # @unittest.skip("skip") + def test_basic_window_merge_multiple_descendants(self): + """ + PAGEb3 + | PAGEa3 + -----------------------------retention window + PAGEb2 / + | PAGEa2 / should be deleted + PAGEb1 \ / + | PAGEa1 + FULLb | + FULLa + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=3) + + # Take FULL BACKUPs + backup_id_a = self.backup_node(backup_dir, 'node', node) + # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + # pgbench.wait() + + backup_id_b = self.backup_node(backup_dir, 'node', node) + # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + # pgbench.wait() + + # Change FULLb backup status to ERROR + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + page_id_a1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + # pgbench.wait() + + # Change FULLb to OK + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + # Change PAGEa1 to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR') + + # PAGEa1 ERROR + # FULLb OK + # FULLa OK + + page_id_b1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEb1 OK + # PAGEa1 ERROR + # FULLb OK + # FULLa OK + + # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + # pgbench.wait() + + # Change PAGEa1 to OK + self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK') + + # Change PAGEb1 and FULLb to ERROR + self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + page_id_a2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + # pgbench.wait() + + # PAGEa2 OK + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + # Change PAGEb1 and FULLb to OK + self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + # Change PAGEa2 and FULLa to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') + + # PAGEa2 ERROR + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa ERROR + + page_id_b2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + # pgbench.wait() + + # PAGEb2 OK + # PAGEa2 ERROR + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa ERROR + + # Change PAGEb2 
and PAGEb1 to ERROR + self.change_backup_status(backup_dir, 'node', page_id_b2, 'ERROR') + self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') + + # and FULL stuff + self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + # PAGEb2 ERROR + # PAGEa2 ERROR + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + page_id_a3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + # pgbench.wait() + + # PAGEa3 OK + # PAGEb2 ERROR + # PAGEa2 ERROR + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + # Change PAGEa3 to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a3, 'ERROR') + + # Change PAGEb2, PAGEb1 and FULLb to OK + self.change_backup_status(backup_dir, 'node', page_id_b2, 'OK') + self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + page_id_b3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEb3 OK + # PAGEa3 ERROR + # PAGEb2 OK + # PAGEa2 ERROR + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa OK + + # Change PAGEa3, PAGEa2 and PAGEb1 status to OK + self.change_backup_status(backup_dir, 'node', page_id_a3, 'OK') + self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') + self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') + + # PAGEb3 OK + # PAGEa3 OK + # PAGEb2 OK + # PAGEa2 OK + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa OK + + # Check that page_id_a3 and page_id_a2 are both direct descendants of page_id_a1 + self.assertEqual( + self.show_pb( + backup_dir, 'node', backup_id=page_id_a3)['parent-backup-id'], + page_id_a1) + + self.assertEqual( + self.show_pb( + backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'], + page_id_a1) + + # Purge backups + backups = os.path.join(backup_dir, 'backups', 'node') + for backup in os.listdir(backups): + if backup in [page_id_a3, page_id_b3, 'pg_probackup.conf']: + continue + + with open( + os.path.join( + backups, backup, "backup.control"), "a") as conf: + conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3))) + + output = self.delete_expired( + backup_dir, 'node', + options=[ + '--retention-window=1', '--delete-expired', + '--merge-expired', '--log-level-console=log']) + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) + + # Merging chain A + self.assertIn( + "Merge incremental chain between full backup {0} and backup {1}".format( + backup_id_a, page_id_a3), + output) + + self.assertIn( + "INFO: Rename merged full backup {0} to {1}".format( + backup_id_a, page_id_a3), output) + +# self.assertIn( +# "WARNING: Backup {0} has multiple valid descendants. 
" +# "Automatic merge is not possible.".format( +# page_id_a1), output) + + self.assertIn( + "LOG: Consider backup {0} for purge".format( + page_id_a2), output) + + # Merge chain B + self.assertIn( + "Merge incremental chain between full backup {0} and backup {1}".format( + backup_id_b, page_id_b3), + output) + + self.assertIn( + "INFO: Rename merged full backup {0} to {1}".format( + backup_id_b, page_id_b3), output) + + self.assertIn( + "Delete: {0}".format(page_id_a2), output) + + self.assertEqual( + self.show_pb(backup_dir, 'node')[1]['id'], + page_id_b3) + + self.assertEqual( + self.show_pb(backup_dir, 'node')[0]['id'], + page_id_a3) + + self.assertEqual( + self.show_pb(backup_dir, 'node')[1]['backup-mode'], + 'FULL') + + self.assertEqual( + self.show_pb(backup_dir, 'node')[0]['backup-mode'], + 'FULL') + + # @unittest.skip("skip") + def test_basic_window_merge_multiple_descendants_1(self): + """ + PAGEb3 + | PAGEa3 + -----------------------------retention window + PAGEb2 / + | PAGEa2 / + PAGEb1 \ / + | PAGEa1 + FULLb | + FULLa + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=3) + + # Take FULL BACKUPs + backup_id_a = self.backup_node(backup_dir, 'node', node) + # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + # pgbench.wait() + + backup_id_b = self.backup_node(backup_dir, 'node', node) + # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + # pgbench.wait() + + # Change FULLb backup status to ERROR + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + page_id_a1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + # pgbench.wait() + + # Change FULLb to OK + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + # Change PAGEa1 to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR') + + # PAGEa1 ERROR + # FULLb OK + # FULLa OK + + page_id_b1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEb1 OK + # PAGEa1 ERROR + # FULLb OK + # FULLa OK + + # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + # pgbench.wait() + + # Change PAGEa1 to OK + self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK') + + # Change PAGEb1 and FULLb to ERROR + self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + page_id_a2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + # pgbench.wait() + + # PAGEa2 OK + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + # Change PAGEb1 and FULLb to OK + self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + # Change PAGEa2 and FULLa to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR') + self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') + + # PAGEa2 ERROR + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa ERROR + + page_id_b2 = self.backup_node( + backup_dir, 'node', node, 
backup_type='page') + + # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + # pgbench.wait() + + # PAGEb2 OK + # PAGEa2 ERROR + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa ERROR + + # Change PAGEb2 and PAGEb1 to ERROR + self.change_backup_status(backup_dir, 'node', page_id_b2, 'ERROR') + self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') + + # and FULL stuff + self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + # PAGEb2 ERROR + # PAGEa2 ERROR + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + page_id_a3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + # pgbench.wait() + + # PAGEa3 OK + # PAGEb2 ERROR + # PAGEa2 ERROR + # PAGEb1 ERROR + # PAGEa1 OK + # FULLb ERROR + # FULLa OK + + # Change PAGEa3 to ERROR + self.change_backup_status(backup_dir, 'node', page_id_a3, 'ERROR') + + # Change PAGEb2, PAGEb1 and FULLb to OK + self.change_backup_status(backup_dir, 'node', page_id_b2, 'OK') + self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') + self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') + + page_id_b3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGEb3 OK + # PAGEa3 ERROR + # PAGEb2 OK + # PAGEa2 ERROR + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa OK + + # Change PAGEa3, PAGEa2 and PAGEb1 status to OK + self.change_backup_status(backup_dir, 'node', page_id_a3, 'OK') + self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') + self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') + + # PAGEb3 OK + # PAGEa3 OK + # PAGEb2 OK + # PAGEa2 OK + # PAGEb1 OK + # PAGEa1 OK + # FULLb OK + # FULLa OK + + # Check that page_id_a3 and page_id_a2 are both direct descendants of page_id_a1 + self.assertEqual( + self.show_pb( + backup_dir, 'node', backup_id=page_id_a3)['parent-backup-id'], + page_id_a1) + + self.assertEqual( + self.show_pb( + backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'], + page_id_a1) + + # Purge backups + backups = os.path.join(backup_dir, 'backups', 'node') + for backup in os.listdir(backups): + if backup in [page_id_a3, page_id_b3, 'pg_probackup.conf']: + continue + + with open( + os.path.join( + backups, backup, "backup.control"), "a") as conf: + conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3))) + + output = self.delete_expired( + backup_dir, 'node', + options=[ + '--retention-window=1', + '--merge-expired', '--log-level-console=log']) + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 3) + + # Merging chain A + self.assertIn( + "Merge incremental chain between full backup {0} and backup {1}".format( + backup_id_a, page_id_a3), + output) + + self.assertIn( + "INFO: Rename merged full backup {0} to {1}".format( + backup_id_a, page_id_a3), output) + +# self.assertIn( +# "WARNING: Backup {0} has multiple valid descendants. 
" +# "Automatic merge is not possible.".format( +# page_id_a1), output) + + # Merge chain B + self.assertIn( + "Merge incremental chain between full backup {0} and backup {1}".format( + backup_id_b, page_id_b3), output) + + self.assertIn( + "INFO: Rename merged full backup {0} to {1}".format( + backup_id_b, page_id_b3), output) + + self.assertEqual( + self.show_pb(backup_dir, 'node')[2]['id'], + page_id_b3) + + self.assertEqual( + self.show_pb(backup_dir, 'node')[1]['id'], + page_id_a3) + + self.assertEqual( + self.show_pb(backup_dir, 'node')[0]['id'], + page_id_a2) + + self.assertEqual( + self.show_pb(backup_dir, 'node')[2]['backup-mode'], + 'FULL') + + self.assertEqual( + self.show_pb(backup_dir, 'node')[1]['backup-mode'], + 'FULL') + + self.assertEqual( + self.show_pb(backup_dir, 'node')[0]['backup-mode'], + 'PAGE') + + output = self.delete_expired( + backup_dir, 'node', + options=[ + '--retention-window=1', + '--delete-expired', '--log-level-console=log']) + + # @unittest.skip("skip") + def test_window_chains(self): + """ + PAGE + -------window + PAGE + PAGE + FULL + PAGE + PAGE + FULL + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=3) + + # Chain A + self.backup_node(backup_dir, 'node', node) + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Chain B + self.backup_node(backup_dir, 'node', node) + + pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + pgbench.wait() + + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + pgbench.wait() + + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + pgbench.wait() + + page_id_b3 = self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + pgdata = self.pgdata_content(node.data_dir) + + # Purge backups + backups = os.path.join(backup_dir, 'backups', 'node') + for backup in os.listdir(backups): + if backup in [page_id_b3, 'pg_probackup.conf']: + continue + + with open( + os.path.join( + backups, backup, "backup.control"), "a") as conf: + conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3))) + + self.delete_expired( + backup_dir, 'node', + options=[ + '--retention-window=1', '--expired', + '--merge-expired', '--log-level-console=log']) + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 1) + + node.cleanup() + + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_window_chains_1(self): + """ + PAGE + -------window + PAGE + PAGE + FULL + PAGE + PAGE + FULL + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=3) + + # Chain A + 
self.backup_node(backup_dir, 'node', node) + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Chain B + self.backup_node(backup_dir, 'node', node) + + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + page_id_b3 = self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + self.pgdata_content(node.data_dir) + + # Purge backups + backups = os.path.join(backup_dir, 'backups', 'node') + for backup in os.listdir(backups): + if backup in [page_id_b3, 'pg_probackup.conf']: + continue + + with open( + os.path.join( + backups, backup, "backup.control"), "a") as conf: + conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3))) + + output = self.delete_expired( + backup_dir, 'node', + options=[ + '--retention-window=1', + '--merge-expired', '--log-level-console=log']) + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) + + self.assertIn( + "There are no backups to delete by retention policy", + output) + + self.assertIn( + "Retention merging finished", + output) + + output = self.delete_expired( + backup_dir, 'node', + options=[ + '--retention-window=1', + '--expired', '--log-level-console=log']) + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 1) + + self.assertIn( + "There are no backups to merge by retention policy", + output) + + self.assertIn( + "Purging finished", + output) + + @unittest.skip("skip") + def test_window_error_backups(self): + """ + PAGE ERROR + -------window + PAGE ERROR + PAGE ERROR + PAGE ERROR + FULL ERROR + FULL + -------redundancy + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Take FULL BACKUPs + self.backup_node(backup_dir, 'node', node) + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Change FULLb backup status to ERROR + # self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') + + # @unittest.skip("skip") + def test_window_error_backups_1(self): + """ + DELTA + PAGE ERROR + FULL + -------window + """ + self._check_gdb_flag_or_skip_test() + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Take FULL BACKUP + self.backup_node(backup_dir, 'node', node) + + # Take PAGE BACKUP + gdb = self.backup_node( + backup_dir, 'node', node, backup_type='page', gdb=True) + + # Attention! 
this breakpoint has been set on internal probackup function, not on a postgres core one + gdb.set_breakpoint('pg_stop_backup') + gdb.run_until_break() + gdb.remove_all_breakpoints() + gdb._execute('signal SIGINT') + gdb.continue_execution_until_error() + + self.show_pb(backup_dir, 'node')[1]['id'] + + # Take DELTA backup + self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--retention-window=2', '--delete-expired']) + + # Take FULL BACKUP + self.backup_node(backup_dir, 'node', node) + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) + + # @unittest.skip("skip") + def test_window_error_backups_2(self): + """ + DELTA + PAGE ERROR + FULL + -------window + """ + self._check_gdb_flag_or_skip_test() + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Take FULL BACKUP + self.backup_node(backup_dir, 'node', node) + + # Take PAGE BACKUP + gdb = self.backup_node( + backup_dir, 'node', node, backup_type='page', gdb=True) + + # Attention! this breakpoint has been set on internal probackup function, not on a postgres core one + gdb.set_breakpoint('pg_stop_backup') + gdb.run_until_break() + gdb._execute('signal SIGKILL') + gdb.continue_execution_until_error() + + self.show_pb(backup_dir, 'node')[1]['id'] + + if self.get_version(node) < 90600: + node.safe_psql( + 'postgres', + 'SELECT pg_catalog.pg_stop_backup()') + + # Take DELTA backup + self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--retention-window=2', '--delete-expired']) + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 3) + + def test_retention_redundancy_overlapping_chains(self): + """""" + self._check_gdb_flag_or_skip_test() + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + if self.get_version(node) < 90600: + self.skipTest('Skipped because ptrack support is disabled') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.set_config( + backup_dir, 'node', options=['--retention-redundancy=1']) + + # Make backups to be purged + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type="page") + + # Make backups to be keeped + gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + gdb.set_breakpoint('backup_files') + gdb.run_until_break() + + sleep(1) + + self.backup_node(backup_dir, 'node', node, backup_type="page") + + gdb.remove_all_breakpoints() + gdb.continue_execution_until_exit() + + self.backup_node(backup_dir, 'node', node, backup_type="page") + + # Purge backups + self.delete_expired( + backup_dir, 'node', options=['--expired', '--wal']) + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) + + self.validate_pb(backup_dir, 'node') + + def test_retention_redundancy_overlapping_chains_1(self): + """""" + self._check_gdb_flag_or_skip_test() + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + if self.get_version(node) < 90600: + 
self.skipTest('Skipped because ptrack support is disabled') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.set_config( + backup_dir, 'node', options=['--retention-redundancy=1']) + + # Make backups to be purged + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type="page") + + # Make backups to be keeped + gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + gdb.set_breakpoint('backup_files') + gdb.run_until_break() + + sleep(1) + + self.backup_node(backup_dir, 'node', node, backup_type="page") + + gdb.remove_all_breakpoints() + gdb.continue_execution_until_exit() + + self.backup_node(backup_dir, 'node', node, backup_type="page") + + # Purge backups + self.delete_expired( + backup_dir, 'node', options=['--expired', '--wal']) + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) + + self.validate_pb(backup_dir, 'node') + + def test_wal_purge_victim(self): + """ + https://github.com/postgrespro/pg_probackup/issues/103 + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Make ERROR incremental backup + try: + self.backup_node(backup_dir, 'node', node, backup_type='page') + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because page backup should not be possible " + "without valid full backup.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + "WARNING: Valid full backup on current timeline 1 is not found" in e.message and + "ERROR: Create new full backup before an incremental one" in e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + page_id = self.show_pb(backup_dir, 'node')[0]['id'] + + sleep(1) + + # Make FULL backup + full_id = self.backup_node(backup_dir, 'node', node, options=['--delete-wal']) + + try: + self.validate_pb(backup_dir, 'node') + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because page backup should not be possible " + "without valid full backup.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "INFO: Backup {0} WAL segments are valid".format(full_id), + e.message) + self.assertIn( + "WARNING: Backup {0} has missing parent 0".format(page_id), + e.message) + + # @unittest.skip("skip") + def test_failed_merge_redundancy_retention(self): + """ + Check that retention purge works correctly with MERGING backups + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join( + self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL1 backup + full_id = 
self.backup_node(backup_dir, 'node', node) + + # DELTA BACKUP + delta_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + # DELTA BACKUP + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + # DELTA BACKUP + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + # FULL2 backup + self.backup_node(backup_dir, 'node', node) + + # DELTA BACKUP + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + # DELTA BACKUP + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + # FULL3 backup + self.backup_node(backup_dir, 'node', node) + + # DELTA BACKUP + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + # DELTA BACKUP + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + self.set_config( + backup_dir, 'node', options=['--retention-redundancy=2']) + + self.set_config( + backup_dir, 'node', options=['--retention-window=2']) + + # create pair of MERGING backup as a result of failed merge + gdb = self.merge_backup( + backup_dir, 'node', delta_id, gdb=True) + gdb.set_breakpoint('backup_non_data_file') + gdb.run_until_break() + gdb.continue_execution_until_break(2) + gdb._execute('signal SIGKILL') + + # "expire" first full backup + backups = os.path.join(backup_dir, 'backups', 'node') + with open( + os.path.join( + backups, full_id, "backup.control"), "a") as conf: + conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3))) + + # run retention merge + self.delete_expired( + backup_dir, 'node', options=['--delete-expired']) + + self.assertEqual( + 'MERGING', + self.show_pb(backup_dir, 'node', full_id)['status'], + 'Backup STATUS should be "MERGING"') + + self.assertEqual( + 'MERGING', + self.show_pb(backup_dir, 'node', delta_id)['status'], + 'Backup STATUS should be "MERGING"') + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 10) + + def test_wal_depth_1(self): + """ + |-------------B5----------> WAL timeline3 + |-----|-------------------------> WAL timeline2 + B1 B2---| B3 B4-------B6-----> WAL timeline1 + + wal-depth=2 + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'archive_timeout': '30s', + 'checkpoint_timeout': '30s'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + + self.set_config(backup_dir, 'node', options=['--archive-timeout=60s']) + + node.slow_start() + + # FULL + node.pgbench_init(scale=1) + self.backup_node(backup_dir, 'node', node) + + # PAGE + node.pgbench_init(scale=1) + B2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # generate_some more data + node.pgbench_init(scale=1) + + target_xid = node.safe_psql( + "postgres", + "select txid_current()").decode('utf-8').rstrip() + + node.pgbench_init(scale=1) + + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + node.pgbench_init(scale=1) + + self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Timeline 2 + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + + node_restored.cleanup() + + output = self.restore_node( + backup_dir, 'node', node_restored, + options=[ + '--recovery-target-xid={0}'.format(target_xid), + '--recovery-target-action=promote']) + + 
self.assertIn( + 'Restore of backup {0} completed'.format(B2), + output) + + self.set_auto_conf(node_restored, options={'port': node_restored.port}) + + node_restored.slow_start() + + node_restored.pgbench_init(scale=1) + + target_xid = node_restored.safe_psql( + "postgres", + "select txid_current()").decode('utf-8').rstrip() + + node_restored.pgbench_init(scale=2) + + # Timeline 3 + node_restored.cleanup() + + output = self.restore_node( + backup_dir, 'node', node_restored, + options=[ + '--recovery-target-xid={0}'.format(target_xid), + '--recovery-target-timeline=2', + '--recovery-target-action=promote']) + + self.assertIn( + 'Restore of backup {0} completed'.format(B2), + output) + + self.set_auto_conf(node_restored, options={'port': node_restored.port}) + + node_restored.slow_start() + + node_restored.pgbench_init(scale=1) + self.backup_node( + backup_dir, 'node', node_restored, data_dir=node_restored.data_dir) + + node.pgbench_init(scale=1) + self.backup_node(backup_dir, 'node', node) + + lsn = self.show_archive(backup_dir, 'node', tli=2)['switchpoint'] + + self.validate_pb( + backup_dir, 'node', backup_id=B2, + options=['--recovery-target-lsn={0}'.format(lsn)]) + + self.validate_pb(backup_dir, 'node') + + def test_wal_purge(self): + """ + -------------------------------------> tli5 + ---------------------------B6--------> tli4 + S2`---------------> tli3 + S1`------------S2---B4-------B5--> tli2 + B1---S1-------------B2--------B3------> tli1 + + B* - backups + S* - switchpoints + + Expected result: + TLI5 will be purged entirely + B6--------> tli4 + S2`---------------> tli3 + S1`------------S2---B4-------B5--> tli2 + B1---S1-------------B2--------B3------> tli1 + + wal-depth=2 + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_config(backup_dir, 'node', options=['--archive-timeout=60s']) + + node.slow_start() + + # STREAM FULL + stream_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + node.stop() + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL + B1 = self.backup_node(backup_dir, 'node', node) + node.pgbench_init(scale=1) + + target_xid = node.safe_psql( + "postgres", + "select txid_current()").decode('utf-8').rstrip() + node.pgbench_init(scale=5) + + # B2 FULL on TLI1 + self.backup_node(backup_dir, 'node', node) + node.pgbench_init(scale=4) + self.backup_node(backup_dir, 'node', node) + node.pgbench_init(scale=4) + + self.delete_pb(backup_dir, 'node', options=['--delete-wal']) + + # TLI 2 + node_tli2 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_tli2')) + node_tli2.cleanup() + + output = self.restore_node( + backup_dir, 'node', node_tli2, + options=[ + '--recovery-target-xid={0}'.format(target_xid), + '--recovery-target-timeline=1', + '--recovery-target-action=promote']) + + self.assertIn( + 'INFO: Restore of backup {0} completed'.format(B1), + output) + + self.set_auto_conf(node_tli2, options={'port': node_tli2.port}) + node_tli2.slow_start() + node_tli2.pgbench_init(scale=4) + + target_xid = node_tli2.safe_psql( + "postgres", + "select txid_current()").decode('utf-8').rstrip() + node_tli2.pgbench_init(scale=1) + + self.backup_node( + backup_dir, 'node', node_tli2, data_dir=node_tli2.data_dir) + 
node_tli2.pgbench_init(scale=3) + + self.backup_node( + backup_dir, 'node', node_tli2, data_dir=node_tli2.data_dir) + node_tli2.pgbench_init(scale=1) + node_tli2.cleanup() + + # TLI3 + node_tli3 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_tli3')) + node_tli3.cleanup() + + # Note, that successful validation here is a happy coincidence + output = self.restore_node( + backup_dir, 'node', node_tli3, + options=[ + '--recovery-target-xid={0}'.format(target_xid), + '--recovery-target-timeline=2', + '--recovery-target-action=promote']) + + self.assertIn( + 'INFO: Restore of backup {0} completed'.format(B1), + output) + self.set_auto_conf(node_tli3, options={'port': node_tli3.port}) + node_tli3.slow_start() + node_tli3.pgbench_init(scale=5) + node_tli3.cleanup() + + # TLI4 + node_tli4 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_tli4')) + node_tli4.cleanup() + + self.restore_node( + backup_dir, 'node', node_tli4, backup_id=stream_id, + options=[ + '--recovery-target=immediate', + '--recovery-target-action=promote']) + + self.set_auto_conf(node_tli4, options={'port': node_tli4.port}) + self.set_archiving(backup_dir, 'node', node_tli4) + node_tli4.slow_start() + + node_tli4.pgbench_init(scale=5) + + self.backup_node( + backup_dir, 'node', node_tli4, data_dir=node_tli4.data_dir) + node_tli4.pgbench_init(scale=5) + node_tli4.cleanup() + + # TLI5 + node_tli5 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_tli5')) + node_tli5.cleanup() + + self.restore_node( + backup_dir, 'node', node_tli5, backup_id=stream_id, + options=[ + '--recovery-target=immediate', + '--recovery-target-action=promote']) + + self.set_auto_conf(node_tli5, options={'port': node_tli5.port}) + self.set_archiving(backup_dir, 'node', node_tli5) + node_tli5.slow_start() + node_tli5.pgbench_init(scale=10) + + # delete '.history' file of TLI4 + os.remove(os.path.join(backup_dir, 'wal', 'node', '00000004.history')) + # delete '.history' file of TLI5 + os.remove(os.path.join(backup_dir, 'wal', 'node', '00000005.history')) + + output = self.delete_pb( + backup_dir, 'node', + options=[ + '--delete-wal', '--dry-run', + '--log-level-console=verbose']) + + self.assertIn( + 'INFO: On timeline 4 WAL segments between 000000040000000000000002 ' + 'and 000000040000000000000006 can be removed', + output) + + self.assertIn( + 'INFO: On timeline 5 all files can be removed', + output) + + show_tli1_before = self.show_archive(backup_dir, 'node', tli=1) + show_tli2_before = self.show_archive(backup_dir, 'node', tli=2) + show_tli3_before = self.show_archive(backup_dir, 'node', tli=3) + show_tli4_before = self.show_archive(backup_dir, 'node', tli=4) + show_tli5_before = self.show_archive(backup_dir, 'node', tli=5) + + self.assertTrue(show_tli1_before) + self.assertTrue(show_tli2_before) + self.assertTrue(show_tli3_before) + self.assertTrue(show_tli4_before) + self.assertTrue(show_tli5_before) + + output = self.delete_pb( + backup_dir, 'node', + options=['--delete-wal', '--log-level-console=verbose']) + + self.assertIn( + 'INFO: On timeline 4 WAL segments between 000000040000000000000002 ' + 'and 000000040000000000000006 will be removed', + output) + + self.assertIn( + 'INFO: On timeline 5 all files will be removed', + output) + + show_tli1_after = self.show_archive(backup_dir, 'node', tli=1) + show_tli2_after = self.show_archive(backup_dir, 'node', tli=2) + show_tli3_after = self.show_archive(backup_dir, 'node', tli=3) + show_tli4_after = 
self.show_archive(backup_dir, 'node', tli=4) + show_tli5_after = self.show_archive(backup_dir, 'node', tli=5) + + self.assertEqual(show_tli1_before, show_tli1_after) + self.assertEqual(show_tli2_before, show_tli2_after) + self.assertEqual(show_tli3_before, show_tli3_after) + self.assertNotEqual(show_tli4_before, show_tli4_after) + self.assertNotEqual(show_tli5_before, show_tli5_after) + + self.assertEqual( + show_tli4_before['min-segno'], + '000000040000000000000002') + + self.assertEqual( + show_tli4_after['min-segno'], + '000000040000000000000006') + + self.assertFalse(show_tli5_after) + + self.validate_pb(backup_dir, 'node') + + def test_wal_depth_2(self): + """ + -------------------------------------> tli5 + ---------------------------B6--------> tli4 + S2`---------------> tli3 + S1`------------S2---B4-------B5--> tli2 + B1---S1-------------B2--------B3------> tli1 + + B* - backups + S* - switchpoints + wal-depth=2 + + Expected result: + TLI5 will be purged entirely + B6--------> tli4 + S2`---------------> tli3 + S1`------------S2 B4-------B5--> tli2 + B1---S1 B2--------B3------> tli1 + + wal-depth=2 + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_config(backup_dir, 'node', options=['--archive-timeout=60s']) + + node.slow_start() + + # STREAM FULL + stream_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + node.stop() + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL + B1 = self.backup_node(backup_dir, 'node', node) + node.pgbench_init(scale=1) + + target_xid = node.safe_psql( + "postgres", + "select txid_current()").decode('utf-8').rstrip() + node.pgbench_init(scale=5) + + # B2 FULL on TLI1 + B2 = self.backup_node(backup_dir, 'node', node) + node.pgbench_init(scale=4) + self.backup_node(backup_dir, 'node', node) + node.pgbench_init(scale=4) + + # TLI 2 + node_tli2 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_tli2')) + node_tli2.cleanup() + + output = self.restore_node( + backup_dir, 'node', node_tli2, + options=[ + '--recovery-target-xid={0}'.format(target_xid), + '--recovery-target-timeline=1', + '--recovery-target-action=promote']) + + self.assertIn( + 'INFO: Restore of backup {0} completed'.format(B1), + output) + + self.set_auto_conf(node_tli2, options={'port': node_tli2.port}) + node_tli2.slow_start() + node_tli2.pgbench_init(scale=4) + + target_xid = node_tli2.safe_psql( + "postgres", + "select txid_current()").decode('utf-8').rstrip() + node_tli2.pgbench_init(scale=1) + + B4 = self.backup_node( + backup_dir, 'node', node_tli2, data_dir=node_tli2.data_dir) + node_tli2.pgbench_init(scale=3) + + self.backup_node( + backup_dir, 'node', node_tli2, data_dir=node_tli2.data_dir) + node_tli2.pgbench_init(scale=1) + node_tli2.cleanup() + + # TLI3 + node_tli3 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_tli3')) + node_tli3.cleanup() + + # Note, that successful validation here is a happy coincidence + output = self.restore_node( + backup_dir, 'node', node_tli3, + options=[ + '--recovery-target-xid={0}'.format(target_xid), + '--recovery-target-timeline=2', + '--recovery-target-action=promote']) + + self.assertIn( + 'INFO: Restore of backup {0} completed'.format(B1), + output) + 
self.set_auto_conf(node_tli3, options={'port': node_tli3.port}) + node_tli3.slow_start() + node_tli3.pgbench_init(scale=5) + node_tli3.cleanup() + + # TLI4 + node_tli4 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_tli4')) + node_tli4.cleanup() + + self.restore_node( + backup_dir, 'node', node_tli4, backup_id=stream_id, + options=[ + '--recovery-target=immediate', + '--recovery-target-action=promote']) + + self.set_auto_conf(node_tli4, options={'port': node_tli4.port}) + self.set_archiving(backup_dir, 'node', node_tli4) + node_tli4.slow_start() + + node_tli4.pgbench_init(scale=5) + + self.backup_node( + backup_dir, 'node', node_tli4, data_dir=node_tli4.data_dir) + node_tli4.pgbench_init(scale=5) + node_tli4.cleanup() + + # TLI5 + node_tli5 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_tli5')) + node_tli5.cleanup() + + self.restore_node( + backup_dir, 'node', node_tli5, backup_id=stream_id, + options=[ + '--recovery-target=immediate', + '--recovery-target-action=promote']) + + self.set_auto_conf(node_tli5, options={'port': node_tli5.port}) + self.set_archiving(backup_dir, 'node', node_tli5) + node_tli5.slow_start() + node_tli5.pgbench_init(scale=10) + + # delete '.history' file of TLI4 + os.remove(os.path.join(backup_dir, 'wal', 'node', '00000004.history')) + # delete '.history' file of TLI5 + os.remove(os.path.join(backup_dir, 'wal', 'node', '00000005.history')) + + output = self.delete_pb( + backup_dir, 'node', + options=[ + '--delete-wal', '--dry-run', + '--wal-depth=2', '--log-level-console=verbose']) + + start_lsn_B2 = self.show_pb(backup_dir, 'node', B2)['start-lsn'] + self.assertIn( + 'On timeline 1 WAL is protected from purge at {0}'.format(start_lsn_B2), + output) + + self.assertIn( + 'LOG: Archive backup {0} to stay consistent protect from ' + 'purge WAL interval between 000000010000000000000004 ' + 'and 000000010000000000000005 on timeline 1'.format(B1), output) + + start_lsn_B4 = self.show_pb(backup_dir, 'node', B4)['start-lsn'] + self.assertIn( + 'On timeline 2 WAL is protected from purge at {0}'.format(start_lsn_B4), + output) + + self.assertIn( + 'LOG: Timeline 3 to stay reachable from timeline 1 protect ' + 'from purge WAL interval between 000000020000000000000006 and ' + '000000020000000000000009 on timeline 2', output) + + self.assertIn( + 'LOG: Timeline 3 to stay reachable from timeline 1 protect ' + 'from purge WAL interval between 000000010000000000000004 and ' + '000000010000000000000006 on timeline 1', output) + + show_tli1_before = self.show_archive(backup_dir, 'node', tli=1) + show_tli2_before = self.show_archive(backup_dir, 'node', tli=2) + show_tli3_before = self.show_archive(backup_dir, 'node', tli=3) + show_tli4_before = self.show_archive(backup_dir, 'node', tli=4) + show_tli5_before = self.show_archive(backup_dir, 'node', tli=5) + + self.assertTrue(show_tli1_before) + self.assertTrue(show_tli2_before) + self.assertTrue(show_tli3_before) + self.assertTrue(show_tli4_before) + self.assertTrue(show_tli5_before) + + sleep(5) + + output = self.delete_pb( + backup_dir, 'node', + options=['--delete-wal', '--wal-depth=2', '--log-level-console=verbose']) + +# print(output) + + show_tli1_after = self.show_archive(backup_dir, 'node', tli=1) + show_tli2_after = self.show_archive(backup_dir, 'node', tli=2) + show_tli3_after = self.show_archive(backup_dir, 'node', tli=3) + show_tli4_after = self.show_archive(backup_dir, 'node', tli=4) + show_tli5_after = self.show_archive(backup_dir, 'node', tli=5) + 
+ self.assertNotEqual(show_tli1_before, show_tli1_after) + self.assertNotEqual(show_tli2_before, show_tli2_after) + self.assertEqual(show_tli3_before, show_tli3_after) + self.assertNotEqual(show_tli4_before, show_tli4_after) + self.assertNotEqual(show_tli5_before, show_tli5_after) + + self.assertEqual( + show_tli4_before['min-segno'], + '000000040000000000000002') + + self.assertEqual( + show_tli4_after['min-segno'], + '000000040000000000000006') + + self.assertFalse(show_tli5_after) + + self.assertTrue(show_tli1_after['lost-segments']) + self.assertTrue(show_tli2_after['lost-segments']) + self.assertFalse(show_tli3_after['lost-segments']) + self.assertFalse(show_tli4_after['lost-segments']) + self.assertFalse(show_tli5_after) + + self.assertEqual(len(show_tli1_after['lost-segments']), 1) + self.assertEqual(len(show_tli2_after['lost-segments']), 1) + + self.assertEqual( + show_tli1_after['lost-segments'][0]['begin-segno'], + '000000010000000000000007') + + self.assertEqual( + show_tli1_after['lost-segments'][0]['end-segno'], + '00000001000000000000000A') + + self.assertEqual( + show_tli2_after['lost-segments'][0]['begin-segno'], + '00000002000000000000000A') + + self.assertEqual( + show_tli2_after['lost-segments'][0]['end-segno'], + '00000002000000000000000A') + + self.validate_pb(backup_dir, 'node') + + def test_basic_wal_depth(self): + """ + B1---B2----B3-----B4----B5------> tli1 + + Expected result with wal-depth=1: + B1 B2 B3 B4 B5------> tli1 + + wal-depth=1 + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_config(backup_dir, 'node', options=['--archive-timeout=60s']) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL + node.pgbench_init(scale=1) + B1 = self.backup_node(backup_dir, 'node', node) + + + # B2 + pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + pgbench.wait() + B2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # B3 + pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + pgbench.wait() + B3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # B4 + pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + pgbench.wait() + B4 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # B5 + pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + pgbench.wait() + B5 = self.backup_node( + backup_dir, 'node', node, backup_type='page', + options=['--wal-depth=1', '--delete-wal']) + + pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + pgbench.wait() + + target_xid = node.safe_psql( + "postgres", + "select txid_current()").decode('utf-8').rstrip() + + self.switch_wal_segment(node) + + pgbench = node.pgbench(options=['-T', '10', '-c', '2']) + pgbench.wait() + + tli1 = self.show_archive(backup_dir, 'node', tli=1) + + # check that there are 4 lost_segments intervals + self.assertEqual(len(tli1['lost-segments']), 4) + + output = self.validate_pb( + backup_dir, 'node', B5, + options=['--recovery-target-xid={0}'.format(target_xid)]) + + print(output) + + self.assertIn( + 'INFO: Backup validation completed successfully on time', + output) + + self.assertIn( + 'xid {0} and LSN'.format(target_xid), + output) + + for backup_id in [B1, B2, B3, B4]: + try: + self.validate_pb( + backup_dir, 'node', backup_id, + 
options=['--recovery-target-xid={0}'.format(target_xid)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because page backup should not be possible " + "without valid full backup.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Not enough WAL records to xid {0}".format(target_xid), + e.message) + + self.validate_pb(backup_dir, 'node') + + def test_concurrent_running_full_backup(self): + """ + https://github.com/postgrespro/pg_probackup/issues/328 + """ + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL + self.backup_node(backup_dir, 'node', node) + + gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + gdb.set_breakpoint('backup_data_file') + gdb.run_until_break() + gdb.kill() + + self.assertTrue( + self.show_pb(backup_dir, 'node')[0]['status'], + 'RUNNING') + + self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--retention-redundancy=2', '--delete-expired']) + + self.assertTrue( + self.show_pb(backup_dir, 'node')[1]['status'], + 'RUNNING') + + self.backup_node(backup_dir, 'node', node) + + gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + gdb.set_breakpoint('backup_data_file') + gdb.run_until_break() + gdb.kill() + + gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + gdb.set_breakpoint('backup_data_file') + gdb.run_until_break() + gdb.kill() + + self.backup_node(backup_dir, 'node', node) + + gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + gdb.set_breakpoint('backup_data_file') + gdb.run_until_break() + gdb.kill() + + self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--retention-redundancy=2', '--delete-expired'], + return_id=False) + + self.assertTrue( + self.show_pb(backup_dir, 'node')[0]['status'], + 'OK') + + self.assertTrue( + self.show_pb(backup_dir, 'node')[1]['status'], + 'RUNNING') + + self.assertTrue( + self.show_pb(backup_dir, 'node')[2]['status'], + 'OK') + + self.assertEqual( + len(self.show_pb(backup_dir, 'node')), + 6) diff --git a/tests/set_backup_test.py b/tests/set_backup_test.py new file mode 100644 index 000000000..e789d174a --- /dev/null +++ b/tests/set_backup_test.py @@ -0,0 +1,476 @@ +import unittest +import subprocess +import os +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from sys import exit +from datetime import datetime, timedelta + + +class SetBackupTest(ProbackupTest, unittest.TestCase): + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_set_backup_sanity(self): + """general sanity for set-backup command""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + backup_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + recovery_time = self.show_pb( + backup_dir, 'node', 
backup_id=backup_id)['recovery-time'] + + expire_time_1 = "{:%Y-%m-%d %H:%M:%S}".format( + datetime.now() + timedelta(days=5)) + + try: + self.set_backup(backup_dir, False, options=['--ttl=30d']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of missing instance. " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: required parameter not specified: --instance', + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + try: + self.set_backup( + backup_dir, 'node', + options=[ + "--ttl=30d", + "--expire-time='{0}'".format(expire_time_1)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because options cannot be mixed. " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: You cannot specify '--expire-time' " + "and '--ttl' options together", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + try: + self.set_backup(backup_dir, 'node', options=["--ttl=30d"]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because of missing backup_id. " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: You must specify parameter (-i, --backup-id) " + "for 'set-backup' command", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.set_backup( + backup_dir, 'node', backup_id, options=["--ttl=30d"]) + + actual_expire_time = self.show_pb( + backup_dir, 'node', backup_id=backup_id)['expire-time'] + + self.assertNotEqual(expire_time_1, actual_expire_time) + + expire_time_2 = "{:%Y-%m-%d %H:%M:%S}".format( + datetime.now() + timedelta(days=6)) + + self.set_backup( + backup_dir, 'node', backup_id, + options=["--expire-time={0}".format(expire_time_2)]) + + actual_expire_time = self.show_pb( + backup_dir, 'node', backup_id=backup_id)['expire-time'] + + self.assertIn(expire_time_2, actual_expire_time) + + # unpin backup + self.set_backup( + backup_dir, 'node', backup_id, options=["--ttl=0"]) + + attr_list = self.show_pb( + backup_dir, 'node', backup_id=backup_id) + + self.assertNotIn('expire-time', attr_list) + + self.set_backup( + backup_dir, 'node', backup_id, options=["--expire-time={0}".format(recovery_time)]) + + # parse string to datetime object + #new_expire_time = datetime.strptime(new_expire_time, '%Y-%m-%d %H:%M:%S%z') + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_retention_redundancy_pinning(self): + """""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + with open(os.path.join( + backup_dir, 'backups', 'node', + "pg_probackup.conf"), "a") as conf: + conf.write("retention-redundancy = 1\n") + + self.set_config( + backup_dir, 'node', options=['--retention-redundancy=1']) + + # Make backups to be purged + full_id = self.backup_node(backup_dir, 'node', node) + page_id = self.backup_node( 
+ backup_dir, 'node', node, backup_type="page") + # Make backups to be keeped + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type="page") + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) + + self.set_backup( + backup_dir, 'node', page_id, options=['--ttl=5d']) + + # Purge backups + log = self.delete_expired( + backup_dir, 'node', + options=['--delete-expired', '--log-level-console=LOG']) + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) + + self.assertIn('Time Window: 0d/5d', log) + self.assertIn( + 'LOG: Backup {0} is pinned until'.format(page_id), + log) + self.assertIn( + 'LOG: Retain backup {0} because his descendant ' + '{1} is guarded by retention'.format(full_id, page_id), + log) + + # @unittest.skip("skip") + def test_retention_window_pinning(self): + """purge all backups using window-based retention policy""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # take FULL BACKUP + backup_id_1 = self.backup_node(backup_dir, 'node', node) + page1 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Take second FULL BACKUP + backup_id_2 = self.backup_node(backup_dir, 'node', node) + page2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Take third FULL BACKUP + backup_id_3 = self.backup_node(backup_dir, 'node', node) + page2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + backups = os.path.join(backup_dir, 'backups', 'node') + for backup in os.listdir(backups): + if backup == 'pg_probackup.conf': + continue + with open( + os.path.join( + backups, backup, "backup.control"), "a") as conf: + conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3))) + + self.set_backup( + backup_dir, 'node', page1, options=['--ttl=30d']) + + # Purge backups + out = self.delete_expired( + backup_dir, 'node', + options=[ + '--log-level-console=LOG', + '--retention-window=1', + '--delete-expired']) + + self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) + + self.assertIn( + 'LOG: Backup {0} is pinned until'.format(page1), out) + + self.assertIn( + 'LOG: Retain backup {0} because his descendant ' + '{1} is guarded by retention'.format(backup_id_1, page1), + out) + + # @unittest.skip("skip") + def test_wal_retention_and_pinning(self): + """ + B1---B2---P---B3---> + wal-depth=2 + P - pinned backup + + expected result after WAL purge: + B1 B2---P---B3---> + + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # take FULL BACKUP + self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + node.pgbench_init(scale=1) + + # Take PAGE BACKUP + self.backup_node( + backup_dir, 'node', node, + backup_type='page', options=['--stream']) + + node.pgbench_init(scale=1) + + # Take DELTA BACKUP and pin it + expire_time = "{:%Y-%m-%d %H:%M:%S}".format( + datetime.now() + 
timedelta(days=6)) + backup_id_pinned = self.backup_node( + backup_dir, 'node', node, + backup_type='delta', + options=[ + '--stream', + '--expire-time={0}'.format(expire_time)]) + + node.pgbench_init(scale=1) + + # Take second DELTA BACKUP + self.backup_node( + backup_dir, 'node', node, backup_type='delta', options=['--stream']) + + node.pgbench_init(scale=1) + + # Purge backups + out = self.delete_expired( + backup_dir, 'node', + options=[ + '--log-level-console=LOG', + '--delete-wal', '--wal-depth=2']) + + # print(out) + self.assertIn( + 'Pinned backup {0} is ignored for the ' + 'purpose of WAL retention'.format(backup_id_pinned), + out) + + for instance in self.show_archive(backup_dir): + timelines = instance['timelines'] + + # sanity + for timeline in timelines: + self.assertEqual( + timeline['min-segno'], + '000000010000000000000004') + self.assertEqual(timeline['status'], 'OK') + + # @unittest.skip("skip") + def test_wal_retention_and_pinning_1(self): + """ + P---B1---> + wal-depth=2 + P - pinned backup + + expected result after WAL purge: + P---B1---> + + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + expire_time = "{:%Y-%m-%d %H:%M:%S}".format( + datetime.now() + timedelta(days=6)) + + # take FULL BACKUP + backup_id_pinned = self.backup_node( + backup_dir, 'node', node, + options=['--expire-time={0}'.format(expire_time)]) + + node.pgbench_init(scale=2) + + # Take DELTA BACKUP + self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + node.pgbench_init(scale=2) + + # Purge backups + out = self.delete_expired( + backup_dir, 'node', + options=[ + '--log-level-console=verbose', + '--delete-wal', '--wal-depth=2']) + + print(out) + self.assertIn( + 'Pinned backup {0} is ignored for the ' + 'purpose of WAL retention'.format(backup_id_pinned), + out) + + for instance in self.show_archive(backup_dir): + timelines = instance['timelines'] + + # sanity + for timeline in timelines: + self.assertEqual( + timeline['min-segno'], + '000000010000000000000002') + self.assertEqual(timeline['status'], 'OK') + + self.validate_pb(backup_dir) + + # @unittest.skip("skip") + def test_add_note_newlines(self): + """""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # FULL + backup_id = self.backup_node( + backup_dir, 'node', node, + options=['--stream', '--note={0}'.format('hello\nhello')]) + + backup_meta = self.show_pb(backup_dir, 'node', backup_id) + self.assertEqual(backup_meta['note'], "hello") + + self.set_backup(backup_dir, 'node', backup_id, options=['--note=hello\nhello']) + + backup_meta = self.show_pb(backup_dir, 'node', backup_id) + self.assertEqual(backup_meta['note'], "hello") + + self.set_backup(backup_dir, 'node', backup_id, options=['--note=none']) + + backup_meta = self.show_pb(backup_dir, 'node', backup_id) + self.assertNotIn('note', backup_meta) + + # @unittest.skip("skip") + def test_add_big_note(self): + """""" + node = self.make_simple_node( + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + +# note = node.safe_psql( +# "postgres", +# "SELECT repeat('hello', 400)").rstrip() # TODO: investigate + + note = node.safe_psql( + "postgres", + "SELECT repeat('hello', 210)").rstrip() + + # FULL + try: + self.backup_node( + backup_dir, 'node', node, + options=['--stream', '--note={0}'.format(note)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because note is too large " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Backup note cannot exceed 1024 bytes", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + note = node.safe_psql( + "postgres", + "SELECT repeat('hello', 200)").decode('utf-8').rstrip() + + backup_id = self.backup_node( + backup_dir, 'node', node, + options=['--stream', '--note={0}'.format(note)]) + + backup_meta = self.show_pb(backup_dir, 'node', backup_id) + self.assertEqual(backup_meta['note'], note) + + + # @unittest.skip("skip") + def test_add_big_note_1(self): + """""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + note = node.safe_psql( + "postgres", + "SELECT repeat('q', 1024)").decode('utf-8').rstrip() + + # FULL + backup_id = self.backup_node(backup_dir, 'node', node, options=['--stream']) + + self.set_backup( + backup_dir, 'node', backup_id, + options=['--note={0}'.format(note)]) + + backup_meta = self.show_pb(backup_dir, 'node', backup_id) + + print(backup_meta) + self.assertEqual(backup_meta['note'], note) diff --git a/tests/show_test.py b/tests/show_test.py new file mode 100644 index 000000000..c4b96499d --- /dev/null +++ b/tests/show_test.py @@ -0,0 +1,509 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException + + +class ShowTest(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_show_1(self): + """Status DONE and OK""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.assertEqual( + self.backup_node( + backup_dir, 'node', node, + options=["--log-level-console=off"]), + None + ) + self.assertIn("OK", self.show_pb(backup_dir, 'node', as_text=True)) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_show_json(self): + """Status DONE and OK""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + 
self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.assertEqual( + self.backup_node( + backup_dir, 'node', node, + options=["--log-level-console=off"]), + None + ) + self.backup_node(backup_dir, 'node', node) + self.assertIn("OK", self.show_pb(backup_dir, 'node', as_text=True)) + + # @unittest.skip("skip") + def test_corrupt_2(self): + """Status CORRUPT""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + backup_id = self.backup_node(backup_dir, 'node', node) + + # delete file which belong to backup + file = os.path.join( + backup_dir, "backups", "node", + backup_id, "database", "postgresql.conf") + os.remove(file) + + try: + self.validate_pb(backup_dir, 'node', backup_id) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because backup corrupted." + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd + ) + ) + except ProbackupException as e: + self.assertIn( + 'data files are corrupted', + e.message, + '\n Unexpected Error Message: {0}\n' + ' CMD: {1}'.format(repr(e.message), self.cmd) + ) + self.assertIn("CORRUPT", self.show_pb(backup_dir, as_text=True)) + + # @unittest.skip("skip") + def test_no_control_file(self): + """backup.control doesn't exist""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + backup_id = self.backup_node(backup_dir, 'node', node) + + # delete backup.control file + file = os.path.join( + backup_dir, "backups", "node", + backup_id, "backup.control") + os.remove(file) + + output = self.show_pb(backup_dir, 'node', as_text=True, as_json=False) + + self.assertIn( + 'Control file', + output) + + self.assertIn( + 'doesn\'t exist', + output) + + # @unittest.skip("skip") + def test_empty_control_file(self): + """backup.control is empty""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + backup_id = self.backup_node(backup_dir, 'node', node) + + # truncate backup.control file + file = os.path.join( + backup_dir, "backups", "node", + backup_id, "backup.control") + fd = open(file, 'w') + fd.close() + + output = self.show_pb(backup_dir, 'node', as_text=True, as_json=False) + + self.assertIn( + 'Control file', + output) + + self.assertIn( + 'is empty', + output) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_corrupt_control_file(self): + """backup.control contains invalid option""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + 
self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + backup_id = self.backup_node(backup_dir, 'node', node) + + # corrupt backup.control file + file = os.path.join( + backup_dir, "backups", "node", + backup_id, "backup.control") + fd = open(file, 'a') + fd.write("statuss = OK") + fd.close() + + self.assertIn( + 'WARNING: Invalid option "statuss" in file', + self.show_pb(backup_dir, 'node', as_json=False, as_text=True)) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_corrupt_correctness(self): + """backup.control contains invalid option""" + if not self.remote: + self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" + " for run this test") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=1) + + # FULL + backup_local_id = self.backup_node( + backup_dir, 'node', node, no_remote=True) + + output_local = self.show_pb( + backup_dir, 'node', as_json=False, backup_id=backup_local_id) + + backup_remote_id = self.backup_node(backup_dir, 'node', node) + + output_remote = self.show_pb( + backup_dir, 'node', as_json=False, backup_id=backup_remote_id) + + # check correctness + self.assertEqual( + output_local['data-bytes'], + output_remote['data-bytes']) + + self.assertEqual( + output_local['uncompressed-bytes'], + output_remote['uncompressed-bytes']) + + # DELTA + backup_local_id = self.backup_node( + backup_dir, 'node', node, + backup_type='delta', no_remote=True) + + output_local = self.show_pb( + backup_dir, 'node', as_json=False, backup_id=backup_local_id) + self.delete_pb(backup_dir, 'node', backup_local_id) + + backup_remote_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + output_remote = self.show_pb( + backup_dir, 'node', as_json=False, backup_id=backup_remote_id) + self.delete_pb(backup_dir, 'node', backup_remote_id) + + # check correctness + self.assertEqual( + output_local['data-bytes'], + output_remote['data-bytes']) + + self.assertEqual( + output_local['uncompressed-bytes'], + output_remote['uncompressed-bytes']) + + # PAGE + backup_local_id = self.backup_node( + backup_dir, 'node', node, + backup_type='page', no_remote=True) + + output_local = self.show_pb( + backup_dir, 'node', as_json=False, backup_id=backup_local_id) + self.delete_pb(backup_dir, 'node', backup_local_id) + + backup_remote_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + output_remote = self.show_pb( + backup_dir, 'node', as_json=False, backup_id=backup_remote_id) + self.delete_pb(backup_dir, 'node', backup_remote_id) + + # check correctness + self.assertEqual( + output_local['data-bytes'], + output_remote['data-bytes']) + + self.assertEqual( + output_local['uncompressed-bytes'], + output_remote['uncompressed-bytes']) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_corrupt_correctness_1(self): + """backup.control contains invalid option""" + if not self.remote: + self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" + " for run this test") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + 
initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=1) + + # FULL + backup_local_id = self.backup_node( + backup_dir, 'node', node, no_remote=True) + + output_local = self.show_pb( + backup_dir, 'node', as_json=False, backup_id=backup_local_id) + + backup_remote_id = self.backup_node(backup_dir, 'node', node) + + output_remote = self.show_pb( + backup_dir, 'node', as_json=False, backup_id=backup_remote_id) + + # check correctness + self.assertEqual( + output_local['data-bytes'], + output_remote['data-bytes']) + + self.assertEqual( + output_local['uncompressed-bytes'], + output_remote['uncompressed-bytes']) + + # change data + pgbench = node.pgbench(options=['-T', '10', '--no-vacuum']) + pgbench.wait() + + # DELTA + backup_local_id = self.backup_node( + backup_dir, 'node', node, + backup_type='delta', no_remote=True) + + output_local = self.show_pb( + backup_dir, 'node', as_json=False, backup_id=backup_local_id) + self.delete_pb(backup_dir, 'node', backup_local_id) + + backup_remote_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta') + + output_remote = self.show_pb( + backup_dir, 'node', as_json=False, backup_id=backup_remote_id) + self.delete_pb(backup_dir, 'node', backup_remote_id) + + # check correctness + self.assertEqual( + output_local['data-bytes'], + output_remote['data-bytes']) + + self.assertEqual( + output_local['uncompressed-bytes'], + output_remote['uncompressed-bytes']) + + # PAGE + backup_local_id = self.backup_node( + backup_dir, 'node', node, + backup_type='page', no_remote=True) + + output_local = self.show_pb( + backup_dir, 'node', as_json=False, backup_id=backup_local_id) + self.delete_pb(backup_dir, 'node', backup_local_id) + + backup_remote_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + output_remote = self.show_pb( + backup_dir, 'node', as_json=False, backup_id=backup_remote_id) + self.delete_pb(backup_dir, 'node', backup_remote_id) + + # check correctness + self.assertEqual( + output_local['data-bytes'], + output_remote['data-bytes']) + + self.assertEqual( + output_local['uncompressed-bytes'], + output_remote['uncompressed-bytes']) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_corrupt_correctness_2(self): + """local and remote backups with compression must report identical size fields""" + if not self.remote: + self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" + " to run this test") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=1) + + # FULL + backup_local_id = self.backup_node( + backup_dir, 'node', node, + options=['--compress'], no_remote=True) + + output_local = self.show_pb( + backup_dir, 'node', as_json=False, backup_id=backup_local_id) + + if self.remote: + backup_remote_id = self.backup_node( + backup_dir, 'node', node, options=['--compress']) + else: + backup_remote_id = self.backup_node( + backup_dir, 'node', node, + options=['--remote-proto=ssh', '--remote-host=localhost', '--compress']) + + output_remote = self.show_pb( + backup_dir, 'node', as_json=False, backup_id=backup_remote_id) + + # check correctness + self.assertEqual( + 
output_local['data-bytes'], + output_remote['data-bytes']) + + self.assertEqual( + output_local['uncompressed-bytes'], + output_remote['uncompressed-bytes']) + + # change data + pgbench = node.pgbench(options=['-T', '10', '--no-vacuum']) + pgbench.wait() + + # DELTA + backup_local_id = self.backup_node( + backup_dir, 'node', node, + backup_type='delta', options=['--compress'], no_remote=True) + + output_local = self.show_pb( + backup_dir, 'node', as_json=False, backup_id=backup_local_id) + self.delete_pb(backup_dir, 'node', backup_local_id) + + if self.remote: + backup_remote_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta', options=['--compress']) + else: + backup_remote_id = self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--remote-proto=ssh', '--remote-host=localhost', '--compress']) + + output_remote = self.show_pb( + backup_dir, 'node', as_json=False, backup_id=backup_remote_id) + self.delete_pb(backup_dir, 'node', backup_remote_id) + + # check correctness + self.assertEqual( + output_local['data-bytes'], + output_remote['data-bytes']) + + self.assertEqual( + output_local['uncompressed-bytes'], + output_remote['uncompressed-bytes']) + + # PAGE + backup_local_id = self.backup_node( + backup_dir, 'node', node, + backup_type='page', options=['--compress'], no_remote=True) + + output_local = self.show_pb( + backup_dir, 'node', as_json=False, backup_id=backup_local_id) + self.delete_pb(backup_dir, 'node', backup_local_id) + + if self.remote: + backup_remote_id = self.backup_node( + backup_dir, 'node', node, backup_type='page', options=['--compress']) + else: + backup_remote_id = self.backup_node( + backup_dir, 'node', node, backup_type='page', + options=['--remote-proto=ssh', '--remote-host=localhost', '--compress']) + + output_remote = self.show_pb( + backup_dir, 'node', as_json=False, backup_id=backup_remote_id) + self.delete_pb(backup_dir, 'node', backup_remote_id) + + # check correctness + self.assertEqual( + output_local['data-bytes'], + output_remote['data-bytes']) + + self.assertEqual( + output_local['uncompressed-bytes'], + output_remote['uncompressed-bytes']) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_color_with_no_terminal(self): + """error output must not contain ANSI escape sequences when stdout is not a terminal""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + pg_options={'autovacuum': 'off'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=1) + + # FULL + try: + self.backup_node( + backup_dir, 'node', node, options=['--archive-timeout=1s']) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because archiving is disabled\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertNotIn( + '[0m', e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) diff --git a/tests/time_consuming_test.py b/tests/time_consuming_test.py new file mode 100644 index 000000000..c0038c085 --- /dev/null +++ b/tests/time_consuming_test.py @@ -0,0 +1,77 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest +import subprocess +from time import sleep + + +class TimeConsumingTests(ProbackupTest, unittest.TestCase): + def 
test_pbckp150(self): + """ + https://jira.postgrespro.ru/browse/PBCKP-150 + create a node filled with pgbench + create FULL backup followed by PTRACK backup + run pgbench, vacuum VERBOSE FULL and ptrack backups in parallel + """ + # init node + if self.pg_config_version < self.version_to_num('11.0'): + self.skipTest('You need PostgreSQL >= 11 for this test') + if not self.ptrack: + self.skipTest('Skipped because ptrack support is disabled') + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums'], + pg_options={ + 'max_connections': 100, + 'log_statement': 'none', + 'log_checkpoints': 'on', + 'autovacuum': 'off', + 'ptrack.map_size': 1}) + + if node.major_version >= 13: + self.set_auto_conf(node, {'wal_keep_size': '16000MB'}) + else: + self.set_auto_conf(node, {'wal_keep_segments': '1000'}) + + # init probackup and add an instance + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + + # run the node and init ptrack + node.slow_start() + node.safe_psql("postgres", "CREATE EXTENSION ptrack") + # populate it with pgbench + node.pgbench_init(scale=5) + + # FULL backup followed by PTRACK backup + self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) + + # run ordinary pgbench scenario to imitate some activity and another pgbench for vacuuming in parallel + nBenchDuration = 30 + pgbench = node.pgbench(options=['-c', '20', '-j', '8', '-T', str(nBenchDuration)]) + with open('/tmp/pbckp150vacuum.sql', 'w') as f: + f.write('VACUUM (FULL) pgbench_accounts, pgbench_tellers, pgbench_history; SELECT pg_sleep(1);\n') + pgbenchval = node.pgbench(options=['-c', '1', '-f', '/tmp/pbckp150vacuum.sql', '-T', str(nBenchDuration)]) + + # several PTRACK backups + for i in range(nBenchDuration): + print("[{}] backing up PTRACK diff...".format(i+1)) + self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream', '--log-level-console', 'VERBOSE']) + sleep(0.1) + # if the activity pgbench has finished, stop backing up + if pgbench.poll() is not None: + break + + pgbench.kill() + pgbenchval.kill() + pgbench.wait() + pgbenchval.wait() + + backups = self.show_pb(backup_dir, 'node') + for b in backups: + self.assertEqual("OK", b['status']) diff --git a/tests/time_stamp_test.py b/tests/time_stamp_test.py new file mode 100644 index 000000000..170c62cd4 --- /dev/null +++ b/tests/time_stamp_test.py @@ -0,0 +1,236 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +import subprocess +from time import sleep + + +class TimeStamp(ProbackupTest, unittest.TestCase): + + def test_start_time_format(self): + """Test backup ID changing after start-time editing in backup.control. 
+ We should convert local time in UTC format""" + # Create simple node + node = self.make_simple_node( + base_dir="{0}/{1}/node".format(self.module_name, self.fname), + set_replication=True, + initdb_params=['--data-checksums']) + + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.start() + + backup_id = self.backup_node(backup_dir, 'node', node, options=['--stream', '-j 2']) + show_backup = self.show_pb(backup_dir, 'node') + + i = 0 + while i < 2: + with open(os.path.join(backup_dir, "backups", "node", backup_id, "backup.control"), "r+") as f: + output = "" + for line in f: + if line.startswith('start-time') is True: + if i == 0: + output = output + str(line[:-5])+'+00\''+'\n' + else: + output = output + str(line[:-5]) + '\'' + '\n' + else: + output = output + str(line) + f.close() + + with open(os.path.join(backup_dir, "backups", "node", backup_id, "backup.control"), "w") as fw: + fw.write(output) + fw.flush() + show_backup = show_backup + self.show_pb(backup_dir, 'node') + i += 1 + + print(show_backup[1]['id']) + print(show_backup[2]['id']) + + self.assertTrue(show_backup[1]['id'] == show_backup[2]['id'], "ERROR: Localtime format using instead of UTC") + + output = self.show_pb(backup_dir, as_json=False, as_text=True) + self.assertNotIn("backup ID in control file", output) + + node.stop() + + def test_server_date_style(self): + """Issue #112""" + node = self.make_simple_node( + base_dir="{0}/{1}/node".format(self.module_name, self.fname), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={"datestyle": "GERMAN, DMY"}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.start() + + self.backup_node( + backup_dir, 'node', node, options=['--stream', '-j 2']) + + def test_handling_of_TZ_env_variable(self): + """Issue #284""" + node = self.make_simple_node( + base_dir="{0}/{1}/node".format(self.module_name, self.fname), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.start() + + my_env = os.environ.copy() + my_env["TZ"] = "America/Detroit" + + self.backup_node( + backup_dir, 'node', node, options=['--stream', '-j 2'], env=my_env) + + output = self.show_pb(backup_dir, 'node', as_json=False, as_text=True, env=my_env) + + self.assertNotIn("backup ID in control file", output) + + @unittest.skip("skip") + # @unittest.expectedFailure + def test_dst_timezone_handling(self): + """for manual testing""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + print(subprocess.Popen( + ['sudo', 'timedatectl', 'set-timezone', 'America/Detroit'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).communicate()) + + subprocess.Popen( + ['sudo', 'timedatectl', 'set-ntp', 'false'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).communicate() + + subprocess.Popen( + ['sudo', 'timedatectl', 'set-time', '2020-05-25 12:00:00'], + 
stdout=subprocess.PIPE, + stderr=subprocess.PIPE).communicate() + + # FULL + output = self.backup_node(backup_dir, 'node', node, return_id=False) + self.assertNotIn("backup ID in control file", output) + + # move to dst + subprocess.Popen( + ['sudo', 'timedatectl', 'set-time', '2020-10-25 12:00:00'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).communicate() + + # DELTA + output = self.backup_node( + backup_dir, 'node', node, backup_type='delta', return_id=False) + self.assertNotIn("backup ID in control file", output) + + subprocess.Popen( + ['sudo', 'timedatectl', 'set-time', '2020-12-01 12:00:00'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).communicate() + + # DELTA + self.backup_node(backup_dir, 'node', node, backup_type='delta') + + output = self.show_pb(backup_dir, as_json=False, as_text=True) + self.assertNotIn("backup ID in control file", output) + + subprocess.Popen( + ['sudo', 'timedatectl', 'set-ntp', 'true'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).communicate() + + sleep(10) + + self.backup_node(backup_dir, 'node', node, backup_type='delta') + + output = self.show_pb(backup_dir, as_json=False, as_text=True) + self.assertNotIn("backup ID in control file", output) + + subprocess.Popen( + ['sudo', 'timedatectl', 'set-timezone', 'US/Moscow'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).communicate() + + @unittest.skip("skip") + def test_dst_timezone_handling_backward_compatibilty(self): + """for manual testing""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + subprocess.Popen( + ['sudo', 'timedatectl', 'set-timezone', 'America/Detroit'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).communicate() + + subprocess.Popen( + ['sudo', 'timedatectl', 'set-ntp', 'false'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).communicate() + + subprocess.Popen( + ['sudo', 'timedatectl', 'set-time', '2020-05-25 12:00:00'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).communicate() + + # FULL + self.backup_node(backup_dir, 'node', node, old_binary=True, return_id=False) + + # move to dst + subprocess.Popen( + ['sudo', 'timedatectl', 'set-time', '2020-10-25 12:00:00'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).communicate() + + # DELTA + output = self.backup_node( + backup_dir, 'node', node, backup_type='delta', old_binary=True, return_id=False) + + subprocess.Popen( + ['sudo', 'timedatectl', 'set-time', '2020-12-01 12:00:00'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).communicate() + + # DELTA + self.backup_node(backup_dir, 'node', node, backup_type='delta') + + output = self.show_pb(backup_dir, as_json=False, as_text=True) + self.assertNotIn("backup ID in control file", output) + + subprocess.Popen( + ['sudo', 'timedatectl', 'set-ntp', 'true'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).communicate() + + sleep(10) + + self.backup_node(backup_dir, 'node', node, backup_type='delta') + + output = self.show_pb(backup_dir, as_json=False, as_text=True) + self.assertNotIn("backup ID in control file", output) + + subprocess.Popen( + ['sudo', 'timedatectl', 'set-timezone', 'US/Moscow'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).communicate() diff --git a/tests/validate_test.py b/tests/validate_test.py new 
file mode 100644 index 000000000..98a0fd13f --- /dev/null +++ b/tests/validate_test.py @@ -0,0 +1,4083 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from datetime import datetime, timedelta +from pathlib import Path +import subprocess +from sys import exit +import time +import hashlib + + +class ValidateTest(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_basic_validate_nullified_heap_page_backup(self): + """ + make node with nullified heap block + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=3) + + file_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('pgbench_accounts')").decode('utf-8').rstrip() + + node.safe_psql( + "postgres", + "CHECKPOINT") + + # Nullify some block in PostgreSQL + file = os.path.join(node.data_dir, file_path) + with open(file, 'r+b') as f: + f.seek(8192) + f.write(b"\x00"*8192) + f.flush() + f.close + + self.backup_node( + backup_dir, 'node', node, options=['--log-level-file=verbose']) + + pgdata = self.pgdata_content(node.data_dir) + + if not self.remote: + log_file_path = os.path.join(backup_dir, "log", "pg_probackup.log") + with open(log_file_path) as f: + log_content = f.read() + self.assertIn( + 'File: "{0}" blknum 1, empty page'.format(Path(file).as_posix()), + log_content, + 'Failed to detect nullified block') + + self.validate_pb(backup_dir, options=["-j", "4"]) + node.cleanup() + + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_validate_wal_unreal_values(self): + """ + make node with archiving, make archive backup + validate to both real and unreal values + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=3) + with node.connect("postgres") as con: + con.execute("CREATE TABLE tbl0005 (a text)") + con.commit() + + backup_id = self.backup_node(backup_dir, 'node', node) + + node.pgbench_init(scale=3) + + target_time = self.show_pb( + backup_dir, 'node', backup_id)['recovery-time'] + after_backup_time = datetime.now().replace(second=0, microsecond=0) + + # Validate to real time + self.assertIn( + "INFO: Backup validation completed successfully", + self.validate_pb( + backup_dir, 'node', + options=["--time={0}".format(target_time), "-j", "4"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + # Validate to unreal time + unreal_time_1 = after_backup_time - timedelta(days=2) + try: + self.validate_pb( + backup_dir, 'node', options=["--time={0}".format( + unreal_time_1), "-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of validation to unreal time.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except 
ProbackupException as e: + self.assertIn( + 'ERROR: Backup satisfying target options is not found', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # Validate to unreal time #2 + unreal_time_2 = after_backup_time + timedelta(days=2) + try: + self.validate_pb( + backup_dir, 'node', + options=["--time={0}".format(unreal_time_2), "-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of validation to unreal time.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'ERROR: Not enough WAL records to time' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # Validate to real xid + target_xid = None + with node.connect("postgres") as con: + res = con.execute( + "INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") + con.commit() + target_xid = res[0][0] + self.switch_wal_segment(node) + time.sleep(5) + + self.assertIn( + "INFO: Backup validation completed successfully", + self.validate_pb( + backup_dir, 'node', options=["--xid={0}".format(target_xid), + "-j", "4"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + # Validate to unreal xid + unreal_xid = int(target_xid) + 1000 + try: + self.validate_pb( + backup_dir, 'node', options=["--xid={0}".format(unreal_xid), + "-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of validation to unreal xid.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'ERROR: Not enough WAL records to xid' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # Validate with backup ID + output = self.validate_pb(backup_dir, 'node', backup_id, + options=["-j", "4"]) + self.assertIn( + "INFO: Validating backup {0}".format(backup_id), + output, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + self.assertIn( + "INFO: Backup {0} data files are valid".format(backup_id), + output, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + self.assertIn( + "INFO: Backup {0} WAL segments are valid".format(backup_id), + output, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + self.assertIn( + "INFO: Backup {0} is valid".format(backup_id), + output, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + self.assertIn( + "INFO: Validate of backup {0} completed".format(backup_id), + output, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + # @unittest.skip("skip") + def test_basic_validate_corrupted_intermediate_backup(self): + """ + make archive node, take FULL, PAGE1, PAGE2 backups, + corrupt file in PAGE1 backup, + run validate on PAGE1, expect PAGE1 to gain status CORRUPT + and PAGE2 gain status ORPHAN + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL + backup_id_1 = self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "create table t_heap as select i as id, 
md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + file_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + # PAGE1 + backup_id_2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(10000,20000) i") + # PAGE2 + backup_id_3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Corrupt some file + file = os.path.join( + backup_dir, 'backups', 'node', + backup_id_2, 'database', file_path) + with open(file, "r+b", 0) as f: + f.seek(42) + f.write(b"blah") + f.flush() + f.close + + # Simple validate + try: + self.validate_pb( + backup_dir, 'node', backup_id=backup_id_2, options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of data files corruption.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'INFO: Validating parents for backup {0}'.format( + backup_id_2) in e.message and + 'ERROR: Backup {0} is corrupt'.format( + backup_id_2) in e.message and + 'WARNING: Backup {0} data files are corrupted'.format( + backup_id_2) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'CORRUPT', + self.show_pb(backup_dir, 'node', backup_id_2)['status'], + 'Backup STATUS should be "CORRUPT"') + self.assertEqual( + 'ORPHAN', + self.show_pb(backup_dir, 'node', backup_id_3)['status'], + 'Backup STATUS should be "ORPHAN"') + + # @unittest.skip("skip") + def test_validate_corrupted_intermediate_backups(self): + """ + make archive node, take FULL, PAGE1, PAGE2 backups, + corrupt file in FULL and PAGE1 backups, run validate on PAGE1, + expect FULL and PAGE1 to gain status CORRUPT and + PAGE2 gain status ORPHAN + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + file_path_t_heap = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + # FULL + backup_id_1 = self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "create table t_heap_1 as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + file_path_t_heap_1 = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap_1')").decode('utf-8').rstrip() + # PAGE1 + backup_id_2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(20000,30000) i") + # PAGE2 + backup_id_3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Corrupt some file in FULL backup + file_full = os.path.join( + backup_dir, 'backups', 'node', + backup_id_1, 'database', 
file_path_t_heap) + with open(file_full, "rb+", 0) as f: + f.seek(84) + f.write(b"blah") + f.flush() + f.close + + # Corrupt some file in PAGE1 backup + file_page1 = os.path.join( + backup_dir, 'backups', 'node', + backup_id_2, 'database', file_path_t_heap_1) + with open(file_page1, "rb+", 0) as f: + f.seek(42) + f.write(b"blah") + f.flush() + f.close + + # Validate PAGE1 + try: + self.validate_pb( + backup_dir, 'node', backup_id=backup_id_2, options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of data files corruption.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'INFO: Validating parents for backup {0}'.format( + backup_id_2) in e.message, + '\n Unexpected Error Message: {0}\n ' + 'CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'INFO: Validating backup {0}'.format( + backup_id_1) in e.message and + 'WARNING: Invalid CRC of backup file' in e.message and + 'WARNING: Backup {0} data files are corrupted'.format( + backup_id_1) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'WARNING: Backup {0} is orphaned because his parent'.format( + backup_id_2) in e.message and + 'WARNING: Backup {0} is orphaned because his parent'.format( + backup_id_3) in e.message and + 'ERROR: Backup {0} is orphan.'.format( + backup_id_2) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'CORRUPT', + self.show_pb(backup_dir, 'node', backup_id_1)['status'], + 'Backup STATUS should be "CORRUPT"') + self.assertEqual( + 'ORPHAN', + self.show_pb(backup_dir, 'node', backup_id_2)['status'], + 'Backup STATUS should be "ORPHAN"') + self.assertEqual( + 'ORPHAN', + self.show_pb(backup_dir, 'node', backup_id_3)['status'], + 'Backup STATUS should be "ORPHAN"') + + # @unittest.skip("skip") + def test_validate_specific_error_intermediate_backups(self): + """ + make archive node, take FULL, PAGE1, PAGE2 backups, + change backup status of FULL and PAGE1 to ERROR, + run validate on PAGE1 + purpose of this test is to be sure that not only + CORRUPT backup descendants can be orphanized + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL + backup_id_1 = self.backup_node(backup_dir, 'node', node) + + # PAGE1 + backup_id_2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGE2 + backup_id_3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Change FULL backup status to ERROR + control_path = os.path.join( + backup_dir, 'backups', 'node', backup_id_1, 'backup.control') + + with open(control_path, 'r') as f: + actual_control = f.read() + + new_control_file = '' + for line in actual_control.splitlines(): + new_control_file += line.replace( + 'status = OK', 'status = ERROR') + new_control_file += '\n' + + with open(control_path, 'wt') as f: + f.write(new_control_file) + f.flush() + f.close() + + # Validate PAGE1 + try: + self.validate_pb( + backup_dir, 'node', backup_id=backup_id_2, options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because backup has status ERROR.\n " + "Output: {0} \n CMD: 
{1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'WARNING: Backup {0} is orphaned because ' + 'his parent {1} has status: ERROR'.format( + backup_id_2, backup_id_1) in e.message and + 'INFO: Validating parents for backup {0}'.format( + backup_id_2) in e.message and + 'WARNING: Backup {0} has status ERROR. Skip validation.'.format( + backup_id_1) and + 'ERROR: Backup {0} is orphan.'.format(backup_id_2) in e.message, + '\n Unexpected Error Message: {0}\n ' + 'CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'ERROR', + self.show_pb(backup_dir, 'node', backup_id_1)['status'], + 'Backup STATUS should be "ERROR"') + self.assertEqual( + 'ORPHAN', + self.show_pb(backup_dir, 'node', backup_id_2)['status'], + 'Backup STATUS should be "ORPHAN"') + self.assertEqual( + 'ORPHAN', + self.show_pb(backup_dir, 'node', backup_id_3)['status'], + 'Backup STATUS should be "ORPHAN"') + + # @unittest.skip("skip") + def test_validate_error_intermediate_backups(self): + """ + make archive node, take FULL, PAGE1, PAGE2 backups, + change backup status of FULL and PAGE1 to ERROR, + run validate on instance + purpose of this test is to be sure that not only + CORRUPT backup descendants can be orphanized + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL + backup_id_1 = self.backup_node(backup_dir, 'node', node) + + # PAGE1 + backup_id_2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGE2 + backup_id_3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Change FULL backup status to ERROR + control_path = os.path.join( + backup_dir, 'backups', 'node', backup_id_1, 'backup.control') + + with open(control_path, 'r') as f: + actual_control = f.read() + + new_control_file = '' + for line in actual_control.splitlines(): + new_control_file += line.replace( + 'status = OK', 'status = ERROR') + new_control_file += '\n' + + with open(control_path, 'wt') as f: + f.write(new_control_file) + f.flush() + f.close() + + # Validate instance + try: + self.validate_pb(backup_dir, options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because backup has status ERROR.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + "WARNING: Backup {0} is orphaned because " + "his parent {1} has status: ERROR".format( + backup_id_2, backup_id_1) in e.message and + 'WARNING: Backup {0} has status ERROR. 
Skip validation'.format( + backup_id_1) in e.message and + "WARNING: Some backups are not valid" in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'ERROR', + self.show_pb(backup_dir, 'node', backup_id_1)['status'], + 'Backup STATUS should be "ERROR"') + self.assertEqual( + 'ORPHAN', + self.show_pb(backup_dir, 'node', backup_id_2)['status'], + 'Backup STATUS should be "ORPHAN"') + self.assertEqual( + 'ORPHAN', + self.show_pb(backup_dir, 'node', backup_id_3)['status'], + 'Backup STATUS should be "ORPHAN"') + + # @unittest.skip("skip") + def test_validate_corrupted_intermediate_backups_1(self): + """ + make archive node, FULL1, PAGE1, PAGE2, PAGE3, PAGE4, PAGE5, FULL2, + corrupt file in PAGE1 and PAGE4, run validate on PAGE3, + expect PAGE1 to gain status CORRUPT, PAGE2, PAGE3, PAGE4 and PAGE5 + to gain status ORPHAN + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL1 + backup_id_1 = self.backup_node(backup_dir, 'node', node) + + # PAGE1 + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + backup_id_2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGE2 + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + file_page_2 = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + backup_id_3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGE3 + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(10000,20000) i") + backup_id_4 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGE4 + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(20000,30000) i") + backup_id_5 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGE5 + node.safe_psql( + "postgres", + "create table t_heap1 as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + file_page_5 = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap1')").decode('utf-8').rstrip() + backup_id_6 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGE6 + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(30000,40000) i") + backup_id_7 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # FULL2 + backup_id_8 = self.backup_node(backup_dir, 'node', node) + + # Corrupt some file in PAGE2 and PAGE5 backups + file_page1 = os.path.join( + backup_dir, 'backups', 'node', backup_id_3, 'database', file_page_2) + with open(file_page1, "rb+", 0) as f: + f.seek(84) + f.write(b"blah") + f.flush() + 
f.close + + file_page4 = os.path.join( + backup_dir, 'backups', 'node', backup_id_6, 'database', file_page_5) + with open(file_page4, "rb+", 0) as f: + f.seek(42) + f.write(b"blah") + f.flush() + f.close + + # Validate PAGE3 + try: + self.validate_pb( + backup_dir, 'node', + backup_id=backup_id_4, options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of data files corruption.\n" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'INFO: Validating parents for backup {0}'.format( + backup_id_4) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'INFO: Validating backup {0}'.format( + backup_id_1) in e.message and + 'INFO: Backup {0} data files are valid'.format( + backup_id_1) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'INFO: Validating backup {0}'.format( + backup_id_2) in e.message and + 'INFO: Backup {0} data files are valid'.format( + backup_id_2) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'INFO: Validating backup {0}'.format( + backup_id_3) in e.message and + 'WARNING: Invalid CRC of backup file' in e.message and + 'WARNING: Backup {0} data files are corrupted'.format( + backup_id_3) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'WARNING: Backup {0} is orphaned because ' + 'his parent {1} has status: CORRUPT'.format( + backup_id_4, backup_id_3) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'WARNING: Backup {0} is orphaned because ' + 'his parent {1} has status: CORRUPT'.format( + backup_id_5, backup_id_3) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'WARNING: Backup {0} is orphaned because ' + 'his parent {1} has status: CORRUPT'.format( + backup_id_6, backup_id_3) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'WARNING: Backup {0} is orphaned because ' + 'his parent {1} has status: CORRUPT'.format( + backup_id_7, backup_id_3) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'ERROR: Backup {0} is orphan'.format(backup_id_4) in e.message, + '\n Unexpected Error Message: {0}\n ' + 'CMD: {1}'.format(repr(e.message), self.cmd)) + + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node', backup_id_1)['status'], + 'Backup STATUS should be "OK"') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node', backup_id_2)['status'], + 'Backup STATUS should be "OK"') + self.assertEqual( + 'CORRUPT', self.show_pb(backup_dir, 'node', backup_id_3)['status'], + 'Backup STATUS should be "CORRUPT"') + self.assertEqual( + 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_4)['status'], + 'Backup STATUS should be "ORPHAN"') + self.assertEqual( + 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_5)['status'], + 'Backup STATUS should be "ORPHAN"') + self.assertEqual( + 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_6)['status'], + 'Backup STATUS should be "ORPHAN"') + self.assertEqual( + 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_7)['status'], + 'Backup STATUS should be "ORPHAN"') + self.assertEqual( + 
'OK', self.show_pb(backup_dir, 'node', backup_id_8)['status'], + 'Backup STATUS should be "OK"') + + # @unittest.skip("skip") + def test_validate_specific_target_corrupted_intermediate_backups(self): + """ + make archive node, take FULL1, PAGE1, PAGE2, PAGE3, PAGE4, PAGE5, FULL2 + corrupt file in PAGE1 and PAGE4, run validate on PAGE3 to specific xid, + expect PAGE1 to gain status CORRUPT, PAGE2, PAGE3, PAGE4 and PAGE5 to + gain status ORPHAN + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL1 + backup_id_1 = self.backup_node(backup_dir, 'node', node) + + # PAGE1 + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + backup_id_2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGE2 + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + file_page_2 = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + backup_id_3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGE3 + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(10000,20000) i") + backup_id_4 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGE4 + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(20000,30000) i") + + target_xid = node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(30001, 30001) i RETURNING (xmin)").decode('utf-8').rstrip() + + backup_id_5 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGE5 + node.safe_psql( + "postgres", + "create table t_heap1 as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + file_page_5 = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap1')").decode('utf-8').rstrip() + backup_id_6 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGE6 + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(30000,40000) i") + backup_id_7 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # FULL2 + backup_id_8 = self.backup_node(backup_dir, 'node', node) + + # Corrupt some file in PAGE2 and PAGE5 backups + file_page1 = os.path.join( + backup_dir, 'backups', 'node', + backup_id_3, 'database', file_page_2) + with open(file_page1, "rb+", 0) as f: + f.seek(84) + f.write(b"blah") + f.flush() + f.close + + file_page4 = os.path.join( + backup_dir, 'backups', 'node', + backup_id_6, 'database', file_page_5) + with open(file_page4, "rb+", 0) as f: + f.seek(42) + f.write(b"blah") + f.flush() + f.close + + 
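# with file_page1 and file_page4 corrupted, validating backup_id_4 to the target xid must mark backup_id_3 CORRUPT and orphan backup_id_4 through backup_id_7 +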
# Validate PAGE3 + try: + self.validate_pb( + backup_dir, 'node', + options=[ + '-i', backup_id_4, '--xid={0}'.format(target_xid), "-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of data files corruption.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'INFO: Validating parents for backup {0}'.format( + backup_id_4) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'INFO: Validating backup {0}'.format( + backup_id_1) in e.message and + 'INFO: Backup {0} data files are valid'.format( + backup_id_1) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'INFO: Validating backup {0}'.format( + backup_id_2) in e.message and + 'INFO: Backup {0} data files are valid'.format( + backup_id_2) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'INFO: Validating backup {0}'.format( + backup_id_3) in e.message and + 'WARNING: Invalid CRC of backup file' in e.message and + 'WARNING: Backup {0} data files are corrupted'.format( + backup_id_3) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'WARNING: Backup {0} is orphaned because his ' + 'parent {1} has status: CORRUPT'.format( + backup_id_4, backup_id_3) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'WARNING: Backup {0} is orphaned because his ' + 'parent {1} has status: CORRUPT'.format( + backup_id_5, backup_id_3) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'WARNING: Backup {0} is orphaned because his ' + 'parent {1} has status: CORRUPT'.format( + backup_id_6, backup_id_3) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'WARNING: Backup {0} is orphaned because his ' + 'parent {1} has status: CORRUPT'.format( + backup_id_7, backup_id_3) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'ERROR: Backup {0} is orphan'.format( + backup_id_4) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_1)['status'], 'Backup STATUS should be "OK"') + self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_2)['status'], 'Backup STATUS should be "OK"') + self.assertEqual('CORRUPT', self.show_pb(backup_dir, 'node', backup_id_3)['status'], 'Backup STATUS should be "CORRUPT"') + self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_4)['status'], 'Backup STATUS should be "ORPHAN"') + self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_5)['status'], 'Backup STATUS should be "ORPHAN"') + self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_6)['status'], 'Backup STATUS should be "ORPHAN"') + self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_7)['status'], 'Backup STATUS should be "ORPHAN"') + self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_8)['status'], 'Backup STATUS should be "OK"') + + # @unittest.skip("skip") + def test_validate_instance_with_several_corrupt_backups(self): + """ + make archive node, 
take FULL1, PAGE1_1, FULL2, PAGE2_1 backups, FULL3 + corrupt file in FULL and FULL2 and run validate on instance, + expect FULL1 to gain status CORRUPT, PAGE1_1 to gain status ORPHAN + FULL2 to gain status CORRUPT, PAGE2_1 to gain status ORPHAN + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "create table t_heap as select generate_series(0,1) i") + # FULL1 + backup_id_1 = self.backup_node( + backup_dir, 'node', node, options=['--no-validate']) + + # FULL2 + backup_id_2 = self.backup_node(backup_dir, 'node', node) + rel_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + + node.safe_psql( + "postgres", + "insert into t_heap values(2)") + + backup_id_3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # FULL3 + backup_id_4 = self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "insert into t_heap values(3)") + + backup_id_5 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # FULL4 + backup_id_6 = self.backup_node( + backup_dir, 'node', node, options=['--no-validate']) + + # Corrupt some files in FULL2 and FULL3 backup + os.remove(os.path.join( + backup_dir, 'backups', 'node', backup_id_2, + 'database', rel_path)) + os.remove(os.path.join( + backup_dir, 'backups', 'node', backup_id_4, + 'database', rel_path)) + + # Validate Instance + try: + self.validate_pb(backup_dir, 'node', options=["-j", "4", "--log-level-file=LOG"]) + self.assertEqual( + 1, 0, + "Expecting Error because of data files corruption.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + "INFO: Validate backups of the instance 'node'" in e.message, + "\n Unexpected Error Message: {0}\n " + "CMD: {1}".format(repr(e.message), self.cmd)) + self.assertTrue( + 'WARNING: Some backups are not valid' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node', backup_id_1)['status'], + 'Backup STATUS should be "OK"') + self.assertEqual( + 'CORRUPT', self.show_pb(backup_dir, 'node', backup_id_2)['status'], + 'Backup STATUS should be "CORRUPT"') + self.assertEqual( + 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_3)['status'], + 'Backup STATUS should be "ORPHAN"') + self.assertEqual( + 'CORRUPT', self.show_pb(backup_dir, 'node', backup_id_4)['status'], + 'Backup STATUS should be "CORRUPT"') + self.assertEqual( + 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_5)['status'], + 'Backup STATUS should be "ORPHAN"') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node', backup_id_6)['status'], + 'Backup STATUS should be "OK"') + + # @unittest.skip("skip") + def test_validate_instance_with_several_corrupt_backups_interrupt(self): + """ + check that interrupt during validation is handled correctly + """ + self._check_gdb_flag_or_skip_test() + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + 
self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "create table t_heap as select generate_series(0,1) i") + # FULL1 + backup_id_1 = self.backup_node( + backup_dir, 'node', node, options=['--no-validate']) + + # FULL2 + backup_id_2 = self.backup_node(backup_dir, 'node', node) + rel_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + + node.safe_psql( + "postgres", + "insert into t_heap values(2)") + + backup_id_3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # FULL3 + backup_id_4 = self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "insert into t_heap values(3)") + + backup_id_5 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # FULL4 + backup_id_6 = self.backup_node( + backup_dir, 'node', node, options=['--no-validate']) + + # Corrupt some files in the FULL1 and first PAGE backups + os.remove(os.path.join( + backup_dir, 'backups', 'node', backup_id_1, + 'database', rel_path)) + os.remove(os.path.join( + backup_dir, 'backups', 'node', backup_id_3, + 'database', rel_path)) + + # Validate Instance + gdb = self.validate_pb( + backup_dir, 'node', options=["-j", "4", "--log-level-file=LOG"], gdb=True) + + gdb.set_breakpoint('validate_file_pages') + gdb.run_until_break() + gdb.continue_execution_until_break() + gdb.remove_all_breakpoints() + gdb._execute('signal SIGINT') + gdb.continue_execution_until_error() + + self.assertEqual( + 'DONE', self.show_pb(backup_dir, 'node', backup_id_1)['status'], + 'Backup STATUS should be "DONE"') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node', backup_id_2)['status'], + 'Backup STATUS should be "OK"') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node', backup_id_3)['status'], + 'Backup STATUS should be "OK"') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'], + 'Backup STATUS should be "OK"') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'], + 'Backup STATUS should be "OK"') + self.assertEqual( + 'DONE', self.show_pb(backup_dir, 'node', backup_id_6)['status'], + 'Backup STATUS should be "DONE"') + + log_file = os.path.join(backup_dir, 'log', 'pg_probackup.log') + with open(log_file, 'r') as f: + log_content = f.read() + self.assertNotIn( + 'Interrupted while locking backup', log_content) + + # @unittest.skip("skip") + def test_validate_instance_with_corrupted_page(self): + """ + make archive node, take FULL, PAGE1, PAGE2, FULL2, PAGE3 backups, + corrupt file in PAGE1 backup and run validate on instance, + expect PAGE1 to gain status CORRUPT, PAGE2 to gain status ORPHAN + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + # FULL1 + backup_id_1 = self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "create table t_heap1 as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from 
generate_series(0,10000) i") + file_path_t_heap1 = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap1')").decode('utf-8').rstrip() + # PAGE1 + backup_id_2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(20000,30000) i") + # PAGE2 + backup_id_3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + # FULL1 + backup_id_4 = self.backup_node( + backup_dir, 'node', node) + # PAGE3 + backup_id_5 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # Corrupt some file in FULL backup + file_full = os.path.join( + backup_dir, 'backups', 'node', backup_id_2, + 'database', file_path_t_heap1) + with open(file_full, "rb+", 0) as f: + f.seek(84) + f.write(b"blah") + f.flush() + f.close + + # Validate Instance + try: + self.validate_pb(backup_dir, 'node', options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of data files corruption.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + "INFO: Validate backups of the instance 'node'" in e.message, + "\n Unexpected Error Message: {0}\n " + "CMD: {1}".format(repr(e.message), self.cmd)) + self.assertTrue( + 'INFO: Validating backup {0}'.format( + backup_id_5) in e.message and + 'INFO: Backup {0} data files are valid'.format( + backup_id_5) in e.message and + 'INFO: Backup {0} WAL segments are valid'.format( + backup_id_5) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'INFO: Validating backup {0}'.format( + backup_id_4) in e.message and + 'INFO: Backup {0} data files are valid'.format( + backup_id_4) in e.message and + 'INFO: Backup {0} WAL segments are valid'.format( + backup_id_4) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'INFO: Validating backup {0}'.format( + backup_id_3) in e.message and + 'INFO: Backup {0} data files are valid'.format( + backup_id_3) in e.message and + 'INFO: Backup {0} WAL segments are valid'.format( + backup_id_3) in e.message and + 'WARNING: Backup {0} is orphaned because ' + 'his parent {1} has status: CORRUPT'.format( + backup_id_3, backup_id_2) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'INFO: Validating backup {0}'.format( + backup_id_2) in e.message and + 'WARNING: Invalid CRC of backup file' in e.message and + 'WARNING: Backup {0} data files are corrupted'.format( + backup_id_2) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'INFO: Validating backup {0}'.format( + backup_id_1) in e.message and + 'INFO: Backup {0} data files are valid'.format( + backup_id_1) in e.message and + 'INFO: Backup {0} WAL segments are valid'.format( + backup_id_1) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertTrue( + 'WARNING: Some backups are not valid' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node', backup_id_1)['status'], + 'Backup STATUS should be "OK"') + self.assertEqual( + 'CORRUPT', self.show_pb(backup_dir, 'node', 
backup_id_2)['status'], + 'Backup STATUS should be "CORRUPT"') + self.assertEqual( + 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_3)['status'], + 'Backup STATUS should be "ORPHAN"') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'], + 'Backup STATUS should be "OK"') + self.assertEqual( + 'OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'], + 'Backup STATUS should be "OK"') + + # @unittest.skip("skip") + def test_validate_instance_with_corrupted_full_and_try_restore(self): + """make archive node, take FULL, PAGE1, PAGE2, FULL2, PAGE3 backups, + corrupt file in FULL backup and run validate on instance, + expect FULL to gain status CORRUPT, PAGE1 and PAGE2 to gain status ORPHAN, + try to restore backup with --no-validation option""" + node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + file_path_t_heap = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + # FULL1 + backup_id_1 = self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + # PAGE1 + backup_id_2 = self.backup_node(backup_dir, 'node', node, backup_type='page') + + # PAGE2 + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(20000,30000) i") + backup_id_3 = self.backup_node(backup_dir, 'node', node, backup_type='page') + + # FULL1 + backup_id_4 = self.backup_node(backup_dir, 'node', node) + + # PAGE3 + node.safe_psql( + "postgres", + "insert into t_heap select i as id, " + "md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(30000,40000) i") + backup_id_5 = self.backup_node(backup_dir, 'node', node, backup_type='page') + + # Corrupt some file in FULL backup + file_full = os.path.join( + backup_dir, 'backups', 'node', + backup_id_1, 'database', file_path_t_heap) + with open(file_full, "rb+", 0) as f: + f.seek(84) + f.write(b"blah") + f.flush() + f.close + + # Validate Instance + try: + self.validate_pb(backup_dir, 'node', options=["-j", "4"]) + self.assertEqual(1, 0, "Expecting Error because of data files corruption.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'INFO: Validating backup {0}'.format(backup_id_1) in e.message + and "INFO: Validate backups of the instance 'node'" in e.message + and 'WARNING: Invalid CRC of backup file' in e.message + and 'WARNING: Backup {0} data files are corrupted'.format(backup_id_1) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + self.assertEqual('CORRUPT', self.show_pb(backup_dir, 'node', backup_id_1)['status'], 'Backup STATUS should be "CORRUPT"') + self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_2)['status'], 'Backup STATUS should be "ORPHAN"') + 
self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') + self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'], 'Backup STATUS should be "OK"') + self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'], 'Backup STATUS should be "OK"') + + node.cleanup() + restore_out = self.restore_node( + backup_dir, 'node', node, + options=["--no-validate"]) + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id_5), + restore_out, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + # @unittest.skip("skip") + def test_validate_instance_with_corrupted_full(self): + """make archive node, take FULL, PAGE1, PAGE2, FULL2, PAGE3 backups, + corrupt file in FULL backup and run validate on instance, + expect FULL to gain status CORRUPT, PAGE1 and PAGE2 to gain status ORPHAN""" + node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "create table t_heap as select i as id, " + "md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + + file_path_t_heap = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + # FULL1 + backup_id_1 = self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + + # PAGE1 + backup_id_2 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # PAGE2 + node.safe_psql( + "postgres", + "insert into t_heap select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(20000,30000) i") + + backup_id_3 = self.backup_node( + backup_dir, 'node', node, backup_type='page') + + # FULL1 + backup_id_4 = self.backup_node( + backup_dir, 'node', node) + + # PAGE3 + node.safe_psql( + "postgres", + "insert into t_heap select i as id, " + "md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(30000,40000) i") + backup_id_5 = self.backup_node(backup_dir, 'node', node, backup_type='page') + + # Corrupt some file in FULL backup + file_full = os.path.join( + backup_dir, 'backups', 'node', + backup_id_1, 'database', file_path_t_heap) + with open(file_full, "rb+", 0) as f: + f.seek(84) + f.write(b"blah") + f.flush() + f.close + + # Validate Instance + try: + self.validate_pb(backup_dir, 'node', options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of data files corruption.\n " + "Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'INFO: Validating backup {0}'.format(backup_id_1) in e.message + and "INFO: Validate backups of the instance 'node'" in e.message + and 'WARNING: Invalid CRC of backup file' in e.message + and 'WARNING: Backup {0} data files are corrupted'.format(backup_id_1) in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + + self.assertEqual('CORRUPT', self.show_pb(backup_dir, 'node', 
backup_id_1)['status'], 'Backup STATUS should be "CORRUPT"') + self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_2)['status'], 'Backup STATUS should be "ORPHAN"') + self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') + self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'], 'Backup STATUS should be "OK"') + self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'], 'Backup STATUS should be "OK"') + + # @unittest.skip("skip") + def test_validate_corrupt_wal_1(self): + """make archive node, take FULL1, PAGE1,PAGE2,FULL2,PAGE3,PAGE4 backups, corrupt all wal files, run validate, expect errors""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + backup_id_1 = self.backup_node(backup_dir, 'node', node) + + with node.connect("postgres") as con: + con.execute("CREATE TABLE tbl0005 (a text)") + con.commit() + + backup_id_2 = self.backup_node(backup_dir, 'node', node) + + # Corrupt WAL + wals_dir = os.path.join(backup_dir, 'wal', 'node') + wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')] + wals.sort() + for wal in wals: + with open(os.path.join(wals_dir, wal), "rb+", 0) as f: + f.seek(42) + f.write(b"blablablaadssaaaaaaaaaaaaaaa") + f.flush() + f.close + + # Simple validate + try: + self.validate_pb(backup_dir, 'node', options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of wal segments corruption.\n" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'WARNING: Backup' in e.message and + 'WAL segments are corrupted' in e.message and + "WARNING: There are not enough WAL " + "records to consistenly restore backup" in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'CORRUPT', + self.show_pb(backup_dir, 'node', backup_id_1)['status'], + 'Backup STATUS should be "CORRUPT"') + self.assertEqual( + 'CORRUPT', + self.show_pb(backup_dir, 'node', backup_id_2)['status'], + 'Backup STATUS should be "CORRUPT"') + + # @unittest.skip("skip") + def test_validate_corrupt_wal_2(self): + """make archive node, make full backup, corrupt all wal files, run validate to real xid, expect errors""" + node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + with node.connect("postgres") as con: + con.execute("CREATE TABLE tbl0005 (a text)") + con.commit() + + backup_id = self.backup_node(backup_dir, 'node', node) + target_xid = None + with node.connect("postgres") as con: + res = con.execute( + "INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") + con.commit() + target_xid = res[0][0] + + # Corrupt WAL + wals_dir = os.path.join(backup_dir, 'wal', 'node') + wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not 
f.endswith('.backup')] + wals.sort() + for wal in wals: + with open(os.path.join(wals_dir, wal), "rb+", 0) as f: + f.seek(128) + f.write(b"blablablaadssaaaaaaaaaaaaaaa") + f.flush() + f.close + + # Validate to xid + try: + self.validate_pb( + backup_dir, + 'node', + backup_id, + options=[ + "--xid={0}".format(target_xid), "-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of wal segments corruption.\n" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'WARNING: Backup' in e.message and + 'WAL segments are corrupted' in e.message and + "WARNING: There are not enough WAL " + "records to consistenly restore backup" in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'CORRUPT', + self.show_pb(backup_dir, 'node', backup_id)['status'], + 'Backup STATUS should be "CORRUPT"') + + # @unittest.skip("skip") + def test_validate_wal_lost_segment_1(self): + """make archive node, make archive full backup, + delete from archive wal segment which belong to previous backup + run validate, expecting error because of missing wal segment + make sure that backup status is 'CORRUPT' + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=3) + + backup_id = self.backup_node(backup_dir, 'node', node) + + # Delete wal segment + wals_dir = os.path.join(backup_dir, 'wal', 'node') + wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')] + wals.sort() + file = os.path.join(backup_dir, 'wal', 'node', wals[-1]) + os.remove(file) + + # cut out '.gz' + if self.archive_compress: + file = file[:-3] + + try: + self.validate_pb(backup_dir, 'node', options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of wal segment disappearance.\n" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + "is absent" in e.message and + "WARNING: There are not enough WAL records to consistenly " + "restore backup {0}".format(backup_id) in e.message and + "WARNING: Backup {0} WAL segments are corrupted".format( + backup_id) in e.message and + "WARNING: Some backups are not valid" in e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'CORRUPT', + self.show_pb(backup_dir, 'node', backup_id)['status'], + 'Backup {0} should have STATUS "CORRUPT"') + + # Run validate again + try: + self.validate_pb(backup_dir, 'node', backup_id, options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of backup corruption.\n" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'INFO: Revalidating backup {0}'.format(backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'ERROR: Backup {0} is corrupt.'.format(backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_validate_corrupt_wal_between_backups(self): + """ + 
make archive node, make full backup, corrupt all wal files, + run validate to real xid, expect errors + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + backup_id = self.backup_node(backup_dir, 'node', node) + + # make some wals + node.pgbench_init(scale=3) + + with node.connect("postgres") as con: + con.execute("CREATE TABLE tbl0005 (a text)") + con.commit() + + with node.connect("postgres") as con: + res = con.execute( + "INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") + con.commit() + target_xid = res[0][0] + + if self.get_version(node) < self.version_to_num('10.0'): + walfile = node.safe_psql( + 'postgres', + 'select pg_xlogfile_name(pg_current_xlog_location())').decode('utf-8').rstrip() + else: + walfile = node.safe_psql( + 'postgres', + 'select pg_walfile_name(pg_current_wal_lsn())').decode('utf-8').rstrip() + + if self.archive_compress: + walfile = walfile + '.gz' + self.switch_wal_segment(node) + + # generate some wals + node.pgbench_init(scale=3) + + self.backup_node(backup_dir, 'node', node) + + # Corrupt WAL + wals_dir = os.path.join(backup_dir, 'wal', 'node') + with open(os.path.join(wals_dir, walfile), "rb+", 0) as f: + f.seek(9000) + f.write(b"b") + f.flush() + f.close + + # Validate to xid + try: + self.validate_pb( + backup_dir, + 'node', + backup_id, + options=[ + "--xid={0}".format(target_xid), "-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of wal segments corruption.\n" + " Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'ERROR: Not enough WAL records to xid' in e.message and + 'WARNING: Recovery can be done up to time' in e.message and + "ERROR: Not enough WAL records to xid {0}\n".format( + target_xid), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertEqual( + 'OK', + self.show_pb(backup_dir, 'node')[0]['status'], + 'Backup STATUS should be "OK"') + + self.assertEqual( + 'OK', + self.show_pb(backup_dir, 'node')[1]['status'], + 'Backup STATUS should be "OK"') + + # @unittest.skip("skip") + def test_pgpro702_688(self): + """ + make node without archiving, make stream backup, + get Recovery Time, validate to Recovery Time + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + backup_id = self.backup_node( + backup_dir, 'node', node, options=["--stream"]) + recovery_time = self.show_pb( + backup_dir, 'node', backup_id=backup_id)['recovery-time'] + + try: + self.validate_pb( + backup_dir, 'node', + options=["--time={0}".format(recovery_time), "-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of wal segment disappearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'WAL archive is empty. 
You cannot restore backup to a ' + 'recovery target without WAL archive', e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_pgpro688(self): + """ + make node with archiving, make backup, get Recovery Time, + validate to Recovery Time. Waiting PGPRO-688. RESOLVED + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + backup_id = self.backup_node(backup_dir, 'node', node) + recovery_time = self.show_pb( + backup_dir, 'node', backup_id)['recovery-time'] + + self.validate_pb( + backup_dir, 'node', options=["--time={0}".format(recovery_time), + "-j", "4"]) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_pgpro561(self): + """ + make node with archiving, make stream backup, + restore it to node1, check that archiving is not successful on node1 + """ + node1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node1'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node1', node1) + self.set_archiving(backup_dir, 'node1', node1) + node1.slow_start() + + backup_id = self.backup_node( + backup_dir, 'node1', node1, options=["--stream"]) + + node2 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node2')) + node2.cleanup() + + node1.psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,256) i") + + self.backup_node( + backup_dir, 'node1', node1, + backup_type='page', options=["--stream"]) + self.restore_node(backup_dir, 'node1', data_dir=node2.data_dir) + + self.set_auto_conf( + node2, {'port': node2.port, 'archive_mode': 'off'}) + + node2.slow_start() + + self.set_auto_conf( + node2, {'archive_mode': 'on'}) + + node2.stop() + node2.slow_start() + + timeline_node1 = node1.get_control_data()["Latest checkpoint's TimeLineID"] + timeline_node2 = node2.get_control_data()["Latest checkpoint's TimeLineID"] + self.assertEqual( + timeline_node1, timeline_node2, + "Timelines on Master and Node1 should be equal. " + "This is unexpected") + + archive_command_node1 = node1.safe_psql( + "postgres", "show archive_command") + archive_command_node2 = node2.safe_psql( + "postgres", "show archive_command") + self.assertEqual( + archive_command_node1, archive_command_node2, + "Archive command on Master and Node should be equal. 
" + "This is unexpected") + + # result = node2.safe_psql("postgres", "select last_failed_wal from pg_stat_get_archiver() where last_failed_wal is not NULL") + ## self.assertEqual(res, six.b(""), 'Restored Node1 failed to archive segment {0} due to having the same archive command as Master'.format(res.rstrip())) + # if result == "": + # self.assertEqual(1, 0, 'Error is expected due to Master and Node1 having the common archive and archive_command') + + node1.psql( + "postgres", + "create table t_heap_1 as select i as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10) i") + + self.switch_wal_segment(node1) + +# wals_dir = os.path.join(backup_dir, 'wal', 'node1') +# wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join( +# wals_dir, f)) and not f.endswith('.backup') and not f.endswith('.part')] +# wals = map(str, wals) +# print(wals) + + self.switch_wal_segment(node2) + +# wals_dir = os.path.join(backup_dir, 'wal', 'node1') +# wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join( +# wals_dir, f)) and not f.endswith('.backup') and not f.endswith('.part')] +# wals = map(str, wals) +# print(wals) + + time.sleep(5) + + log_file = os.path.join(node2.logs_dir, 'postgresql.log') + with open(log_file, 'r') as f: + log_content = f.read() + self.assertTrue( + 'LOG: archive command failed with exit code 1' in log_content and + 'DETAIL: The failed archive command was:' in log_content and + 'WAL file already exists in archive with different checksum' in log_content, + 'Expecting error messages about failed archive_command' + ) + self.assertFalse( + 'pg_probackup archive-push completed successfully' in log_content) + + # @unittest.skip("skip") + def test_validate_corrupted_full(self): + """ + make node with archiving, take full backup, and three page backups, + take another full backup and three page backups + corrupt second full backup, run validate, check that + second full backup became CORRUPT and his page backups are ORPHANs + remove corruption and run valudate again, check that + second full backup and his page backups are OK + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'checkpoint_timeout': '30'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + + backup_id = self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + + node.safe_psql( + "postgres", + "alter system set archive_command = 'false'") + node.reload() + try: + self.backup_node( + backup_dir, 'node', node, + backup_type='page', options=['--archive-timeout=1s']) + self.assertEqual( + 1, 0, + "Expecting Error because of data file dissapearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + pass + + self.assertTrue( + self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR') + self.set_archiving(backup_dir, 'node', node) + node.reload() + self.backup_node(backup_dir, 'node', node, backup_type='page') + 
+ file = os.path.join( + backup_dir, 'backups', 'node', + backup_id, 'database', 'postgresql.auto.conf') + + file_new = os.path.join(backup_dir, 'postgresql.auto.conf') + os.rename(file, file_new) + + try: + self.validate_pb(backup_dir, options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of data file dissapearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'Validating backup {0}'.format(backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} data files are corrupted'.format( + backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Some backups are not valid'.format( + backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue( + self.show_pb(backup_dir, 'node')[3]['status'] == 'CORRUPT') + self.assertTrue( + self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') + self.assertTrue( + self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') + self.assertTrue( + self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR') + self.assertTrue( + self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') + + os.rename(file_new, file) + try: + self.validate_pb(backup_dir, options=["-j", "4"]) + except ProbackupException as e: + self.assertIn( + 'WARNING: Some backups are not valid'.format( + backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') + self.assertTrue( + self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR') + self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') + + # @unittest.skip("skip") + def test_validate_corrupted_full_1(self): + """ + make node with archiving, take full backup, and three page backups, + take another full backup and four page backups + corrupt second full backup, run validate, check that + second full backup became CORRUPT and his page backups are ORPHANs + remove corruption from full backup and corrupt his second page backup + run valudate again, check that + second full backup and his firts page backups are OK, + second page should be CORRUPT + third page should be ORPHAN + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 
'node', node, backup_type='page') + + backup_id = self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + backup_id_page = self.backup_node( + backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + + file = os.path.join( + backup_dir, 'backups', 'node', + backup_id, 'database', 'postgresql.auto.conf') + + file_new = os.path.join(backup_dir, 'postgresql.auto.conf') + os.rename(file, file_new) + + try: + self.validate_pb(backup_dir, options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of data file dissapearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'Validating backup {0}'.format(backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} data files are corrupted'.format( + backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Some backups are not valid'.format( + backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'CORRUPT') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + + os.rename(file_new, file) + + file = os.path.join( + backup_dir, 'backups', 'node', + backup_id_page, 'database', 'backup_label') + + file_new = os.path.join(backup_dir, 'backup_label') + os.rename(file, file_new) + + try: + self.validate_pb(backup_dir, options=["-j", "4"]) + except ProbackupException as e: + self.assertIn( + 'WARNING: Some backups are not valid'.format( + backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'CORRUPT') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') + + # @unittest.skip("skip") + def test_validate_corrupted_full_2(self): + """ + PAGE2_2b + PAGE2_2a + PAGE2_4 + PAGE2_4 <- validate + PAGE2_3 + PAGE2_2 <- CORRUPT + PAGE2_1 + FULL2 + PAGE1_1 + FULL1 + corrupt second page backup, run validate on PAGE2_3, check that + PAGE2_2 became CORRUPT and his descendants are ORPHANs, + take two more PAGE backups, which now trace their origin + to PAGE2_1 - latest OK backup, + run validate on PAGE2_3, check that PAGE2_2a and PAGE2_2b are OK, + + remove corruption from PAGE2_2 and run validate on PAGE2_4 + """ + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = 
os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + corrupt_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + validate_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + + file = os.path.join( + backup_dir, 'backups', 'node', + corrupt_id, 'database', 'backup_label') + + file_new = os.path.join(backup_dir, 'backup_label') + os.rename(file, file_new) + + try: + self.validate_pb(backup_dir, 'node', validate_id, + options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of data file dissapearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'INFO: Validating parents for backup {0}'.format(validate_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'INFO: Validating backup {0}'.format( + self.show_pb(backup_dir, 'node')[2]['id']), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'INFO: Validating backup {0}'.format( + self.show_pb(backup_dir, 'node')[3]['id']), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'INFO: Validating backup {0}'.format( + corrupt_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} data files are corrupted'.format( + corrupt_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'CORRUPT') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + + # THIS IS GOLD!!!! 
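+ # Take two more PAGE backups; because PAGE2_2 is now CORRUPT they are based on PAGE2_1, the latest valid backup, so two chains hang off FULL2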
+ self.backup_node(backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + + try: + self.validate_pb(backup_dir, 'node', options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of data file dissapearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'Backup {0} data files are valid'.format( + self.show_pb(backup_dir, 'node')[9]['id']), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn( + 'Backup {0} data files are valid'.format( + self.show_pb(backup_dir, 'node')[8]['id']), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn( + 'WARNING: Backup {0} has parent {1} with status: CORRUPT'.format( + self.show_pb(backup_dir, 'node')[7]['id'], corrupt_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn( + 'WARNING: Backup {0} has parent {1} with status: CORRUPT'.format( + self.show_pb(backup_dir, 'node')[6]['id'], corrupt_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn( + 'WARNING: Backup {0} has parent {1} with status: CORRUPT'.format( + self.show_pb(backup_dir, 'node')[5]['id'], corrupt_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn( + 'INFO: Revalidating backup {0}'.format( + corrupt_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn( + 'WARNING: Some backups are not valid', e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'CORRUPT') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + + # revalidate again + + try: + self.validate_pb(backup_dir, 'node', validate_id, + options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of data file dissapearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'WARNING: Backup {0} has status: ORPHAN'.format(validate_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn( + 'Backup {0} has parent {1} with status: CORRUPT'.format( + self.show_pb(backup_dir, 'node')[7]['id'], corrupt_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn( + 'Backup {0} has parent {1} with status: CORRUPT'.format( + self.show_pb(backup_dir, 'node')[6]['id'], corrupt_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + 
self.assertIn( + 'Backup {0} has parent {1} with status: CORRUPT'.format( + self.show_pb(backup_dir, 'node')[5]['id'], corrupt_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn( + 'INFO: Validating parents for backup {0}'.format( + validate_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn( + 'INFO: Validating backup {0}'.format( + self.show_pb(backup_dir, 'node')[2]['id']), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn( + 'INFO: Validating backup {0}'.format( + self.show_pb(backup_dir, 'node')[3]['id']), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn( + 'INFO: Revalidating backup {0}'.format( + corrupt_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn( + 'WARNING: Backup {0} data files are corrupted'.format( + corrupt_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn( + 'ERROR: Backup {0} is orphan.'.format( + validate_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # Fix CORRUPT + os.rename(file_new, file) + + output = self.validate_pb(backup_dir, 'node', validate_id, + options=["-j", "4"]) + + self.assertIn( + 'WARNING: Backup {0} has status: ORPHAN'.format(validate_id), + output, + '\n Unexpected Output Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'Backup {0} has parent {1} with status: CORRUPT'.format( + self.show_pb(backup_dir, 'node')[7]['id'], corrupt_id), + output, + '\n Unexpected Output Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'Backup {0} has parent {1} with status: CORRUPT'.format( + self.show_pb(backup_dir, 'node')[6]['id'], corrupt_id), + output, + '\n Unexpected Output Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'Backup {0} has parent {1} with status: CORRUPT'.format( + self.show_pb(backup_dir, 'node')[5]['id'], corrupt_id), + output, + '\n Unexpected Output Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'INFO: Validating parents for backup {0}'.format( + validate_id), output, + '\n Unexpected Output Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'INFO: Validating backup {0}'.format( + self.show_pb(backup_dir, 'node')[2]['id']), output, + '\n Unexpected Output Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'INFO: Validating backup {0}'.format( + self.show_pb(backup_dir, 'node')[3]['id']), output, + '\n Unexpected Output Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'INFO: Revalidating backup {0}'.format( + corrupt_id), output, + '\n Unexpected Output Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'Backup {0} data files are valid'.format( + corrupt_id), output, + '\n Unexpected Output Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'INFO: Revalidating backup {0}'.format( + self.show_pb(backup_dir, 'node')[5]['id']), output, + '\n Unexpected Output Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'Backup {0} data files are valid'.format( + self.show_pb(backup_dir, 'node')[5]['id']), output, + '\n Unexpected Output Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'INFO: Revalidating backup {0}'.format( + validate_id), output, + '\n 
Unexpected Output Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'Backup {0} data files are valid'.format( + validate_id), output, + '\n Unexpected Output Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'INFO: Backup {0} WAL segments are valid'.format( + validate_id), output, + '\n Unexpected Output Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'INFO: Backup {0} is valid.'.format( + validate_id), output, + '\n Unexpected Output Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'INFO: Validate of backup {0} completed.'.format( + validate_id), output, + '\n Unexpected Output Message: {0}\n'.format( + repr(output))) + + # Now we have two perfectly valid backup chains based on FULL2 + + self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + + # @unittest.skip("skip") + def test_validate_corrupted_full_missing(self): + """ + make node with archiving, take full backup, and three page backups, + take another full backup and four page backups + corrupt second full backup, run validate, check that + second full backup became CORRUPT and his page backups are ORPHANs + remove corruption from full backup and remove his second page backup + run valudate again, check that + second full backup and his firts page backups are OK, + third page should be ORPHAN + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + + backup_id = self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + backup_id_page = self.backup_node( + backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + + file = os.path.join( + backup_dir, 'backups', 'node', + backup_id, 'database', 'postgresql.auto.conf') + + file_new = os.path.join(backup_dir, 'postgresql.auto.conf') + os.rename(file, file_new) + + try: + self.validate_pb(backup_dir, options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of data file dissapearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'Validating backup {0}'.format(backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + 
repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} data files are corrupted'.format( + backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} is orphaned because his parent {1} has status: CORRUPT'.format( + self.show_pb(backup_dir, 'node')[5]['id'], backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'CORRUPT') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + + # Full backup is fixed + os.rename(file_new, file) + + # break PAGE + old_directory = os.path.join( + backup_dir, 'backups', 'node', backup_id_page) + new_directory = os.path.join(backup_dir, backup_id_page) + os.rename(old_directory, new_directory) + + try: + self.validate_pb(backup_dir, options=["-j", "4"]) + except ProbackupException as e: + self.assertIn( + 'WARNING: Some backups are not valid', e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn( + 'WARNING: Backup {0} has missing parent {1}'.format( + self.show_pb(backup_dir, 'node')[7]['id'], + backup_id_page), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn( + 'WARNING: Backup {0} has missing parent {1}'.format( + self.show_pb(backup_dir, 'node')[6]['id'], + backup_id_page), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn( + 'WARNING: Backup {0} has parent {1} with status: CORRUPT'.format( + self.show_pb(backup_dir, 'node')[5]['id'], backup_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') + # missing backup is here + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + + # validate should be idempotent - user running validate + # second time must be provided with ID of missing backup + + try: + self.validate_pb(backup_dir, options=["-j", "4"]) + except ProbackupException as e: + self.assertIn( + 'WARNING: Some backups are not valid', e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn( + 'WARNING: Backup {0} has missing parent {1}'.format( + self.show_pb(backup_dir, 'node')[7]['id'], + backup_id_page), e.message, + '\n Unexpected Error Message: {0}\n CMD: 
{1}'.format( + repr(e.message), self.cmd)) + + self.assertIn( + 'WARNING: Backup {0} has missing parent {1}'.format( + self.show_pb(backup_dir, 'node')[6]['id'], + backup_id_page), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') + # missing backup is here + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + + # fix missing PAGE backup + os.rename(new_directory, old_directory) + # exit(1) + + self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + + output = self.validate_pb(backup_dir, options=["-j", "4"]) + + self.assertIn( + 'INFO: All backups are valid', + output, + '\n Unexpected Error Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'WARNING: Backup {0} has parent {1} with status: ORPHAN'.format( + self.show_pb(backup_dir, 'node')[8]['id'], + self.show_pb(backup_dir, 'node')[6]['id']), + output, + '\n Unexpected Error Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'WARNING: Backup {0} has parent {1} with status: ORPHAN'.format( + self.show_pb(backup_dir, 'node')[7]['id'], + self.show_pb(backup_dir, 'node')[6]['id']), + output, + '\n Unexpected Error Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'Revalidating backup {0}'.format( + self.show_pb(backup_dir, 'node')[6]['id']), + output, + '\n Unexpected Error Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'Revalidating backup {0}'.format( + self.show_pb(backup_dir, 'node')[7]['id']), + output, + '\n Unexpected Error Message: {0}\n'.format( + repr(output))) + + self.assertIn( + 'Revalidating backup {0}'.format( + self.show_pb(backup_dir, 'node')[8]['id']), + output, + '\n Unexpected Error Message: {0}\n'.format( + repr(output))) + + self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + + def 
test_file_size_corruption_no_validate(self): + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + # initdb_params=['--data-checksums'], + ) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + + node.slow_start() + + node.safe_psql( + "postgres", + "create table t_heap as select 1 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1000) i") + node.safe_psql( + "postgres", + "CHECKPOINT;") + + heap_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + heap_size = node.safe_psql( + "postgres", + "select pg_relation_size('t_heap')") + + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=["-j", "4"], asynchronous=False, gdb=False) + + node.stop() + node.cleanup() + + # Let`s do file corruption + with open( + os.path.join( + backup_dir, "backups", 'node', backup_id, + "database", heap_path), "rb+", 0) as f: + f.truncate(int(heap_size) - 4096) + f.flush() + f.close + + node.cleanup() + + try: + self.restore_node( + backup_dir, 'node', node, + options=["--no-validate"]) + except ProbackupException as e: + self.assertTrue( + "ERROR: Backup files restoring failed" in e.message, + repr(e.message)) + + # @unittest.skip("skip") + def test_validate_specific_backup_with_missing_backup(self): + """ + PAGE3_2 + PAGE3_1 + FULL3 + PAGE2_5 + PAGE2_4 <- validate + PAGE2_3 + PAGE2_2 <- missing + PAGE2_1 + FULL2 + PAGE1_2 + PAGE1_1 + FULL1 + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # CHAIN1 + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + + # CHAIN2 + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + missing_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + validate_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + + # CHAIN3 + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + + old_directory = os.path.join(backup_dir, 'backups', 'node', missing_id) + new_directory = os.path.join(backup_dir, missing_id) + + os.rename(old_directory, new_directory) + + try: + self.validate_pb(backup_dir, 'node', validate_id, + options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of backup dissapearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( + self.show_pb(backup_dir, 'node')[7]['id'], missing_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), 
self.cmd)) + self.assertIn( + 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( + self.show_pb(backup_dir, 'node')[6]['id'], missing_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( + self.show_pb(backup_dir, 'node')[5]['id'], missing_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertTrue(self.show_pb(backup_dir, 'node')[10]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') + # missing backup + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + + try: + self.validate_pb(backup_dir, 'node', validate_id, + options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of backup dissapearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'WARNING: Backup {0} has missing parent {1}'.format( + self.show_pb(backup_dir, 'node')[7]['id'], missing_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} has missing parent {1}'.format( + self.show_pb(backup_dir, 'node')[6]['id'], missing_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} has missing parent {1}'.format( + self.show_pb(backup_dir, 'node')[5]['id'], missing_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + os.rename(new_directory, old_directory) + + # Revalidate backup chain + self.validate_pb(backup_dir, 'node', validate_id, options=["-j", "4"]) + + self.assertTrue(self.show_pb(backup_dir, 'node')[11]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[10]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + + # @unittest.skip("skip") + def test_validate_specific_backup_with_missing_backup_1(self): + """ + PAGE3_2 + PAGE3_1 + FULL3 + PAGE2_5 + PAGE2_4 <- validate + PAGE2_3 + PAGE2_2 <- missing + PAGE2_1 + FULL2 <- missing 
+ PAGE1_2 + PAGE1_1 + FULL1 + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # CHAIN1 + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + + # CHAIN2 + missing_full_id = self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + missing_page_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + validate_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + + # CHAIN3 + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + + page_old_directory = os.path.join( + backup_dir, 'backups', 'node', missing_page_id) + page_new_directory = os.path.join(backup_dir, missing_page_id) + os.rename(page_old_directory, page_new_directory) + + full_old_directory = os.path.join( + backup_dir, 'backups', 'node', missing_full_id) + full_new_directory = os.path.join(backup_dir, missing_full_id) + os.rename(full_old_directory, full_new_directory) + + try: + self.validate_pb(backup_dir, 'node', validate_id, + options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of backup dissapearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( + self.show_pb(backup_dir, 'node')[6]['id'], missing_page_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( + self.show_pb(backup_dir, 'node')[5]['id'], missing_page_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( + self.show_pb(backup_dir, 'node')[4]['id'], missing_page_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') + # PAGE2_1 + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') # <- SHit + # FULL2 + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + + os.rename(page_new_directory, page_old_directory) + os.rename(full_new_directory, 
full_old_directory) + + # Revalidate backup chain + self.validate_pb(backup_dir, 'node', validate_id, options=["-j", "4"]) + + self.assertTrue(self.show_pb(backup_dir, 'node')[11]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[10]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'ORPHAN') # <- Fail + self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + + # @unittest.skip("skip") + def test_validate_with_missing_backup_1(self): + """ + PAGE3_2 + PAGE3_1 + FULL3 + PAGE2_5 + PAGE2_4 <- validate + PAGE2_3 + PAGE2_2 <- missing + PAGE2_1 + FULL2 <- missing + PAGE1_2 + PAGE1_1 + FULL1 + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # CHAIN1 + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + + # CHAIN2 + missing_full_id = self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + missing_page_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + validate_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + + # CHAIN3 + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + + # Break PAGE + page_old_directory = os.path.join( + backup_dir, 'backups', 'node', missing_page_id) + page_new_directory = os.path.join(backup_dir, missing_page_id) + os.rename(page_old_directory, page_new_directory) + + # Break FULL + full_old_directory = os.path.join( + backup_dir, 'backups', 'node', missing_full_id) + full_new_directory = os.path.join(backup_dir, missing_full_id) + os.rename(full_old_directory, full_new_directory) + + try: + self.validate_pb(backup_dir, 'node', validate_id, + options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of backup dissapearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( + self.show_pb(backup_dir, 'node')[6]['id'], missing_page_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( + self.show_pb(backup_dir, 'node')[5]['id'], 
missing_page_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( + self.show_pb(backup_dir, 'node')[4]['id'], missing_page_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') + # PAGE2_2 is missing + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') + # FULL1 - is missing + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + + os.rename(page_new_directory, page_old_directory) + + # Revalidate backup chain + try: + self.validate_pb(backup_dir, 'node', validate_id, + options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of backup dissapearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'WARNING: Backup {0} has status: ORPHAN'.format( + validate_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} has missing parent {1}'.format( + self.show_pb(backup_dir, 'node')[7]['id'], + missing_full_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} has missing parent {1}'.format( + self.show_pb(backup_dir, 'node')[6]['id'], + missing_full_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} has missing parent {1}'.format( + self.show_pb(backup_dir, 'node')[5]['id'], + missing_full_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( + self.show_pb(backup_dir, 'node')[4]['id'], + missing_full_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( + self.show_pb(backup_dir, 'node')[3]['id'], + missing_full_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertTrue(self.show_pb(backup_dir, 'node')[10]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'ORPHAN') + # FULL1 - is missing + 
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + + os.rename(full_new_directory, full_old_directory) + + # Revalidate chain + self.validate_pb(backup_dir, 'node', validate_id, options=["-j", "4"]) + + self.assertTrue(self.show_pb(backup_dir, 'node')[11]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[10]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + + # @unittest.skip("skip") + def test_validate_with_missing_backup_2(self): + """ + PAGE3_2 + PAGE3_1 + FULL3 + PAGE2_5 + PAGE2_4 + PAGE2_3 + PAGE2_2 <- missing + PAGE2_1 + FULL2 <- missing + PAGE1_2 + PAGE1_1 + FULL1 + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # CHAIN1 + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + + # CHAIN2 + missing_full_id = self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + missing_page_id = self.backup_node( + backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + self.backup_node( + backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + + # CHAIN3 + self.backup_node(backup_dir, 'node', node) + self.backup_node(backup_dir, 'node', node, backup_type='page') + self.backup_node(backup_dir, 'node', node, backup_type='page') + + page_old_directory = os.path.join(backup_dir, 'backups', 'node', missing_page_id) + page_new_directory = os.path.join(backup_dir, missing_page_id) + os.rename(page_old_directory, page_new_directory) + + full_old_directory = os.path.join(backup_dir, 'backups', 'node', missing_full_id) + full_new_directory = os.path.join(backup_dir, missing_full_id) + os.rename(full_old_directory, full_new_directory) + + try: + self.validate_pb(backup_dir, 'node', options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of backup dissapearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( + self.show_pb(backup_dir, 'node')[6]['id'], missing_page_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + 
repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( + self.show_pb(backup_dir, 'node')[5]['id'], missing_page_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( + self.show_pb(backup_dir, 'node')[4]['id'], missing_page_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( + self.show_pb(backup_dir, 'node')[3]['id'], missing_full_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') + # PAGE2_2 is missing + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'ORPHAN') + # FULL1 - is missing + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + + os.rename(page_new_directory, page_old_directory) + + # Revalidate backup chain + try: + self.validate_pb(backup_dir, 'node', options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of backup dissapearance.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'WARNING: Backup {0} has missing parent {1}'.format( + self.show_pb(backup_dir, 'node')[7]['id'], missing_full_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} has missing parent {1}'.format( + self.show_pb(backup_dir, 'node')[6]['id'], missing_full_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} has missing parent {1}'.format( + self.show_pb(backup_dir, 'node')[5]['id'], missing_full_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( + self.show_pb(backup_dir, 'node')[4]['id'], missing_full_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + self.assertIn( + 'WARNING: Backup {0} has missing parent {1}'.format( + self.show_pb(backup_dir, 'node')[3]['id'], missing_full_id), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertTrue(self.show_pb(backup_dir, 'node')[10]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 
'node')[5]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'ORPHAN') + # FULL1 - is missing + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + + # @unittest.skip("skip") + def test_corrupt_pg_control_via_resetxlog(self): + """ PGPRO-2096 """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + backup_id = self.backup_node(backup_dir, 'node', node) + + if self.get_version(node) < 100000: + pg_resetxlog_path = self.get_bin_path('pg_resetxlog') + wal_dir = 'pg_xlog' + else: + pg_resetxlog_path = self.get_bin_path('pg_resetwal') + wal_dir = 'pg_wal' + + os.mkdir( + os.path.join( + backup_dir, 'backups', 'node', backup_id, 'database', wal_dir, 'archive_status')) + + pg_control_path = os.path.join( + backup_dir, 'backups', 'node', + backup_id, 'database', 'global', 'pg_control') + + md5_before = hashlib.md5( + open(pg_control_path, 'rb').read()).hexdigest() + + self.run_binary( + [ + pg_resetxlog_path, + '-D', + os.path.join(backup_dir, 'backups', 'node', backup_id, 'database'), + '-o 42', + '-f' + ], + asynchronous=False) + + md5_after = hashlib.md5( + open(pg_control_path, 'rb').read()).hexdigest() + + if self.verbose: + print('\n MD5 BEFORE resetxlog: {0}\n MD5 AFTER resetxlog: {1}'.format( + md5_before, md5_after)) + + # Validate backup + try: + self.validate_pb(backup_dir, 'node', options=["-j", "4"]) + self.assertEqual( + 1, 0, + "Expecting Error because of pg_control change.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'data files are corrupted', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_validation_after_backup(self): + """""" + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + gdb = self.backup_node( + backup_dir, 'node', node, gdb=True, options=['--stream']) + + gdb.set_breakpoint('pgBackupValidate') + gdb.run_until_break() + + backup_id = self.show_pb(backup_dir, 'node')[0]['id'] + + file = os.path.join( + backup_dir, "backups", "node", backup_id, + "database", "postgresql.conf") + os.remove(file) + + gdb.continue_execution_until_exit() + + self.assertEqual( + 'CORRUPT', + self.show_pb(backup_dir, 'node', backup_id)['status'], + 'Backup STATUS should be "ERROR"') + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_validate_corrupt_tablespace_map(self): + """ + Check that corruption in tablespace_map is detected + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'external_dir') + + node.safe_psql( + 'postgres', + 'CREATE TABLE t_heap(a int) TABLESPACE "external_dir"') + + # FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + tablespace_map = os.path.join( + backup_dir, 'backups', 'node', + backup_id, 'database', 'tablespace_map') + + # Corrupt tablespace_map file in FULL backup + with open(tablespace_map, "rb+", 0) as f: + f.seek(84) + f.write(b"blah") + f.flush() + f.close + + try: + self.validate_pb(backup_dir, 'node', backup_id=backup_id) + self.assertEqual( + 1, 0, + "Expecting Error because tablespace_map is corrupted.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'WARNING: Invalid CRC of backup file', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + #TODO fix the test + @unittest.expectedFailure + # @unittest.skip("skip") + def test_validate_target_lsn(self): + """ + Check validation to specific LSN + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + "postgres", + "create table t_heap as select 1 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,10000) i") + + node_restored = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored.cleanup() + + self.restore_node(backup_dir, 'node', node_restored) + + self.set_auto_conf( + node_restored, {'port': node_restored.port}) + + node_restored.slow_start() + + self.switch_wal_segment(node) + + backup_id = self.backup_node( + backup_dir, 'node', node_restored, + data_dir=node_restored.data_dir) + + target_lsn = self.show_pb(backup_dir, 'node')[1]['stop-lsn'] + + self.delete_pb(backup_dir, 'node', backup_id) + + self.validate_pb( + backup_dir, 'node', + options=[ + '--recovery-target-timeline=2', + '--recovery-target-lsn={0}'.format(target_lsn)]) + + @unittest.skip("skip") + def test_partial_validate_empty_and_mangled_database_map(self): + """ + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + + node.slow_start() + + # create databases + for i in range(1, 10, 1): + node.safe_psql( + 'postgres', + 'CREATE database db{0}'.format(i)) + + # FULL backup with database_map + backup_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + pgdata = self.pgdata_content(node.data_dir) + + # truncate database_map + path = os.path.join( + backup_dir, 'backups', 'node', + backup_id, 'database', 'database_map') + with open(path, "w") as f: + f.close() + + try: + self.validate_pb( + 
backup_dir, 'node', + options=["--db-include=db1"]) + self.assertEqual( + 1, 0, + "Expecting Error because database_map is empty.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + "WARNING: Backup {0} data files are corrupted".format( + backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # mangle database_map + with open(path, "w") as f: + f.write("42") + f.close() + + try: + self.validate_pb( + backup_dir, 'node', + options=["--db-include=db1"]) + self.assertEqual( + 1, 0, + "Expecting Error because database_map is empty.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + "WARNING: Backup {0} data files are corrupted".format( + backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + @unittest.skip("skip") + def test_partial_validate_exclude(self): + """""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + for i in range(1, 10, 1): + node.safe_psql( + 'postgres', + 'CREATE database db{0}'.format(i)) + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + + try: + self.validate_pb( + backup_dir, 'node', + options=[ + "--db-include=db1", + "--db-exclude=db2"]) + self.assertEqual( + 1, 0, + "Expecting Error because of 'db-exclude' and 'db-include'.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: You cannot specify '--db-include' " + "and '--db-exclude' together", e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + try: + self.validate_pb( + backup_dir, 'node', + options=[ + "--db-exclude=db1", + "--db-exclude=db5", + "--log-level-console=verbose"]) + self.assertEqual( + 1, 0, + "Expecting Error because of missing backup ID.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: You must specify parameter (-i, --backup-id) for partial validation", + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + output = self.validate_pb( + backup_dir, 'node', backup_id, + options=[ + "--db-exclude=db1", + "--db-exclude=db5", + "--log-level-console=verbose"]) + + self.assertIn( + "VERBOSE: Skip file validation due to partial restore", output) + + @unittest.skip("skip") + def test_partial_validate_include(self): + """ + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + for i in range(1, 10, 1): + node.safe_psql( + 'postgres', + 'CREATE database db{0}'.format(i)) + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + + try: + self.validate_pb( + backup_dir, 'node', + options=[ + "--db-include=db1", + "--db-exclude=db2"]) + self.assertEqual( + 1, 0, + 
"Expecting Error because of 'db-exclude' and 'db-include'.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: You cannot specify '--db-include' " + "and '--db-exclude' together", e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + output = self.validate_pb( + backup_dir, 'node', backup_id, + options=[ + "--db-include=db1", + "--db-include=db5", + "--db-include=postgres", + "--log-level-console=verbose"]) + + self.assertIn( + "VERBOSE: Skip file validation due to partial restore", output) + + output = self.validate_pb( + backup_dir, 'node', backup_id, + options=["--log-level-console=verbose"]) + + self.assertNotIn( + "VERBOSE: Skip file validation due to partial restore", output) + + # @unittest.skip("skip") + def test_not_validate_diffenent_pg_version(self): + """Do not validate backup, if binary is compiled with different PG version""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + backup_id = self.backup_node(backup_dir, 'node', node) + + control_file = os.path.join( + backup_dir, "backups", "node", backup_id, + "backup.control") + + pg_version = node.major_version + + if pg_version.is_integer(): + pg_version = int(pg_version) + + fake_new_pg_version = pg_version + 1 + + with open(control_file, 'r') as f: + data = f.read(); + + data = data.replace( + "server-version = {0}".format(str(pg_version)), + "server-version = {0}".format(str(fake_new_pg_version))) + + with open(control_file, 'w') as f: + f.write(data); + + try: + self.validate_pb(backup_dir) + self.assertEqual( + 1, 0, + "Expecting Error because validation is forbidden if server version of backup " + "is different from the server version of pg_probackup.\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + "ERROR: Backup {0} has server version".format(backup_id), + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_validate_corrupt_page_header_map(self): + """ + Check that corruption in page_header_map is detected + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + ok_1 = self.backup_node(backup_dir, 'node', node, options=['--stream']) + + # FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + ok_2 = self.backup_node(backup_dir, 'node', node, options=['--stream']) + + page_header_map = os.path.join( + backup_dir, 'backups', 'node', backup_id, 'page_header_map') + + # Corrupt tablespace_map file in FULL backup + with open(page_header_map, "rb+", 0) as f: + f.seek(42) + f.write(b"blah") + f.flush() + f.close + + try: + self.validate_pb(backup_dir, 'node', backup_id=backup_id) + self.assertEqual( + 1, 0, + "Expecting Error because page_header is corrupted.\n " + "Output: {0} \n CMD: 
{1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertRegex( + e.message, + r'WARNING: An error occured during metadata decompression for file "[\w/]+": (data|buffer) error', + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn("Backup {0} is corrupt".format(backup_id), e.message) + + try: + self.validate_pb(backup_dir) + self.assertEqual( + 1, 0, + "Expecting Error because page_header is corrupted.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'WARNING: An error occured during metadata decompression' in e.message and + 'data error' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.assertIn("INFO: Backup {0} data files are valid".format(ok_1), e.message) + self.assertIn("WARNING: Backup {0} data files are corrupted".format(backup_id), e.message) + self.assertIn("INFO: Backup {0} data files are valid".format(ok_2), e.message) + + self.assertIn("WARNING: Some backups are not valid", e.message) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_validate_truncated_page_header_map(self): + """ + Check that corruption in page_header_map is detected + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + ok_1 = self.backup_node(backup_dir, 'node', node, options=['--stream']) + + # FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + ok_2 = self.backup_node(backup_dir, 'node', node, options=['--stream']) + + page_header_map = os.path.join( + backup_dir, 'backups', 'node', backup_id, 'page_header_map') + + # truncate page_header_map file + with open(page_header_map, "rb+", 0) as f: + f.truncate(121) + f.flush() + f.close + + try: + self.validate_pb(backup_dir, 'node', backup_id=backup_id) + self.assertEqual( + 1, 0, + "Expecting Error because page_header is corrupted.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Backup {0} is corrupt'.format(backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + try: + self.validate_pb(backup_dir) + self.assertEqual( + 1, 0, + "Expecting Error because page_header is corrupted.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn("INFO: Backup {0} data files are valid".format(ok_1), e.message) + self.assertIn("WARNING: Backup {0} data files are corrupted".format(backup_id), e.message) + self.assertIn("INFO: Backup {0} data files are valid".format(ok_2), e.message) + self.assertIn("WARNING: Some backups are not valid", e.message) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_validate_missing_page_header_map(self): + """ + Check that corruption in page_header_map is detected + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', 
node) + node.slow_start() + + ok_1 = self.backup_node(backup_dir, 'node', node, options=['--stream']) + + # FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + ok_2 = self.backup_node(backup_dir, 'node', node, options=['--stream']) + + page_header_map = os.path.join( + backup_dir, 'backups', 'node', backup_id, 'page_header_map') + + # unlink page_header_map file + os.remove(page_header_map) + + try: + self.validate_pb(backup_dir, 'node', backup_id=backup_id) + self.assertEqual( + 1, 0, + "Expecting Error because page_header is corrupted.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: Backup {0} is corrupt'.format(backup_id), e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + try: + self.validate_pb(backup_dir) + self.assertEqual( + 1, 0, + "Expecting Error because page_header is corrupted.\n " + "Output: {0} \n CMD: {1}".format( + self.output, self.cmd)) + except ProbackupException as e: + self.assertIn("INFO: Backup {0} data files are valid".format(ok_1), e.message) + self.assertIn("WARNING: Backup {0} data files are corrupted".format(backup_id), e.message) + self.assertIn("INFO: Backup {0} data files are valid".format(ok_2), e.message) + self.assertIn("WARNING: Some backups are not valid", e.message) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_no_validate_tablespace_map(self): + """ + Check that --no-validate is propagated to tablespace_map + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'external_dir') + + node.safe_psql( + 'postgres', + 'CREATE TABLE t_heap(a int) TABLESPACE "external_dir"') + + tblspace_new = self.get_tblspace_path(node, 'external_dir_new') + + oid = node.safe_psql( + 'postgres', + "select oid from pg_tablespace where spcname = 'external_dir'").decode('utf-8').rstrip() + + # FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, options=['--stream']) + + pgdata = self.pgdata_content(node.data_dir) + + tablespace_map = os.path.join( + backup_dir, 'backups', 'node', + backup_id, 'database', 'tablespace_map') + + # overwrite tablespace_map file + with open(tablespace_map, "w") as f: + f.write("{0} {1}".format(oid, tblspace_new)) + f.close + + node.cleanup() + + self.restore_node(backup_dir, 'node', node, options=['--no-validate']) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # check that tablespace restore as symlink + tablespace_link = os.path.join(node.data_dir, 'pg_tblspc', oid) + self.assertTrue( + os.path.islink(tablespace_link), + "'%s' is not a symlink" % tablespace_link) + + self.assertEqual( + os.readlink(tablespace_link), + tblspace_new, + "Symlink '{0}' do not points to '{1}'".format(tablespace_link, tblspace_new)) + +# validate empty backup list +# page from future during validate +# page from future during backup + +# corrupt block, so file become unaligned: +# 712 Assert(header.compressed_size <= BLCKSZ); +# 713 +# 714 read_len = fread(compressed_page.data, 1, +# 715 MAXALIGN(header.compressed_size), in); +# 716 if (read_len != 
MAXALIGN(header.compressed_size)) +# -> 717 elog(ERROR, "cannot read block %u of \"%s\" read %lu of %d", +# 718 blknum, file->path, read_len, header.compressed_size); From 4c09debe6f3f801bf856019767bb88e9e0c60a82 Mon Sep 17 00:00:00 2001 From: "v.shepard" Date: Thu, 24 Nov 2022 10:26:35 +0100 Subject: [PATCH 394/525] Revert "PBCKP-306 add '_test' to tests files" This reverts commit 2b8a1532350af09eed827f3024f8f4a30baf04fb. --- tests/CVE_2018_1058_test.py | 129 - tests/archive_test.py | 2707 ------------------ tests/backup_test.py | 3564 ----------------------- tests/cfs_backup_test.py | 1235 -------- tests/cfs_catchup_test.py | 117 - tests/cfs_restore_test.py | 450 --- tests/cfs_validate_backup_test.py | 24 - tests/checkdb_test.py | 849 ------ tests/compatibility_test.py | 1500 ---------- tests/compression_test.py | 495 ---- tests/config_test.py | 113 - tests/delete_test.py | 822 ------ tests/delta_test.py | 1201 -------- tests/exclude_test.py | 338 --- tests/external_test.py | 2405 ---------------- tests/false_positive_test.py | 337 --- tests/incr_restore_test.py | 2300 --------------- tests/init_test.py | 138 - tests/locking_test.py | 629 ---- tests/logging_test.py | 345 --- tests/merge_test.py | 2759 ------------------ tests/option_test.py | 231 -- tests/page_test.py | 1424 ---------- tests/pgpro2068_test.py | 188 -- tests/pgpro560_test.py | 123 - tests/pgpro589_test.py | 72 - tests/ptrack_test.py | 4407 ----------------------------- tests/remote_test.py | 43 - tests/replica_test.py | 1654 ----------- tests/restore_test.py | 3822 ------------------------- tests/retention_test.py | 2529 ----------------- tests/set_backup_test.py | 476 ---- tests/show_test.py | 509 ---- tests/time_consuming_test.py | 77 - tests/time_stamp_test.py | 236 -- tests/validate_test.py | 4083 -------------------------- 36 files changed, 42331 deletions(-) delete mode 100644 tests/CVE_2018_1058_test.py delete mode 100644 tests/archive_test.py delete mode 100644 tests/backup_test.py delete mode 100644 tests/cfs_backup_test.py delete mode 100644 tests/cfs_catchup_test.py delete mode 100644 tests/cfs_restore_test.py delete mode 100644 tests/cfs_validate_backup_test.py delete mode 100644 tests/checkdb_test.py delete mode 100644 tests/compatibility_test.py delete mode 100644 tests/compression_test.py delete mode 100644 tests/config_test.py delete mode 100644 tests/delete_test.py delete mode 100644 tests/delta_test.py delete mode 100644 tests/exclude_test.py delete mode 100644 tests/external_test.py delete mode 100644 tests/false_positive_test.py delete mode 100644 tests/incr_restore_test.py delete mode 100644 tests/init_test.py delete mode 100644 tests/locking_test.py delete mode 100644 tests/logging_test.py delete mode 100644 tests/merge_test.py delete mode 100644 tests/option_test.py delete mode 100644 tests/page_test.py delete mode 100644 tests/pgpro2068_test.py delete mode 100644 tests/pgpro560_test.py delete mode 100644 tests/pgpro589_test.py delete mode 100644 tests/ptrack_test.py delete mode 100644 tests/remote_test.py delete mode 100644 tests/replica_test.py delete mode 100644 tests/restore_test.py delete mode 100644 tests/retention_test.py delete mode 100644 tests/set_backup_test.py delete mode 100644 tests/show_test.py delete mode 100644 tests/time_consuming_test.py delete mode 100644 tests/time_stamp_test.py delete mode 100644 tests/validate_test.py diff --git a/tests/CVE_2018_1058_test.py b/tests/CVE_2018_1058_test.py deleted file mode 100644 index cfd55cc60..000000000 --- a/tests/CVE_2018_1058_test.py +++ 
/dev/null @@ -1,129 +0,0 @@ -import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException - -class CVE_2018_1058(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - def test_basic_default_search_path(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - 'postgres', - "CREATE FUNCTION public.pgpro_edition() " - "RETURNS text " - "AS $$ " - "BEGIN " - " RAISE 'pg_probackup vulnerable!'; " - "END " - "$$ LANGUAGE plpgsql") - - self.backup_node(backup_dir, 'node', node, backup_type='full', options=['--stream']) - - # @unittest.skip("skip") - def test_basic_backup_modified_search_path(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True) - self.set_auto_conf(node, options={'search_path': 'public,pg_catalog'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - 'postgres', - "CREATE FUNCTION public.pg_control_checkpoint(OUT timeline_id integer, OUT dummy integer) " - "RETURNS record " - "AS $$ " - "BEGIN " - " RAISE '% vulnerable!', 'pg_probackup'; " - "END " - "$$ LANGUAGE plpgsql") - - node.safe_psql( - 'postgres', - "CREATE FUNCTION public.pg_proc(OUT proname name, OUT dummy integer) " - "RETURNS record " - "AS $$ " - "BEGIN " - " RAISE '% vulnerable!', 'pg_probackup'; " - "END " - "$$ LANGUAGE plpgsql; " - "CREATE VIEW public.pg_proc AS SELECT proname FROM public.pg_proc()") - - self.backup_node(backup_dir, 'node', node, backup_type='full', options=['--stream']) - - log_file = os.path.join(node.logs_dir, 'postgresql.log') - with open(log_file, 'r') as f: - log_content = f.read() - self.assertFalse( - 'pg_probackup vulnerable!' 
in log_content) - - # @unittest.skip("skip") - def test_basic_checkdb_modified_search_path(self): - """""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - self.set_auto_conf(node, options={'search_path': 'public,pg_catalog'}) - node.slow_start() - - node.safe_psql( - 'postgres', - "CREATE FUNCTION public.pg_database(OUT datname name, OUT oid oid, OUT dattablespace oid) " - "RETURNS record " - "AS $$ " - "BEGIN " - " RAISE 'pg_probackup vulnerable!'; " - "END " - "$$ LANGUAGE plpgsql; " - "CREATE VIEW public.pg_database AS SELECT * FROM public.pg_database()") - - node.safe_psql( - 'postgres', - "CREATE FUNCTION public.pg_extension(OUT extname name, OUT extnamespace oid, OUT extversion text) " - "RETURNS record " - "AS $$ " - "BEGIN " - " RAISE 'pg_probackup vulnerable!'; " - "END " - "$$ LANGUAGE plpgsql; " - "CREATE FUNCTION public.pg_namespace(OUT oid oid, OUT nspname name) " - "RETURNS record " - "AS $$ " - "BEGIN " - " RAISE 'pg_probackup vulnerable!'; " - "END " - "$$ LANGUAGE plpgsql; " - "CREATE VIEW public.pg_extension AS SELECT * FROM public.pg_extension();" - "CREATE VIEW public.pg_namespace AS SELECT * FROM public.pg_namespace();" - ) - - try: - self.checkdb_node( - options=[ - '--amcheck', - '--skip-block-validation', - '-d', 'postgres', '-p', str(node.port)]) - self.assertEqual( - 1, 0, - "Expecting Error because amcheck{,_next} not installed\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "WARNING: Extension 'amcheck' or 'amcheck_next' are not installed in database postgres", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) diff --git a/tests/archive_test.py b/tests/archive_test.py deleted file mode 100644 index 5e59dd268..000000000 --- a/tests/archive_test.py +++ /dev/null @@ -1,2707 +0,0 @@ -import os -import shutil -import gzip -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, GdbException -from datetime import datetime, timedelta -import subprocess -from sys import exit -from time import sleep -from distutils.dir_util import copy_tree - - -class ArchiveTest(ProbackupTest, unittest.TestCase): - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_pgpro434_1(self): - """Description in jira issue PGPRO-434""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "create table t_heap as select 1 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector from " - "generate_series(0,100) i") - - result = node.safe_psql("postgres", "SELECT * FROM t_heap") - self.backup_node( - backup_dir, 'node', node) - node.cleanup() - - self.restore_node( - backup_dir, 'node', node) - node.slow_start() - - # Recreate backup catalog - self.clean_pb(backup_dir) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - - # Make backup - self.backup_node(backup_dir, 'node', node) - node.cleanup() - - # Restore Database - self.restore_node(backup_dir, 'node', node) - node.slow_start() - - 
self.assertEqual( - result, node.safe_psql("postgres", "SELECT * FROM t_heap"), - 'data after restore not equal to original data') - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_pgpro434_2(self): - """ - Check that timelines are correct. - WAITING PGPRO-1053 for --immediate - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'} - ) - - if self.get_version(node) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because pg_control_checkpoint() is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FIRST TIMELINE - node.safe_psql( - "postgres", - "create table t_heap as select 1 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,100) i") - backup_id = self.backup_node(backup_dir, 'node', node) - node.safe_psql( - "postgres", - "insert into t_heap select 100501 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1) i") - - # SECOND TIMELIN - node.cleanup() - self.restore_node( - backup_dir, 'node', node, - options=['--immediate', '--recovery-target-action=promote']) - node.slow_start() - - if self.verbose: - print(node.safe_psql( - "postgres", - "select redo_wal_file from pg_control_checkpoint()")) - self.assertFalse( - node.execute( - "postgres", - "select exists(select 1 " - "from t_heap where id = 100501)")[0][0], - 'data after restore not equal to original data') - - node.safe_psql( - "postgres", - "insert into t_heap select 2 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(100,200) i") - - backup_id = self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "insert into t_heap select 100502 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,256) i") - - # THIRD TIMELINE - node.cleanup() - self.restore_node( - backup_dir, 'node', node, - options=['--immediate', '--recovery-target-action=promote']) - node.slow_start() - - if self.verbose: - print( - node.safe_psql( - "postgres", - "select redo_wal_file from pg_control_checkpoint()")) - - node.safe_psql( - "postgres", - "insert into t_heap select 3 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(200,300) i") - - backup_id = self.backup_node(backup_dir, 'node', node) - - result = node.safe_psql("postgres", "SELECT * FROM t_heap") - node.safe_psql( - "postgres", - "insert into t_heap select 100503 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,256) i") - - # FOURTH TIMELINE - node.cleanup() - self.restore_node( - backup_dir, 'node', node, - options=['--immediate', '--recovery-target-action=promote']) - node.slow_start() - - if self.verbose: - print('Fourth timeline') - print(node.safe_psql( - "postgres", - "select redo_wal_file from pg_control_checkpoint()")) - - # FIFTH TIMELINE - node.cleanup() - self.restore_node( - backup_dir, 'node', node, - options=['--immediate', '--recovery-target-action=promote']) - node.slow_start() - - if self.verbose: - print('Fifth timeline') - print(node.safe_psql( - 
"postgres", - "select redo_wal_file from pg_control_checkpoint()")) - - # SIXTH TIMELINE - node.cleanup() - self.restore_node( - backup_dir, 'node', node, - options=['--immediate', '--recovery-target-action=promote']) - node.slow_start() - - if self.verbose: - print('Sixth timeline') - print(node.safe_psql( - "postgres", - "select redo_wal_file from pg_control_checkpoint()")) - - self.assertFalse( - node.execute( - "postgres", - "select exists(select 1 from t_heap where id > 100500)")[0][0], - 'data after restore not equal to original data') - - self.assertEqual( - result, - node.safe_psql( - "postgres", - "SELECT * FROM t_heap"), - 'data after restore not equal to original data') - - # @unittest.skip("skip") - def test_pgpro434_3(self): - """ - Check pg_stop_backup_timeout, needed backup_timeout - Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7 - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - - node.slow_start() - - gdb = self.backup_node( - backup_dir, 'node', node, - options=[ - "--archive-timeout=60", - "--log-level-file=LOG"], - gdb=True) - - # Attention! this breakpoint has been set on internal probackup function, not on a postgres core one - gdb.set_breakpoint('pg_stop_backup') - gdb.run_until_break() - - self.set_auto_conf(node, {'archive_command': 'exit 1'}) - node.reload() - - gdb.continue_execution_until_exit() - - sleep(1) - - log_file = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(log_file, 'r') as f: - log_content = f.read() - - # in PG =< 9.6 pg_stop_backup always wait - if self.get_version(node) < 100000: - self.assertIn( - "ERROR: pg_stop_backup doesn't answer in 60 seconds, cancel it", - log_content) - else: - self.assertIn( - "ERROR: WAL segment 000000010000000000000003 could not be archived in 60 seconds", - log_content) - - log_file = os.path.join(node.logs_dir, 'postgresql.log') - with open(log_file, 'r') as f: - log_content = f.read() - - self.assertNotIn( - 'FailedAssertion', - log_content, - 'PostgreSQL crashed because of a failed assert') - - # @unittest.skip("skip") - def test_pgpro434_4(self): - """ - Check pg_stop_backup_timeout, libpq-timeout requested. - Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7 - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - - node.slow_start() - - gdb = self.backup_node( - backup_dir, 'node', node, - options=[ - "--archive-timeout=60", - "--log-level-file=info"], - gdb=True) - - # Attention! 
this breakpoint has been set on internal probackup function, not on a postgres core one - gdb.set_breakpoint('pg_stop_backup') - gdb.run_until_break() - - self.set_auto_conf(node, {'archive_command': 'exit 1'}) - node.reload() - - os.environ["PGAPPNAME"] = "foo" - - pid = node.safe_psql( - "postgres", - "SELECT pid " - "FROM pg_stat_activity " - "WHERE application_name = 'pg_probackup'").decode('utf-8').rstrip() - - os.environ["PGAPPNAME"] = "pg_probackup" - - postgres_gdb = self.gdb_attach(pid) - postgres_gdb.set_breakpoint('do_pg_stop_backup') - postgres_gdb.continue_execution_until_running() - - gdb.continue_execution_until_exit() - # gdb._execute('detach') - - log_file = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(log_file, 'r') as f: - log_content = f.read() - - if self.get_version(node) < 150000: - self.assertIn( - "ERROR: pg_stop_backup doesn't answer in 60 seconds, cancel it", - log_content) - else: - self.assertIn( - "ERROR: pg_backup_stop doesn't answer in 60 seconds, cancel it", - log_content) - - log_file = os.path.join(node.logs_dir, 'postgresql.log') - with open(log_file, 'r') as f: - log_content = f.read() - - self.assertNotIn( - 'FailedAssertion', - log_content, - 'PostgreSQL crashed because of a failed assert') - - # @unittest.skip("skip") - def test_archive_push_file_exists(self): - """Archive-push if file exists""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - - wals_dir = os.path.join(backup_dir, 'wal', 'node') - if self.archive_compress: - filename = '000000010000000000000001.gz' - file = os.path.join(wals_dir, filename) - else: - filename = '000000010000000000000001' - file = os.path.join(wals_dir, filename) - - with open(file, 'a+b') as f: - f.write(b"blablablaadssaaaaaaaaaaaaaaa") - f.flush() - f.close() - - node.slow_start() - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,100500) i") - log_file = os.path.join(node.logs_dir, 'postgresql.log') - - self.switch_wal_segment(node) - sleep(1) - - with open(log_file, 'r') as f: - log_content = f.read() - self.assertIn( - 'LOG: archive command failed with exit code 1', - log_content) - - self.assertIn( - 'DETAIL: The failed archive command was:', - log_content) - - self.assertIn( - 'pg_probackup archive-push WAL file', - log_content) - - self.assertIn( - 'WAL file already exists in archive with different checksum', - log_content) - - self.assertNotIn( - 'pg_probackup archive-push completed successfully', log_content) - - if self.get_version(node) < 100000: - wal_src = os.path.join( - node.data_dir, 'pg_xlog', '000000010000000000000001') - else: - wal_src = os.path.join( - node.data_dir, 'pg_wal', '000000010000000000000001') - - if self.archive_compress: - with open(wal_src, 'rb') as f_in, gzip.open( - file, 'wb', compresslevel=1) as f_out: - shutil.copyfileobj(f_in, f_out) - else: - shutil.copyfile(wal_src, file) - - self.switch_wal_segment(node) - sleep(5) - - with open(log_file, 'r') as f: - log_content = f.read() - - self.assertIn( - 'pg_probackup archive-push completed successfully', - log_content) - - # btw check 
that console coloring codes are not slipped into log file - self.assertNotIn('[0m', log_content) - - print(log_content) - - # @unittest.skip("skip") - def test_archive_push_file_exists_overwrite(self): - """Archive-push if file exists""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - - wals_dir = os.path.join(backup_dir, 'wal', 'node') - if self.archive_compress: - filename = '000000010000000000000001.gz' - file = os.path.join(wals_dir, filename) - else: - filename = '000000010000000000000001' - file = os.path.join(wals_dir, filename) - - with open(file, 'a+b') as f: - f.write(b"blablablaadssaaaaaaaaaaaaaaa") - f.flush() - f.close() - - node.slow_start() - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,100500) i") - log_file = os.path.join(node.logs_dir, 'postgresql.log') - - self.switch_wal_segment(node) - sleep(1) - - with open(log_file, 'r') as f: - log_content = f.read() - - self.assertIn( - 'LOG: archive command failed with exit code 1', log_content) - self.assertIn( - 'DETAIL: The failed archive command was:', log_content) - self.assertIn( - 'pg_probackup archive-push WAL file', log_content) - self.assertNotIn( - 'WAL file already exists in archive with ' - 'different checksum, overwriting', log_content) - self.assertIn( - 'WAL file already exists in archive with ' - 'different checksum', log_content) - - self.assertNotIn( - 'pg_probackup archive-push completed successfully', log_content) - - self.set_archiving(backup_dir, 'node', node, overwrite=True) - node.reload() - self.switch_wal_segment(node) - sleep(5) - - with open(log_file, 'r') as f: - log_content = f.read() - self.assertTrue( - 'pg_probackup archive-push completed successfully' in log_content, - 'Expecting messages about successfull execution archive_command') - - self.assertIn( - 'WAL file already exists in archive with ' - 'different checksum, overwriting', log_content) - - # @unittest.skip("skip") - def test_archive_push_partial_file_exists(self): - """Archive-push if stale '.part' file exists""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving( - backup_dir, 'node', node, - log_level='verbose', archive_timeout=60) - - node.slow_start() - - # this backup is needed only for validation to xid - self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "create table t1(a int)") - - xid = node.safe_psql( - "postgres", - "INSERT INTO t1 VALUES (1) RETURNING (xmin)").decode('utf-8').rstrip() - - if self.get_version(node) < 100000: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location());").rstrip() - else: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() - - filename_orig = 
filename_orig.decode('utf-8') - - # form up path to next .part WAL segment - wals_dir = os.path.join(backup_dir, 'wal', 'node') - if self.archive_compress: - filename = filename_orig + '.gz' + '.part' - file = os.path.join(wals_dir, filename) - else: - filename = filename_orig + '.part' - file = os.path.join(wals_dir, filename) - - # emulate stale .part file - with open(file, 'a+b') as f: - f.write(b"blahblah") - f.flush() - f.close() - - self.switch_wal_segment(node) - sleep(70) - - # check that segment is archived - if self.archive_compress: - filename_orig = filename_orig + '.gz' - - file = os.path.join(wals_dir, filename_orig) - self.assertTrue(os.path.isfile(file)) - - # successful validate means that archive-push reused stale wal segment - self.validate_pb( - backup_dir, 'node', - options=['--recovery-target-xid={0}'.format(xid)]) - - log_file = os.path.join(node.logs_dir, 'postgresql.log') - with open(log_file, 'r') as f: - log_content = f.read() - - self.assertIn( - 'Reusing stale temp WAL file', - log_content) - - # @unittest.skip("skip") - def test_archive_push_part_file_exists_not_stale(self): - """Archive-push if .part file exists and it is not stale""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node, archive_timeout=60) - - node.slow_start() - - node.safe_psql( - "postgres", - "create table t1()") - self.switch_wal_segment(node) - - node.safe_psql( - "postgres", - "create table t2()") - - if self.get_version(node) < 100000: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location());").rstrip() - else: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() - - filename_orig = filename_orig.decode('utf-8') - - # form up path to next .part WAL segment - wals_dir = os.path.join(backup_dir, 'wal', 'node') - if self.archive_compress: - filename = filename_orig + '.gz' + '.part' - file = os.path.join(wals_dir, filename) - else: - filename = filename_orig + '.part' - file = os.path.join(wals_dir, filename) - - with open(file, 'a+b') as f: - f.write(b"blahblah") - f.flush() - f.close() - - self.switch_wal_segment(node) - sleep(30) - - with open(file, 'a+b') as f: - f.write(b"blahblahblahblah") - f.flush() - f.close() - - sleep(40) - - # check that segment is NOT archived - if self.archive_compress: - filename_orig = filename_orig + '.gz' - - file = os.path.join(wals_dir, filename_orig) - - self.assertFalse(os.path.isfile(file)) - - # log_file = os.path.join(node.logs_dir, 'postgresql.log') - # with open(log_file, 'r') as f: - # log_content = f.read() - # self.assertIn( - # 'is not stale', - # log_content) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_replica_archive(self): - """ - make node without archiving, take stream backup and - turn it into replica, set replica with archiving, - make archive backup from replica - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'archive_timeout': 
'10s', - 'checkpoint_timeout': '30s', - 'max_wal_size': '32MB'}) - - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - # ADD INSTANCE 'MASTER' - self.add_instance(backup_dir, 'master', master) - master.slow_start() - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - master.psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") - - self.backup_node(backup_dir, 'master', master, options=['--stream']) - before = master.safe_psql("postgres", "SELECT * FROM t_heap") - - # Settings for Replica - self.restore_node(backup_dir, 'master', replica) - self.set_replica(master, replica, synchronous=True) - - self.add_instance(backup_dir, 'replica', replica) - self.set_archiving(backup_dir, 'replica', replica, replica=True) - replica.slow_start(replica=True) - - # Check data correctness on replica - after = replica.safe_psql("postgres", "SELECT * FROM t_heap") - self.assertEqual(before, after) - - # Change data on master, take FULL backup from replica, - # restore taken backup and check that restored data equal - # to original data - master.psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(256,512) i") - before = master.safe_psql("postgres", "SELECT * FROM t_heap") - - backup_id = self.backup_node( - backup_dir, 'replica', replica, - options=[ - '--archive-timeout=30', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) - - self.validate_pb(backup_dir, 'replica') - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) - - # RESTORE FULL BACKUP TAKEN FROM replica - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node')) - node.cleanup() - self.restore_node(backup_dir, 'replica', data_dir=node.data_dir) - - self.set_auto_conf(node, {'port': node.port}) - node.slow_start() - # CHECK DATA CORRECTNESS - after = node.safe_psql("postgres", "SELECT * FROM t_heap") - self.assertEqual(before, after) - - # Change data on master, make PAGE backup from replica, - # restore taken backup and check that restored data equal - # to original data - master.psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(512,80680) i") - - before = master.safe_psql("postgres", "SELECT * FROM t_heap") - - self.wait_until_replica_catch_with_master(master, replica) - - backup_id = self.backup_node( - backup_dir, 'replica', - replica, backup_type='page', - options=[ - '--archive-timeout=60', - '--master-db=postgres', - '--master-host=localhost', - '--master-port={0}'.format(master.port), - '--stream']) - - self.validate_pb(backup_dir, 'replica') - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) - - # RESTORE PAGE BACKUP TAKEN FROM replica - node.cleanup() - self.restore_node( - backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id) - - self.set_auto_conf(node, {'port': node.port}) - - node.slow_start() - # CHECK DATA CORRECTNESS - after = node.safe_psql("postgres", "SELECT * FROM t_heap") - self.assertEqual(before, after) - - # 
@unittest.expectedFailure - # @unittest.skip("skip") - def test_master_and_replica_parallel_archiving(self): - """ - make node 'master 'with archiving, - take archive backup and turn it into replica, - set replica with archiving, make archive backup from replica, - make archive backup from master - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'archive_timeout': '10s'} - ) - - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.init_pb(backup_dir) - # ADD INSTANCE 'MASTER' - self.add_instance(backup_dir, 'master', master) - self.set_archiving(backup_dir, 'master', master) - master.slow_start() - - master.psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - - # TAKE FULL ARCHIVE BACKUP FROM MASTER - self.backup_node(backup_dir, 'master', master) - # GET LOGICAL CONTENT FROM MASTER - before = master.safe_psql("postgres", "SELECT * FROM t_heap") - # GET PHYSICAL CONTENT FROM MASTER - pgdata_master = self.pgdata_content(master.data_dir) - - # Settings for Replica - self.restore_node(backup_dir, 'master', replica) - # CHECK PHYSICAL CORRECTNESS on REPLICA - pgdata_replica = self.pgdata_content(replica.data_dir) - self.compare_pgdata(pgdata_master, pgdata_replica) - - self.set_replica(master, replica) - # ADD INSTANCE REPLICA - self.add_instance(backup_dir, 'replica', replica) - # SET ARCHIVING FOR REPLICA - self.set_archiving(backup_dir, 'replica', replica, replica=True) - replica.slow_start(replica=True) - - # CHECK LOGICAL CORRECTNESS on REPLICA - after = replica.safe_psql("postgres", "SELECT * FROM t_heap") - self.assertEqual(before, after) - - master.psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0, 60000) i") - - backup_id = self.backup_node( - backup_dir, 'replica', replica, - options=[ - '--archive-timeout=30', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) - - self.validate_pb(backup_dir, 'replica') - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) - - # TAKE FULL ARCHIVE BACKUP FROM MASTER - backup_id = self.backup_node(backup_dir, 'master', master) - self.validate_pb(backup_dir, 'master') - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'master', backup_id)['status']) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_basic_master_and_replica_concurrent_archiving(self): - """ - make node 'master 'with archiving, - take archive backup and turn it into replica, - set replica with archiving, - make sure that archiving on both node is working. 
- """ - if self.pg_config_version < self.version_to_num('9.6.0'): - self.skipTest('You need PostgreSQL >= 9.6 for this test') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s', - 'archive_timeout': '10s'}) - - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.init_pb(backup_dir) - # ADD INSTANCE 'MASTER' - self.add_instance(backup_dir, 'master', master) - self.set_archiving(backup_dir, 'master', master) - master.slow_start() - - master.psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - - master.pgbench_init(scale=5) - - # TAKE FULL ARCHIVE BACKUP FROM MASTER - self.backup_node(backup_dir, 'master', master) - # GET LOGICAL CONTENT FROM MASTER - before = master.safe_psql("postgres", "SELECT * FROM t_heap") - # GET PHYSICAL CONTENT FROM MASTER - pgdata_master = self.pgdata_content(master.data_dir) - - # Settings for Replica - self.restore_node( - backup_dir, 'master', replica) - # CHECK PHYSICAL CORRECTNESS on REPLICA - pgdata_replica = self.pgdata_content(replica.data_dir) - self.compare_pgdata(pgdata_master, pgdata_replica) - - self.set_replica(master, replica, synchronous=False) - # ADD INSTANCE REPLICA - # self.add_instance(backup_dir, 'replica', replica) - # SET ARCHIVING FOR REPLICA - self.set_archiving(backup_dir, 'master', replica, replica=True) - replica.slow_start(replica=True) - - # CHECK LOGICAL CORRECTNESS on REPLICA - after = replica.safe_psql("postgres", "SELECT * FROM t_heap") - self.assertEqual(before, after) - - master.psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - - # TAKE FULL ARCHIVE BACKUP FROM REPLICA - backup_id = self.backup_node(backup_dir, 'master', replica) - - self.validate_pb(backup_dir, 'master') - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'master', backup_id)['status']) - - # TAKE FULL ARCHIVE BACKUP FROM MASTER - backup_id = self.backup_node(backup_dir, 'master', master) - self.validate_pb(backup_dir, 'master') - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'master', backup_id)['status']) - - master.pgbench_init(scale=10) - - sleep(10) - - replica.promote() - - master.pgbench_init(scale=10) - replica.pgbench_init(scale=10) - - self.backup_node(backup_dir, 'master', master) - self.backup_node(backup_dir, 'master', replica) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_concurrent_archiving(self): - """ - Concurrent archiving from master, replica and cascade replica - https://github.com/postgrespro/pg_probackup/issues/327 - - For PG >= 11 it is expected to pass this test - """ - - if self.pg_config_version < self.version_to_num('11.0'): - self.skipTest('You need PostgreSQL >= 11 for this test') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, 
- initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', master) - self.set_archiving(backup_dir, 'node', master, replica=True) - master.slow_start() - - master.pgbench_init(scale=10) - - # TAKE FULL ARCHIVE BACKUP FROM MASTER - self.backup_node(backup_dir, 'node', master) - - # Settings for Replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - self.restore_node(backup_dir, 'node', replica) - - self.set_replica(master, replica, synchronous=True) - self.set_archiving(backup_dir, 'node', replica, replica=True) - self.set_auto_conf(replica, {'port': replica.port}) - replica.slow_start(replica=True) - - # create cascade replicas - replica1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica1')) - replica1.cleanup() - - # Settings for casaced replica - self.restore_node(backup_dir, 'node', replica1) - self.set_replica(replica, replica1, synchronous=False) - self.set_auto_conf(replica1, {'port': replica1.port}) - replica1.slow_start(replica=True) - - # Take full backup from master - self.backup_node(backup_dir, 'node', master) - - pgbench = master.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '30', '-c', '1']) - - # Take several incremental backups from master - self.backup_node(backup_dir, 'node', master, backup_type='page', options=['--no-validate']) - - self.backup_node(backup_dir, 'node', master, backup_type='page', options=['--no-validate']) - - pgbench.wait() - pgbench.stdout.close() - - with open(os.path.join(master.logs_dir, 'postgresql.log'), 'r') as f: - log_content = f.read() - self.assertNotIn('different checksum', log_content) - - with open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f: - log_content = f.read() - self.assertNotIn('different checksum', log_content) - - with open(os.path.join(replica1.logs_dir, 'postgresql.log'), 'r') as f: - log_content = f.read() - self.assertNotIn('different checksum', log_content) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_archive_pg_receivexlog(self): - """Test backup with pg_receivexlog wal delivary method""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - if self.get_version(node) < 100000: - pg_receivexlog_path = self.get_bin_path('pg_receivexlog') - else: - pg_receivexlog_path = self.get_bin_path('pg_receivewal') - - pg_receivexlog = self.run_binary( - [ - pg_receivexlog_path, '-p', str(node.port), '--synchronous', - '-D', os.path.join(backup_dir, 'wal', 'node') - ], asynchronous=True) - - if pg_receivexlog.returncode: - self.assertFalse( - True, - 'Failed to start pg_receivexlog: {0}'.format( - pg_receivexlog.communicate()[1])) - - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - - self.backup_node(backup_dir, 'node', node) - - # PAGE - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(10000,20000) i") - - self.backup_node( - 
backup_dir, - 'node', - node, - backup_type='page' - ) - result = node.safe_psql("postgres", "SELECT * FROM t_heap") - self.validate_pb(backup_dir) - - # Check data correctness - node.cleanup() - self.restore_node(backup_dir, 'node', node) - node.slow_start() - - self.assertEqual( - result, - node.safe_psql( - "postgres", "SELECT * FROM t_heap" - ), - 'data after restore not equal to original data') - - # Clean after yourself - pg_receivexlog.kill() - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_archive_pg_receivexlog_compression_pg10(self): - """Test backup with pg_receivewal compressed wal delivary method""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'} - ) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - if self.get_version(node) < self.version_to_num('10.0'): - self.skipTest('You need PostgreSQL >= 10 for this test') - else: - pg_receivexlog_path = self.get_bin_path('pg_receivewal') - - pg_receivexlog = self.run_binary( - [ - pg_receivexlog_path, '-p', str(node.port), '--synchronous', - '-Z', '9', '-D', os.path.join(backup_dir, 'wal', 'node') - ], asynchronous=True) - - if pg_receivexlog.returncode: - self.assertFalse( - True, - 'Failed to start pg_receivexlog: {0}'.format( - pg_receivexlog.communicate()[1])) - - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - - self.backup_node(backup_dir, 'node', node) - - # PAGE - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(10000,20000) i") - - self.backup_node( - backup_dir, 'node', node, - backup_type='page' - ) - result = node.safe_psql("postgres", "SELECT * FROM t_heap") - self.validate_pb(backup_dir) - - # Check data correctness - node.cleanup() - self.restore_node(backup_dir, 'node', node) - node.slow_start() - - self.assertEqual( - result, node.safe_psql("postgres", "SELECT * FROM t_heap"), - 'data after restore not equal to original data') - - # Clean after yourself - pg_receivexlog.kill() - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_archive_catalog(self): - """ - ARCHIVE replica: - - t6 |----------------------- - t5 | |------- - | | - t4 | |-------------- - | | - t3 | |--B1--|/|--B2-|/|-B3--- - | | - t2 |--A1--------A2--- - t1 ---------Y1--Y2-- - - ARCHIVE master: - t1 -Z1--Z2--- - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'archive_timeout': '30s', - 'checkpoint_timeout': '30s'}) - - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - self.set_archiving(backup_dir, 'master', master) - - master.slow_start() - - # FULL - master.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - 
"from generate_series(0,10000) i") - - self.backup_node(backup_dir, 'master', master) - - # PAGE - master.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(10000,20000) i") - - self.backup_node( - backup_dir, 'master', master, backup_type='page') - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - self.restore_node(backup_dir, 'master', replica) - self.set_replica(master, replica) - - self.add_instance(backup_dir, 'replica', replica) - self.set_archiving(backup_dir, 'replica', replica, replica=True) - - copy_tree( - os.path.join(backup_dir, 'wal', 'master'), - os.path.join(backup_dir, 'wal', 'replica')) - - replica.slow_start(replica=True) - - # FULL backup replica - Y1 = self.backup_node( - backup_dir, 'replica', replica, - options=['--stream', '--archive-timeout=60s']) - - master.pgbench_init(scale=5) - - # PAGE backup replica - Y2 = self.backup_node( - backup_dir, 'replica', replica, - backup_type='page', options=['--stream', '--archive-timeout=60s']) - - # create timeline t2 - replica.promote() - - # FULL backup replica - A1 = self.backup_node( - backup_dir, 'replica', replica) - - replica.pgbench_init(scale=5) - - replica.safe_psql( - 'postgres', - "CREATE TABLE t1 (a text)") - - target_xid = None - with replica.connect("postgres") as con: - res = con.execute( - "INSERT INTO t1 VALUES ('inserted') RETURNING (xmin)") - con.commit() - target_xid = res[0][0] - - # DELTA backup replica - A2 = self.backup_node( - backup_dir, 'replica', replica, backup_type='delta') - - # create timeline t3 - replica.cleanup() - self.restore_node( - backup_dir, 'replica', replica, - options=[ - '--recovery-target-xid={0}'.format(target_xid), - '--recovery-target-timeline=2', - '--recovery-target-action=promote']) - - replica.slow_start() - - B1 = self.backup_node( - backup_dir, 'replica', replica) - - replica.pgbench_init(scale=2) - - B2 = self.backup_node( - backup_dir, 'replica', replica, backup_type='page') - - replica.pgbench_init(scale=2) - - target_xid = None - with replica.connect("postgres") as con: - res = con.execute( - "INSERT INTO t1 VALUES ('inserted') RETURNING (xmin)") - con.commit() - target_xid = res[0][0] - - B3 = self.backup_node( - backup_dir, 'replica', replica, backup_type='page') - - replica.pgbench_init(scale=2) - - # create timeline t4 - replica.cleanup() - self.restore_node( - backup_dir, 'replica', replica, - options=[ - '--recovery-target-xid={0}'.format(target_xid), - '--recovery-target-timeline=3', - '--recovery-target-action=promote']) - - replica.slow_start() - - replica.safe_psql( - 'postgres', - 'CREATE TABLE ' - 't2 as select i, ' - 'repeat(md5(i::text),5006056) as fat_attr ' - 'from generate_series(0,6) i') - - target_xid = None - with replica.connect("postgres") as con: - res = con.execute( - "INSERT INTO t1 VALUES ('inserted') RETURNING (xmin)") - con.commit() - target_xid = res[0][0] - - replica.safe_psql( - 'postgres', - 'CREATE TABLE ' - 't3 as select i, ' - 'repeat(md5(i::text),5006056) as fat_attr ' - 'from generate_series(0,10) i') - - # create timeline t5 - replica.cleanup() - self.restore_node( - backup_dir, 'replica', replica, - options=[ - '--recovery-target-xid={0}'.format(target_xid), - '--recovery-target-timeline=4', - '--recovery-target-action=promote']) - - replica.slow_start() - - replica.safe_psql( - 'postgres', - 'CREATE TABLE ' - 't4 as select i, ' - 
'repeat(md5(i::text),5006056) as fat_attr ' - 'from generate_series(0,6) i') - - # create timeline t6 - replica.cleanup() - - self.restore_node( - backup_dir, 'replica', replica, backup_id=A1, - options=[ - '--recovery-target=immediate', - '--recovery-target-action=promote']) - replica.slow_start() - - replica.pgbench_init(scale=2) - - sleep(5) - - show = self.show_archive(backup_dir, as_text=True) - show = self.show_archive(backup_dir) - - for instance in show: - if instance['instance'] == 'replica': - replica_timelines = instance['timelines'] - - if instance['instance'] == 'master': - master_timelines = instance['timelines'] - - # check that all timelines are ok - for timeline in replica_timelines: - self.assertTrue(timeline['status'], 'OK') - - # check that all timelines are ok - for timeline in master_timelines: - self.assertTrue(timeline['status'], 'OK') - - # create holes in t3 - wals_dir = os.path.join(backup_dir, 'wal', 'replica') - wals = [ - f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) - and not f.endswith('.backup') and not f.endswith('.history') and f.startswith('00000003') - ] - wals.sort() - - # check that t3 is ok - self.show_archive(backup_dir) - - file = os.path.join(backup_dir, 'wal', 'replica', '000000030000000000000017') - if self.archive_compress: - file = file + '.gz' - os.remove(file) - - file = os.path.join(backup_dir, 'wal', 'replica', '000000030000000000000012') - if self.archive_compress: - file = file + '.gz' - os.remove(file) - - file = os.path.join(backup_dir, 'wal', 'replica', '000000030000000000000013') - if self.archive_compress: - file = file + '.gz' - os.remove(file) - - # check that t3 is not OK - show = self.show_archive(backup_dir) - - show = self.show_archive(backup_dir) - - for instance in show: - if instance['instance'] == 'replica': - replica_timelines = instance['timelines'] - - # sanity - for timeline in replica_timelines: - if timeline['tli'] == 1: - timeline_1 = timeline - continue - - if timeline['tli'] == 2: - timeline_2 = timeline - continue - - if timeline['tli'] == 3: - timeline_3 = timeline - continue - - if timeline['tli'] == 4: - timeline_4 = timeline - continue - - if timeline['tli'] == 5: - timeline_5 = timeline - continue - - if timeline['tli'] == 6: - timeline_6 = timeline - continue - - self.assertEqual(timeline_6['status'], "OK") - self.assertEqual(timeline_5['status'], "OK") - self.assertEqual(timeline_4['status'], "OK") - self.assertEqual(timeline_3['status'], "DEGRADED") - self.assertEqual(timeline_2['status'], "OK") - self.assertEqual(timeline_1['status'], "OK") - - self.assertEqual(len(timeline_3['lost-segments']), 2) - self.assertEqual( - timeline_3['lost-segments'][0]['begin-segno'], - '000000030000000000000012') - self.assertEqual( - timeline_3['lost-segments'][0]['end-segno'], - '000000030000000000000013') - self.assertEqual( - timeline_3['lost-segments'][1]['begin-segno'], - '000000030000000000000017') - self.assertEqual( - timeline_3['lost-segments'][1]['end-segno'], - '000000030000000000000017') - - self.assertEqual(len(timeline_6['backups']), 0) - self.assertEqual(len(timeline_5['backups']), 0) - self.assertEqual(len(timeline_4['backups']), 0) - self.assertEqual(len(timeline_3['backups']), 3) - self.assertEqual(len(timeline_2['backups']), 2) - self.assertEqual(len(timeline_1['backups']), 2) - - # check closest backup correctness - self.assertEqual(timeline_6['closest-backup-id'], A1) - self.assertEqual(timeline_5['closest-backup-id'], B2) - 
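        # A minimal sketch (an assumption, inferred only from the keys this
        # test checks) of what one timeline entry in the JSON returned by
        # `pg_probackup show --archive` looks like; every value except the
        # lost-segments boundaries removed above is a placeholder:
        #
        # {
        #     "tli": 3,
        #     "parent-tli": 2,
        #     "status": "DEGRADED",
        #     "min-segno": "...",
        #     "max-segno": "...",
        #     "lost-segments": [
        #         {"begin-segno": "000000030000000000000012",
        #          "end-segno": "000000030000000000000013"},
        #         {"begin-segno": "000000030000000000000017",
        #          "end-segno": "000000030000000000000017"}
        #     ],
        #     "backups": ["..."],
        #     "closest-backup-id": "..."
        # }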
self.assertEqual(timeline_4['closest-backup-id'], B2) - self.assertEqual(timeline_3['closest-backup-id'], A1) - self.assertEqual(timeline_2['closest-backup-id'], Y2) - - # check parent tli correctness - self.assertEqual(timeline_6['parent-tli'], 2) - self.assertEqual(timeline_5['parent-tli'], 4) - self.assertEqual(timeline_4['parent-tli'], 3) - self.assertEqual(timeline_3['parent-tli'], 2) - self.assertEqual(timeline_2['parent-tli'], 1) - self.assertEqual(timeline_1['parent-tli'], 0) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_archive_catalog_1(self): - """ - double segment - compressed and not - """ - if not self.archive_compress: - self.skipTest('You need to enable ARCHIVE_COMPRESSION ' - 'for this test to run') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'archive_timeout': '30s', - 'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node, compress=True) - - node.slow_start() - - # FULL - self.backup_node(backup_dir, 'node', node) - node.pgbench_init(scale=2) - - wals_dir = os.path.join(backup_dir, 'wal', 'node') - original_file = os.path.join(wals_dir, '000000010000000000000001.gz') - tmp_file = os.path.join(wals_dir, '000000010000000000000001') - - with gzip.open(original_file, 'rb') as f_in, open(tmp_file, 'wb') as f_out: - shutil.copyfileobj(f_in, f_out) - - os.rename( - os.path.join(wals_dir, '000000010000000000000001'), - os.path.join(wals_dir, '000000010000000000000002')) - - show = self.show_archive(backup_dir) - - for instance in show: - timelines = instance['timelines'] - - # sanity - for timeline in timelines: - self.assertEqual( - timeline['min-segno'], - '000000010000000000000001') - self.assertEqual(timeline['status'], 'OK') - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_archive_catalog_2(self): - """ - double segment - compressed and not - """ - if not self.archive_compress: - self.skipTest('You need to enable ARCHIVE_COMPRESSION ' - 'for this test to run') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'archive_timeout': '30s', - 'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node, compress=True) - - node.slow_start() - - # FULL - self.backup_node(backup_dir, 'node', node) - node.pgbench_init(scale=2) - - wals_dir = os.path.join(backup_dir, 'wal', 'node') - original_file = os.path.join(wals_dir, '000000010000000000000001.gz') - tmp_file = os.path.join(wals_dir, '000000010000000000000001') - - with gzip.open(original_file, 'rb') as f_in, open(tmp_file, 'wb') as f_out: - shutil.copyfileobj(f_in, f_out) - - os.rename( - os.path.join(wals_dir, '000000010000000000000001'), - os.path.join(wals_dir, '000000010000000000000002')) - - os.remove(original_file) - - show = self.show_archive(backup_dir) - - for instance in show: - timelines = instance['timelines'] - - # sanity - for timeline in timelines: - self.assertEqual( - timeline['min-segno'], - '000000010000000000000002') - self.assertEqual(timeline['status'], 'OK') - - # 
@unittest.expectedFailure - # @unittest.skip("skip") - def test_archive_options(self): - """ - check that '--archive-host', '--archive-user', '--archiver-port' - and '--restore-command' are working as expected. - """ - if not self.remote: - self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" - " for run this test") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node, compress=True) - - node.slow_start() - - # FULL - self.backup_node(backup_dir, 'node', node) - node.pgbench_init(scale=1) - - node.cleanup() - - wal_dir = os.path.join(backup_dir, 'wal', 'node') - self.restore_node( - backup_dir, 'node', node, - options=[ - '--restore-command="cp {0}/%f %p"'.format(wal_dir), - '--archive-host=localhost', - '--archive-port=22', - '--archive-user={0}'.format(self.user) - ]) - - if self.get_version(node) >= self.version_to_num('12.0'): - recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') - else: - recovery_conf = os.path.join(node.data_dir, 'recovery.conf') - - with open(recovery_conf, 'r') as f: - recovery_content = f.read() - - self.assertIn( - 'restore_command = \'"cp {0}/%f %p"\''.format(wal_dir), - recovery_content) - - node.cleanup() - - self.restore_node( - backup_dir, 'node', node, - options=[ - '--archive-host=localhost', - '--archive-port=22', - '--archive-user={0}'.format(self.user)]) - - with open(recovery_conf, 'r') as f: - recovery_content = f.read() - - self.assertIn( - "restore_command = '\"{0}\" archive-get -B \"{1}\" --instance \"{2}\" " - "--wal-file-path=%p --wal-file-name=%f --remote-host=localhost " - "--remote-port=22 --remote-user={3}'".format( - self.probackup_path, backup_dir, 'node', self.user), - recovery_content) - - node.slow_start() - - node.safe_psql( - 'postgres', - 'select 1') - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_archive_options_1(self): - """ - check that '--archive-host', '--archive-user', '--archiver-port' - and '--restore-command' are working as expected with set-config - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node, compress=True) - - node.slow_start() - - # FULL - self.backup_node(backup_dir, 'node', node) - node.pgbench_init(scale=1) - - node.cleanup() - - wal_dir = os.path.join(backup_dir, 'wal', 'node') - self.set_config( - backup_dir, 'node', - options=[ - '--restore-command="cp {0}/%f %p"'.format(wal_dir), - '--archive-host=localhost', - '--archive-port=22', - '--archive-user={0}'.format(self.user)]) - self.restore_node(backup_dir, 'node', node) - - if self.get_version(node) >= self.version_to_num('12.0'): - recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') - else: - recovery_conf = os.path.join(node.data_dir, 'recovery.conf') - - with open(recovery_conf, 'r') as f: - recovery_content = f.read() - - self.assertIn( - 'restore_command = \'"cp {0}/%f %p"\''.format(wal_dir), - recovery_content) - - node.cleanup() - - self.restore_node( - backup_dir, 'node', node, - 
options=[ - '--restore-command=none', - '--archive-host=localhost1', - '--archive-port=23', - '--archive-user={0}'.format(self.user) - ]) - - with open(recovery_conf, 'r') as f: - recovery_content = f.read() - - self.assertIn( - "restore_command = '\"{0}\" archive-get -B \"{1}\" --instance \"{2}\" " - "--wal-file-path=%p --wal-file-name=%f --remote-host=localhost1 " - "--remote-port=23 --remote-user={3}'".format( - self.probackup_path, backup_dir, 'node', self.user), - recovery_content) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_undefined_wal_file_path(self): - """ - check that archive-push works correct with undefined - --wal-file-path - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - if os.name == 'posix': - archive_command = '\"{0}\" archive-push -B \"{1}\" --instance \"{2}\" --wal-file-name=%f'.format( - self.probackup_path, backup_dir, 'node') - elif os.name == 'nt': - archive_command = '\"{0}\" archive-push -B \"{1}\" --instance \"{2}\" --wal-file-name=%f'.format( - self.probackup_path, backup_dir, 'node').replace("\\","\\\\") - else: - self.assertTrue(False, 'Unexpected os family') - - self.set_auto_conf( - node, - {'archive_command': archive_command}) - - node.slow_start() - node.safe_psql( - "postgres", - "create table t_heap as select i" - " as id from generate_series(0, 10) i") - self.switch_wal_segment(node) - - # check - self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], '000000010000000000000001') - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_intermediate_archiving(self): - """ - check that archive-push works correct with --wal-file-path setting by user - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - node_pg_options = {} - if node.major_version >= 13: - node_pg_options['wal_keep_size'] = '0MB' - else: - node_pg_options['wal_keep_segments'] = '0' - self.set_auto_conf(node, node_pg_options) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - - wal_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'intermediate_dir') - shutil.rmtree(wal_dir, ignore_errors=True) - os.makedirs(wal_dir) - if os.name == 'posix': - self.set_archiving(backup_dir, 'node', node, custom_archive_command='cp -v %p {0}/%f'.format(wal_dir)) - elif os.name == 'nt': - self.set_archiving(backup_dir, 'node', node, custom_archive_command='copy /Y "%p" "{0}\\\\%f"'.format(wal_dir.replace("\\","\\\\"))) - else: - self.assertTrue(False, 'Unexpected os family') - - node.slow_start() - node.safe_psql( - "postgres", - "create table t_heap as select i" - " as id from generate_series(0, 10) i") - self.switch_wal_segment(node) - - wal_segment = '000000010000000000000001' - - self.run_pb(["archive-push", "-B", backup_dir, - "--instance=node", "-D", node.data_dir, - "--wal-file-path", "{0}/{1}".format(wal_dir, wal_segment), "--wal-file-name", wal_segment]) - - self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], wal_segment) - - # @unittest.skip("skip") - # 
@unittest.expectedFailure - def test_waldir_outside_pgdata_archiving(self): - """ - check that archive-push works correct with symlinked waldir - """ - if self.pg_config_version < self.version_to_num('10.0'): - self.skipTest( - 'Skipped because waldir outside pgdata is supported since PG 10') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - external_wal_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'ext_wal_dir') - shutil.rmtree(external_wal_dir, ignore_errors=True) - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums', '--waldir={0}'.format(external_wal_dir)]) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - - node.slow_start() - node.safe_psql( - "postgres", - "create table t_heap as select i" - " as id from generate_series(0, 10) i") - self.switch_wal_segment(node) - - # check - self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], '000000010000000000000001') - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_hexadecimal_timeline(self): - """ - Check that timelines are correct. - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node, log_level='verbose') - node.slow_start() - - backup_id = self.backup_node(backup_dir, 'node', node) - node.pgbench_init(scale=2) - - # create timelines - for i in range(1, 13): - # print(i) - node.cleanup() - self.restore_node( - backup_dir, 'node', node, - options=['--recovery-target-timeline={0}'.format(i)]) - node.slow_start() - node.pgbench_init(scale=2) - - sleep(5) - - show = self.show_archive(backup_dir) - - timelines = show[0]['timelines'] - - print(timelines[0]) - - tli13 = timelines[0] - - self.assertEqual( - 13, - tli13['tli']) - - self.assertEqual( - 12, - tli13['parent-tli']) - - self.assertEqual( - backup_id, - tli13['closest-backup-id']) - - self.assertEqual( - '0000000D000000000000001C', - tli13['max-segno']) - - @unittest.skip("skip") - # @unittest.expectedFailure - def test_archiving_and_slots(self): - """ - Check that archiving don`t break slot - guarantee. 
- """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s', - 'max_wal_size': '64MB'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node, log_level='verbose') - node.slow_start() - - if self.get_version(node) < 100000: - pg_receivexlog_path = self.get_bin_path('pg_receivexlog') - else: - pg_receivexlog_path = self.get_bin_path('pg_receivewal') - - # "pg_receivewal --create-slot --slot archive_slot --if-not-exists " - # "&& pg_receivewal --synchronous -Z 1 /tmp/wal --slot archive_slot --no-loop" - - self.run_binary( - [ - pg_receivexlog_path, '-p', str(node.port), '--synchronous', - '--create-slot', '--slot', 'archive_slot', '--if-not-exists' - ]) - - node.pgbench_init(scale=10) - - pg_receivexlog = self.run_binary( - [ - pg_receivexlog_path, '-p', str(node.port), '--synchronous', - '-D', os.path.join(backup_dir, 'wal', 'node'), - '--no-loop', '--slot', 'archive_slot', - '-Z', '1' - ], asynchronous=True) - - if pg_receivexlog.returncode: - self.assertFalse( - True, - 'Failed to start pg_receivexlog: {0}'.format( - pg_receivexlog.communicate()[1])) - - sleep(2) - - pg_receivexlog.kill() - - backup_id = self.backup_node(backup_dir, 'node', node) - node.pgbench_init(scale=20) - - exit(1) - - def test_archive_push_sanity(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'archive_mode': 'on', - 'archive_command': 'exit 1'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - - node.slow_start() - - node.pgbench_init(scale=50) - node.stop() - - self.set_archiving(backup_dir, 'node', node) - os.remove(os.path.join(node.logs_dir, 'postgresql.log')) - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - - with open(os.path.join(node.logs_dir, 'postgresql.log'), 'r') as f: - postgres_log_content = f.read() - - # print(postgres_log_content) - # make sure that .backup file is not compressed - self.assertNotIn('.backup.gz', postgres_log_content) - self.assertNotIn('WARNING', postgres_log_content) - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.restore_node( - backup_dir, 'node', replica, - data_dir=replica.data_dir, options=['-R']) - - # self.set_archiving(backup_dir, 'replica', replica, replica=True) - self.set_auto_conf(replica, {'port': replica.port}) - self.set_auto_conf(replica, {'archive_mode': 'always'}) - self.set_auto_conf(replica, {'hot_standby': 'on'}) - replica.slow_start(replica=True) - - self.wait_until_replica_catch_with_master(node, replica) - - node.pgbench_init(scale=5) - - replica.promote() - replica.pgbench_init(scale=10) - - with open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f: - replica_log_content = f.read() - - # make sure that .partial file is not compressed - self.assertNotIn('.partial.gz', replica_log_content) - # make sure that .history file is not compressed - self.assertNotIn('.history.gz', replica_log_content) - self.assertNotIn('WARNING', replica_log_content) - - output = self.show_archive( - backup_dir, 'node', 
as_json=False, as_text=True, - options=['--log-level-console=INFO']) - - self.assertNotIn('WARNING', output) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_archive_pg_receivexlog_partial_handling(self): - """check that archive-get delivers .partial and .gz.partial files""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - if self.get_version(node) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - - node.slow_start() - - if self.get_version(node) < 100000: - app_name = 'pg_receivexlog' - pg_receivexlog_path = self.get_bin_path('pg_receivexlog') - else: - app_name = 'pg_receivewal' - pg_receivexlog_path = self.get_bin_path('pg_receivewal') - - cmdline = [ - pg_receivexlog_path, '-p', str(node.port), '--synchronous', - '-D', os.path.join(backup_dir, 'wal', 'node')] - - if self.archive_compress and node.major_version >= 10: - cmdline += ['-Z', '1'] - - env = self.test_env - env["PGAPPNAME"] = app_name - pg_receivexlog = self.run_binary(cmdline, asynchronous=True, env=env) - - if pg_receivexlog.returncode: - self.assertFalse( - True, - 'Failed to start pg_receivexlog: {0}'.format( - pg_receivexlog.communicate()[1])) - - self.set_auto_conf(node, {'synchronous_standby_names': app_name}) - self.set_auto_conf(node, {'synchronous_commit': 'on'}) - node.reload() - - # FULL - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1000000) i") - - # PAGE - self.backup_node( - backup_dir, 'node', node, backup_type='page', options=['--stream']) - - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(1000000,2000000) i") - - pg_receivexlog.kill() - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, node_restored.data_dir, - options=['--recovery-target=latest', '--recovery-target-action=promote']) - self.set_auto_conf(node_restored, {'port': node_restored.port}) - self.set_auto_conf(node_restored, {'hot_standby': 'off'}) - - node_restored.slow_start() - - result = node.safe_psql( - "postgres", - "select sum(id) from t_heap").decode('utf-8').rstrip() - - result_new = node_restored.safe_psql( - "postgres", - "select sum(id) from t_heap").decode('utf-8').rstrip() - - self.assertEqual(result, result_new) - - @unittest.skip("skip") - def test_multi_timeline_recovery_prefetching(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - - node.pgbench_init(scale=50) - - target_xid = node.safe_psql( - 'postgres', - 'select 
txid_current()').rstrip() - - node.pgbench_init(scale=20) - - node.stop() - node.cleanup() - - self.restore_node( - backup_dir, 'node', node, - options=[ - '--recovery-target-xid={0}'.format(target_xid), - '--recovery-target-action=promote']) - - node.slow_start() - - node.pgbench_init(scale=20) - - target_xid = node.safe_psql( - 'postgres', - 'select txid_current()').rstrip() - - node.stop(['-m', 'immediate', '-D', node.data_dir]) - node.cleanup() - - self.restore_node( - backup_dir, 'node', node, - options=[ -# '--recovery-target-xid={0}'.format(target_xid), - '--recovery-target-timeline=2', -# '--recovery-target-action=promote', - '--no-validate']) - node.slow_start() - - node.pgbench_init(scale=20) - result = node.safe_psql( - 'postgres', - 'select * from pgbench_accounts') - node.stop() - node.cleanup() - - self.restore_node( - backup_dir, 'node', node, - options=[ -# '--recovery-target-xid=100500', - '--recovery-target-timeline=3', -# '--recovery-target-action=promote', - '--no-validate']) - os.remove(os.path.join(node.logs_dir, 'postgresql.log')) - - restore_command = self.get_restore_command(backup_dir, 'node', node) - restore_command += ' -j 2 --batch-size=10 --log-level-console=VERBOSE' - - if node.major_version >= 12: - node.append_conf( - 'postgresql.auto.conf', "restore_command = '{0}'".format(restore_command)) - else: - node.append_conf( - 'recovery.conf', "restore_command = '{0}'".format(restore_command)) - - node.slow_start() - - result_new = node.safe_psql( - 'postgres', - 'select * from pgbench_accounts') - - self.assertEqual(result, result_new) - - with open(os.path.join(node.logs_dir, 'postgresql.log'), 'r') as f: - postgres_log_content = f.read() - - # check that requesting of non-existing segment do not - # throwns aways prefetch - self.assertIn( - 'pg_probackup archive-get failed to ' - 'deliver WAL file: 000000030000000000000006', - postgres_log_content) - - self.assertIn( - 'pg_probackup archive-get failed to ' - 'deliver WAL file: 000000020000000000000006', - postgres_log_content) - - self.assertIn( - 'pg_probackup archive-get used prefetched ' - 'WAL segment 000000010000000000000006, prefetch state: 5/10', - postgres_log_content) - - def test_archive_get_batching_sanity(self): - """ - Make sure that batching works. - .gz file is corrupted and uncompressed is not, check that both - corruption detected and uncompressed file is used. 
- """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - if self.get_version(node) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - - node.slow_start() - - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - node.pgbench_init(scale=50) - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.restore_node( - backup_dir, 'node', replica, replica.data_dir) - self.set_replica(node, replica, log_shipping=True) - - if node.major_version >= 12: - self.set_auto_conf(replica, {'restore_command': 'exit 1'}) - else: - replica.append_conf('recovery.conf', "restore_command = 'exit 1'") - - replica.slow_start(replica=True) - - # at this point replica is consistent - restore_command = self.get_restore_command(backup_dir, 'node', replica) - - restore_command += ' -j 2 --batch-size=10' - - # print(restore_command) - - if node.major_version >= 12: - self.set_auto_conf(replica, {'restore_command': restore_command}) - else: - replica.append_conf( - 'recovery.conf', "restore_command = '{0}'".format(restore_command)) - - replica.restart() - - sleep(5) - - with open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f: - postgres_log_content = f.read() - - self.assertIn( - 'pg_probackup archive-get completed successfully, fetched: 10/10', - postgres_log_content) - self.assertIn('used prefetched WAL segment', postgres_log_content) - self.assertIn('prefetch state: 9/10', postgres_log_content) - self.assertIn('prefetch state: 8/10', postgres_log_content) - - def test_archive_get_prefetch_corruption(self): - """ - Make sure that WAL corruption is detected. - And --prefetch-dir is honored. 
- """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - - node.slow_start() - - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - node.pgbench_init(scale=50) - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.restore_node( - backup_dir, 'node', replica, replica.data_dir) - self.set_replica(node, replica, log_shipping=True) - - if node.major_version >= 12: - self.set_auto_conf(replica, {'restore_command': 'exit 1'}) - else: - replica.append_conf('recovery.conf', "restore_command = 'exit 1'") - - replica.slow_start(replica=True) - - # at this point replica is consistent - restore_command = self.get_restore_command(backup_dir, 'node', replica) - - restore_command += ' -j5 --batch-size=10 --log-level-console=VERBOSE' - #restore_command += ' --batch-size=2 --log-level-console=VERBOSE' - - if node.major_version >= 12: - self.set_auto_conf(replica, {'restore_command': restore_command}) - else: - replica.append_conf( - 'recovery.conf', "restore_command = '{0}'".format(restore_command)) - - replica.restart() - - sleep(5) - - with open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f: - postgres_log_content = f.read() - - self.assertIn( - 'pg_probackup archive-get completed successfully, fetched: 10/10', - postgres_log_content) - self.assertIn('used prefetched WAL segment', postgres_log_content) - self.assertIn('prefetch state: 9/10', postgres_log_content) - self.assertIn('prefetch state: 8/10', postgres_log_content) - - replica.stop() - - # generate WAL, copy it into prefetch directory, then corrupt - # some segment - node.pgbench_init(scale=20) - sleep(20) - - # now copy WAL files into prefetch directory and corrupt some of them - archive_dir = os.path.join(backup_dir, 'wal', 'node') - files = os.listdir(archive_dir) - files.sort() - - for filename in [files[-4], files[-3], files[-2], files[-1]]: - src_file = os.path.join(archive_dir, filename) - - if node.major_version >= 10: - wal_dir = 'pg_wal' - else: - wal_dir = 'pg_xlog' - - if filename.endswith('.gz'): - dst_file = os.path.join(replica.data_dir, wal_dir, 'pbk_prefetch', filename[:-3]) - with gzip.open(src_file, 'rb') as f_in, open(dst_file, 'wb') as f_out: - shutil.copyfileobj(f_in, f_out) - else: - dst_file = os.path.join(replica.data_dir, wal_dir, 'pbk_prefetch', filename) - shutil.copyfile(src_file, dst_file) - - # print(dst_file) - - # corrupt file - if files[-2].endswith('.gz'): - filename = files[-2][:-3] - else: - filename = files[-2] - - prefetched_file = os.path.join(replica.data_dir, wal_dir, 'pbk_prefetch', filename) - - with open(prefetched_file, "rb+", 0) as f: - f.seek(8192*2) - f.write(b"SURIKEN") - f.flush() - f.close - - # enable restore_command - restore_command = self.get_restore_command(backup_dir, 'node', replica) - restore_command += ' --batch-size=2 --log-level-console=VERBOSE' - - if node.major_version >= 12: - self.set_auto_conf(replica, {'restore_command': restore_command}) - else: - replica.append_conf( - 'recovery.conf', "restore_command = '{0}'".format(restore_command)) - - os.remove(os.path.join(replica.logs_dir, 'postgresql.log')) - replica.slow_start(replica=True) - - sleep(60) - - with 
open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f: - postgres_log_content = f.read() - - self.assertIn( - 'Prefetched WAL segment {0} is invalid, cannot use it'.format(filename), - postgres_log_content) - - self.assertIn( - 'LOG: restored log file "{0}" from archive'.format(filename), - postgres_log_content) - - # @unittest.skip("skip") - def test_archive_show_partial_files_handling(self): - """ - check that files with '.part', '.part.gz', '.partial' and '.partial.gz' - siffixes are handled correctly - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node, compress=False) - - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - - wals_dir = os.path.join(backup_dir, 'wal', 'node') - - # .part file - node.safe_psql( - "postgres", - "create table t1()") - - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() - - filename = filename.decode('utf-8') - - self.switch_wal_segment(node) - - os.rename( - os.path.join(wals_dir, filename), - os.path.join(wals_dir, '{0}.part'.format(filename))) - - # .gz.part file - node.safe_psql( - "postgres", - "create table t2()") - - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() - - filename = filename.decode('utf-8') - - self.switch_wal_segment(node) - - os.rename( - os.path.join(wals_dir, filename), - os.path.join(wals_dir, '{0}.gz.part'.format(filename))) - - # .partial file - node.safe_psql( - "postgres", - "create table t3()") - - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() - - filename = filename.decode('utf-8') - - self.switch_wal_segment(node) - - os.rename( - os.path.join(wals_dir, filename), - os.path.join(wals_dir, '{0}.partial'.format(filename))) - - # .gz.partial file - node.safe_psql( - "postgres", - "create table t4()") - - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() - - filename = filename.decode('utf-8') - - self.switch_wal_segment(node) - - os.rename( - os.path.join(wals_dir, filename), - os.path.join(wals_dir, '{0}.gz.partial'.format(filename))) - - self.show_archive(backup_dir, 'node', options=['--log-level-file=VERBOSE']) - - with open(os.path.join(backup_dir, 'log', 'pg_probackup.log'), 'r') as f: - log_content = f.read() - - self.assertNotIn( - 'WARNING', 
- log_content) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_archive_empty_history_file(self): - """ - https://github.com/postgrespro/pg_probackup/issues/326 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - - node.slow_start() - node.pgbench_init(scale=5) - - # FULL - self.backup_node(backup_dir, 'node', node) - - node.pgbench_init(scale=5) - node.cleanup() - - self.restore_node( - backup_dir, 'node', node, - options=[ - '--recovery-target=latest', - '--recovery-target-action=promote']) - - # Node in timeline 2 - node.slow_start() - - node.pgbench_init(scale=5) - node.cleanup() - - self.restore_node( - backup_dir, 'node', node, - options=[ - '--recovery-target=latest', - '--recovery-target-timeline=2', - '--recovery-target-action=promote']) - - # Node in timeline 3 - node.slow_start() - - node.pgbench_init(scale=5) - node.cleanup() - - self.restore_node( - backup_dir, 'node', node, - options=[ - '--recovery-target=latest', - '--recovery-target-timeline=3', - '--recovery-target-action=promote']) - - # Node in timeline 4 - node.slow_start() - node.pgbench_init(scale=5) - - # Truncate history files - for tli in range(2, 5): - file = os.path.join( - backup_dir, 'wal', 'node', '0000000{0}.history'.format(tli)) - with open(file, "w+") as f: - f.truncate() - - timelines = self.show_archive(backup_dir, 'node', options=['--log-level-file=INFO']) - - # check that all timelines has zero switchpoint - for timeline in timelines: - self.assertEqual(timeline['switchpoint'], '0/0') - - log_file = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(log_file, 'r') as f: - log_content = f.read() - wal_dir = os.path.join(backup_dir, 'wal', 'node') - - self.assertIn( - 'WARNING: History file is corrupted or missing: "{0}"'.format(os.path.join(wal_dir, '00000002.history')), - log_content) - self.assertIn( - 'WARNING: History file is corrupted or missing: "{0}"'.format(os.path.join(wal_dir, '00000003.history')), - log_content) - self.assertIn( - 'WARNING: History file is corrupted or missing: "{0}"'.format(os.path.join(wal_dir, '00000004.history')), - log_content) - -# TODO test with multiple not archived segments. -# TODO corrupted file in archive. - -# important - switchpoint may be NullOffset LSN and not actually existing in archive to boot. 
-# so write WAL validation code accordingly - -# change wal-seg-size -# -# -#t3 ---------------- -# / -#t2 ---------------- -# / -#t1 -A-------- -# -# - - -#t3 ---------------- -# / -#t2 ---------------- -# / -#t1 -A-------- -# diff --git a/tests/backup_test.py b/tests/backup_test.py deleted file mode 100644 index db7ccf5a0..000000000 --- a/tests/backup_test.py +++ /dev/null @@ -1,3564 +0,0 @@ -import unittest -import os -from time import sleep, time -from .helpers.ptrack_helpers import base36enc, ProbackupTest, ProbackupException -import shutil -from distutils.dir_util import copy_tree -from testgres import ProcessType, QueryException -import subprocess - - -class BackupTest(ProbackupTest, unittest.TestCase): - - def test_full_backup(self): - """ - Just test full backup with at least two segments - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - # we need to write a lot. Lets speedup a bit. - pg_options={"fsync": "off", "synchronous_commit": "off"}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Fill with data - # Have to use scale=100 to create second segment. - node.pgbench_init(scale=100, no_vacuum=True) - - # FULL - backup_id = self.backup_node(backup_dir, 'node', node) - - out = self.validate_pb(backup_dir, 'node', backup_id) - self.assertIn( - "INFO: Backup {0} is valid".format(backup_id), - out) - - def test_full_backup_stream(self): - """ - Just test full backup with at least two segments in stream mode - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - # we need to write a lot. Lets speedup a bit. - pg_options={"fsync": "off", "synchronous_commit": "off"}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # Fill with data - # Have to use scale=100 to create second segment. 
- node.pgbench_init(scale=100, no_vacuum=True) - - # FULL - backup_id = self.backup_node(backup_dir, 'node', node, - options=["--stream"]) - - out = self.validate_pb(backup_dir, 'node', backup_id) - self.assertIn( - "INFO: Backup {0} is valid".format(backup_id), - out) - - # @unittest.skip("skip") - # @unittest.expectedFailure - # PGPRO-707 - def test_backup_modes_archive(self): - """standart backup modes with ARCHIVE WAL method""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - full_backup_id = self.backup_node(backup_dir, 'node', node) - show_backup = self.show_pb(backup_dir, 'node')[0] - - self.assertEqual(show_backup['status'], "OK") - self.assertEqual(show_backup['backup-mode'], "FULL") - - # postmaster.pid and postmaster.opts shouldn't be copied - excluded = True - db_dir = os.path.join( - backup_dir, "backups", 'node', full_backup_id, "database") - - for f in os.listdir(db_dir): - if ( - os.path.isfile(os.path.join(db_dir, f)) and - ( - f == "postmaster.pid" or - f == "postmaster.opts" - ) - ): - excluded = False - self.assertEqual(excluded, True) - - # page backup mode - page_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="page") - - show_backup_1 = self.show_pb(backup_dir, 'node')[1] - self.assertEqual(show_backup_1['status'], "OK") - self.assertEqual(show_backup_1['backup-mode'], "PAGE") - - # delta backup mode - delta_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="delta") - - show_backup_2 = self.show_pb(backup_dir, 'node')[2] - self.assertEqual(show_backup_2['status'], "OK") - self.assertEqual(show_backup_2['backup-mode'], "DELTA") - - # Check parent backup - self.assertEqual( - full_backup_id, - self.show_pb( - backup_dir, 'node', - backup_id=show_backup_1['id'])["parent-backup-id"]) - - self.assertEqual( - page_backup_id, - self.show_pb( - backup_dir, 'node', - backup_id=show_backup_2['id'])["parent-backup-id"]) - - # @unittest.skip("skip") - def test_smooth_checkpoint(self): - """full backup with smooth checkpoint""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.backup_node( - backup_dir, 'node', node, - options=["-C"]) - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") - node.stop() - - # @unittest.skip("skip") - def test_incremental_backup_without_full(self): - """page backup without validated full backup""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - try: - self.backup_node(backup_dir, 'node', node, backup_type="page") - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be 
possible " - "without valid full backup.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "WARNING: Valid full backup on current timeline 1 is not found" in e.message and - "ERROR: Create new full backup before an incremental one" in e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['status'], - "ERROR") - - # @unittest.skip("skip") - def test_incremental_backup_corrupt_full(self): - """page-level backup with corrupted full backup""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - backup_id = self.backup_node(backup_dir, 'node', node) - file = os.path.join( - backup_dir, "backups", "node", backup_id, - "database", "postgresql.conf") - os.remove(file) - - try: - self.validate_pb(backup_dir, 'node') - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of validation of corrupted backup.\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "INFO: Validate backups of the instance 'node'" in e.message and - "WARNING: Backup file" in e.message and "is not found" in e.message and - "WARNING: Backup {0} data files are corrupted".format( - backup_id) in e.message and - "WARNING: Some backups are not valid" in e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - try: - self.backup_node(backup_dir, 'node', node, backup_type="page") - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be possible " - "without valid full backup.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "WARNING: Valid full backup on current timeline 1 is not found" in e.message and - "ERROR: Create new full backup before an incremental one" in e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertEqual( - self.show_pb(backup_dir, 'node', backup_id)['status'], "CORRUPT") - self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['status'], "ERROR") - - # @unittest.skip("skip") - def test_delta_threads_stream(self): - """delta multi thread backup mode and stream""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=["-j", "4", "--stream"]) - - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") - self.backup_node( - backup_dir, 'node', node, - backup_type="delta", options=["-j", "4", "--stream"]) - self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK") - - # @unittest.skip("skip") - def test_page_detect_corruption(self): - """make node, corrupt 
some page, check that backup failed""" - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4", "--stream"]) - - node.safe_psql( - "postgres", - "create table t_heap as select 1 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1000) i") - - node.safe_psql( - "postgres", - "CHECKPOINT") - - heap_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - - path = os.path.join(node.data_dir, heap_path) - with open(path, "rb+", 0) as f: - f.seek(9000) - f.write(b"bla") - f.flush() - f.close - - try: - self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=["-j", "4", "--stream", "--log-level-file=VERBOSE"]) - self.assertEqual( - 1, 0, - "Expecting Error because data file is corrupted" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: Corruption detected in file "{0}", ' - 'block 1: page verification failed, calculated checksum'.format(path), - e.message) - - self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['status'], - 'ERROR', - "Backup Status should be ERROR") - - # @unittest.skip("skip") - def test_backup_detect_corruption(self): - """make node, corrupt some page, check that backup failed""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - if self.ptrack: - node.safe_psql( - "postgres", - "create extension ptrack") - - self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4", "--stream"]) - - node.safe_psql( - "postgres", - "create table t_heap as select 1 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - - heap_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - - self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4", "--stream"]) - - node.safe_psql( - "postgres", - "select count(*) from t_heap") - - node.safe_psql( - "postgres", - "update t_heap set id = id + 10000") - - node.stop() - - heap_fullpath = os.path.join(node.data_dir, heap_path) - - with open(heap_fullpath, "rb+", 0) as f: - f.seek(9000) - f.write(b"bla") - f.flush() - f.close - - node.slow_start() - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page verification failed, calculated 
checksum'.format( - heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="delta", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page verification failed, calculated checksum'.format( - heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="page", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page verification failed, calculated checksum'.format( - heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - if self.ptrack: - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="ptrack", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page verification failed, calculated checksum'.format( - heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_backup_detect_invalid_block_header(self): - """make node, corrupt some page, check that backup failed""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - if self.ptrack: - node.safe_psql( - "postgres", - "create extension ptrack") - - node.safe_psql( - "postgres", - "create table t_heap as select 1 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - - heap_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - - self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4", "--stream"]) - - node.safe_psql( - "postgres", - "select count(*) from t_heap") - - node.safe_psql( - "postgres", - "update t_heap set id = id + 10000") - - node.stop() - - heap_fullpath = os.path.join(node.data_dir, heap_path) - with open(heap_fullpath, "rb+", 0) as f: - f.seek(8193) - f.write(b"blahblahblahblah") - f.flush() - f.close - - node.slow_start() - -# self.backup_node( -# backup_dir, 'node', node, -# backup_type="full", options=["-j", "4", "--stream"]) - - try: - 
self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page header invalid, pd_lower'.format(heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="delta", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page header invalid, pd_lower'.format(heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="page", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page header invalid, pd_lower'.format(heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - if self.ptrack: - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="ptrack", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page header invalid, pd_lower'.format(heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_backup_detect_missing_permissions(self): - """make node, corrupt some page, check that backup failed""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - if self.ptrack: - node.safe_psql( - "postgres", - "create extension ptrack") - - node.safe_psql( - "postgres", - "create table t_heap as select 1 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - - heap_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - - self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4", "--stream"]) - - node.safe_psql( - "postgres", - "select count(*) from t_heap") - - node.safe_psql( - 
"postgres", - "update t_heap set id = id + 10000") - - node.stop() - - heap_fullpath = os.path.join(node.data_dir, heap_path) - with open(heap_fullpath, "rb+", 0) as f: - f.seek(8193) - f.write(b"blahblahblahblah") - f.flush() - f.close - - node.slow_start() - -# self.backup_node( -# backup_dir, 'node', node, -# backup_type="full", options=["-j", "4", "--stream"]) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page header invalid, pd_lower'.format(heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="delta", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page header invalid, pd_lower'.format(heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="page", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page header invalid, pd_lower'.format(heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - if self.ptrack: - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="ptrack", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page header invalid, pd_lower'.format(heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_backup_truncate_misaligned(self): - """ - make node, truncate file to size not even to BLCKSIZE, - take backup - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "create table t_heap as select 1 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,100000) i") - - node.safe_psql( - "postgres", - "CHECKPOINT;") - - heap_path = 
node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - - heap_size = node.safe_psql( - "postgres", - "select pg_relation_size('t_heap')") - - with open(os.path.join(node.data_dir, heap_path), "rb+", 0) as f: - f.truncate(int(heap_size) - 4096) - f.flush() - f.close - - output = self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=["-j", "4", "--stream"], return_id=False) - - self.assertIn("WARNING: File", output) - self.assertIn("invalid file size", output) - - # @unittest.skip("skip") - def test_tablespace_in_pgdata_pgpro_1376(self): - """PGPRO-1376 """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.create_tblspace_in_node( - node, 'tblspace1', - tblspc_path=( - os.path.join( - node.data_dir, 'somedirectory', '100500')) - ) - - self.create_tblspace_in_node( - node, 'tblspace2', - tblspc_path=(os.path.join(node.data_dir)) - ) - - node.safe_psql( - "postgres", - "create table t_heap1 tablespace tblspace1 as select 1 as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1000) i") - - node.safe_psql( - "postgres", - "create table t_heap2 tablespace tblspace2 as select 1 as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1000) i") - - backup_id_1 = self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=["-j", "4", "--stream"]) - - node.safe_psql( - "postgres", - "drop table t_heap2") - node.safe_psql( - "postgres", - "drop tablespace tblspace2") - - self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=["-j", "4", "--stream"]) - - pgdata = self.pgdata_content(node.data_dir) - - relfilenode = node.safe_psql( - "postgres", - "select 't_heap1'::regclass::oid" - ).decode('utf-8').rstrip() - - list = [] - for root, dirs, files in os.walk(os.path.join( - backup_dir, 'backups', 'node', backup_id_1)): - for file in files: - if file == relfilenode: - path = os.path.join(root, file) - list = list + [path] - - # We expect that relfilenode can be encountered only once - if len(list) > 1: - message = "" - for string in list: - message = message + string + "\n" - self.assertEqual( - 1, 0, - "Following file copied twice by backup:\n {0}".format( - message) - ) - - node.cleanup() - - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_basic_tablespace_handling(self): - """ - make node, take full backup, check that restore with - tablespace mapping will end with error, take page backup, - check that restore with tablespace mapping will end with - success - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=["-j", "4", "--stream"]) - - 
tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old') - tblspace2_old_path = self.get_tblspace_path(node, 'tblspace2_old') - - self.create_tblspace_in_node( - node, 'some_lame_tablespace') - - self.create_tblspace_in_node( - node, 'tblspace1', - tblspc_path=tblspace1_old_path) - - self.create_tblspace_in_node( - node, 'tblspace2', - tblspc_path=tblspace2_old_path) - - node.safe_psql( - "postgres", - "create table t_heap_lame tablespace some_lame_tablespace " - "as select 1 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1000) i") - - node.safe_psql( - "postgres", - "create table t_heap2 tablespace tblspace2 as select 1 as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1000) i") - - tblspace1_new_path = self.get_tblspace_path(node, 'tblspace1_new') - tblspace2_new_path = self.get_tblspace_path(node, 'tblspace2_new') - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format( - tblspace1_old_path, tblspace1_new_path), - "-T", "{0}={1}".format( - tblspace2_old_path, tblspace2_new_path)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because tablespace mapping is incorrect" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Backup {0} has no tablespaceses, ' - 'nothing to remap'.format(backup_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - node.safe_psql( - "postgres", - "drop table t_heap_lame") - - node.safe_psql( - "postgres", - "drop tablespace some_lame_tablespace") - - self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=["-j", "4", "--stream"]) - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format( - tblspace1_old_path, tblspace1_new_path), - "-T", "{0}={1}".format( - tblspace2_old_path, tblspace2_new_path)]) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_tablespace_handling_1(self): - """ - make node with tablespace A, take full backup, check that restore with - tablespace mapping of tablespace B will end with error - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old') - tblspace2_old_path = self.get_tblspace_path(node, 'tblspace2_old') - - tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new') - - self.create_tblspace_in_node( - node, 'tblspace1', - tblspc_path=tblspace1_old_path) - - self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=["-j", "4", "--stream"]) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - 
node_restored.cleanup() - - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format( - tblspace2_old_path, tblspace_new_path)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because tablespace mapping is incorrect" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: --tablespace-mapping option' in e.message and - 'have an entry in tablespace_map file' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_tablespace_handling_2(self): - """ - make node without tablespaces, take full backup, check that restore with - tablespace mapping will end with error - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old') - tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new') - - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=["-j", "4", "--stream"]) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format( - tblspace1_old_path, tblspace_new_path)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because tablespace mapping is incorrect" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Backup {0} has no tablespaceses, ' - 'nothing to remap'.format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_drop_rel_during_full_backup(self): - """""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - for i in range(1, 512): - node.safe_psql( - "postgres", - "create table t_heap_{0} as select i" - " as id from generate_series(0,100) i".format(i)) - - node.safe_psql( - "postgres", - "VACUUM") - - node.pgbench_init(scale=10) - - relative_path_1 = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap_1')").decode('utf-8').rstrip() - - relative_path_2 = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap_1')").decode('utf-8').rstrip() - - absolute_path_1 = os.path.join(node.data_dir, relative_path_1) - absolute_path_2 = os.path.join(node.data_dir, relative_path_2) - - # FULL backup - gdb = self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--log-level-file=LOG', '--log-level-console=LOG', '--progress'], - gdb=True) - - gdb.set_breakpoint('backup_files') - gdb.run_until_break() - - # REMOVE file - for i in range(1, 
512): - node.safe_psql( - "postgres", - "drop table t_heap_{0}".format(i)) - - node.safe_psql( - "postgres", - "CHECKPOINT") - - node.safe_psql( - "postgres", - "CHECKPOINT") - - # File removed, we can proceed with backup - gdb.continue_execution_until_exit() - - pgdata = self.pgdata_content(node.data_dir) - - #with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: - # log_content = f.read() - # self.assertTrue( - # 'LOG: File "{0}" is not found'.format(absolute_path) in log_content, - # 'File "{0}" should be deleted but it`s not'.format(absolute_path)) - - node.cleanup() - self.restore_node(backup_dir, 'node', node) - - # Physical comparison - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - @unittest.skip("skip") - def test_drop_db_during_full_backup(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - for i in range(1, 2): - node.safe_psql( - "postgres", - "create database t_heap_{0}".format(i)) - - node.safe_psql( - "postgres", - "VACUUM") - - # FULL backup - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True, - options=[ - '--stream', '--log-level-file=LOG', - '--log-level-console=LOG', '--progress']) - - gdb.set_breakpoint('backup_files') - gdb.run_until_break() - - # REMOVE file - for i in range(1, 2): - node.safe_psql( - "postgres", - "drop database t_heap_{0}".format(i)) - - node.safe_psql( - "postgres", - "CHECKPOINT") - - node.safe_psql( - "postgres", - "CHECKPOINT") - - # File removed, we can proceed with backup - gdb.continue_execution_until_exit() - - pgdata = self.pgdata_content(node.data_dir) - - #with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: - # log_content = f.read() - # self.assertTrue( - # 'LOG: File "{0}" is not found'.format(absolute_path) in log_content, - # 'File "{0}" should be deleted but it`s not'.format(absolute_path)) - - node.cleanup() - self.restore_node(backup_dir, 'node', node) - - # Physical comparison - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_drop_rel_during_backup_delta(self): - """""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=10) - - node.safe_psql( - "postgres", - "create table t_heap as select i" - " as id from generate_series(0,100) i") - - relative_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - - absolute_path = os.path.join(node.data_dir, relative_path) - - # FULL backup - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - # DELTA backup - gdb = self.backup_node( - backup_dir, 'node', node, backup_type='delta', - gdb=True, options=['--log-level-file=LOG']) - - gdb.set_breakpoint('backup_files') - gdb.run_until_break() - - # REMOVE file - node.safe_psql( - "postgres", - "DROP 
TABLE t_heap") - - node.safe_psql( - "postgres", - "CHECKPOINT") - - # File removed, we can proceed with backup - gdb.continue_execution_until_exit() - - pgdata = self.pgdata_content(node.data_dir) - - with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: - log_content = f.read() - self.assertTrue( - 'LOG: File not found: "{0}"'.format(absolute_path) in log_content, - 'File "{0}" should be deleted but it`s not'.format(absolute_path)) - - node.cleanup() - self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) - - # Physical comparison - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_drop_rel_during_backup_page(self): - """""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "create table t_heap as select i" - " as id from generate_series(0,100) i") - - relative_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - - absolute_path = os.path.join(node.data_dir, relative_path) - - # FULL backup - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - node.safe_psql( - "postgres", - "insert into t_heap select i" - " as id from generate_series(101,102) i") - - # PAGE backup - gdb = self.backup_node( - backup_dir, 'node', node, backup_type='page', - gdb=True, options=['--log-level-file=LOG']) - - gdb.set_breakpoint('backup_files') - gdb.run_until_break() - - # REMOVE file - os.remove(absolute_path) - - # File removed, we can proceed with backup - gdb.continue_execution_until_exit() - gdb.kill() - - pgdata = self.pgdata_content(node.data_dir) - - backup_id = self.show_pb(backup_dir, 'node')[1]['id'] - - filelist = self.get_backup_filelist(backup_dir, 'node', backup_id) - self.assertNotIn(relative_path, filelist) - - node.cleanup() - self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) - - # Physical comparison - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_persistent_slot_for_stream_backup(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'max_wal_size': '40MB'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "SELECT pg_create_physical_replication_slot('slot_1')") - - # FULL backup - self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--slot=slot_1']) - - # FULL backup - self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--slot=slot_1']) - - # @unittest.skip("skip") - def test_basic_temp_slot_for_stream_backup(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - 
initdb_params=['--data-checksums'], - pg_options={'max_wal_size': '40MB'}) - - if self.get_version(node) < self.version_to_num('10.0'): - self.skipTest('You need PostgreSQL >= 10 for this test') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--temp-slot']) - - # FULL backup - self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--slot=slot_1', '--temp-slot']) - - # @unittest.skip("skip") - def test_backup_concurrent_drop_table(self): - """""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=1) - - # FULL backup - gdb = self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--compress'], - gdb=True) - - gdb.set_breakpoint('backup_data_file') - gdb.run_until_break() - - node.safe_psql( - 'postgres', - 'DROP TABLE pgbench_accounts') - - # do checkpoint to guarantee filenode removal - node.safe_psql( - 'postgres', - 'CHECKPOINT') - - gdb.remove_all_breakpoints() - gdb.continue_execution_until_exit() - gdb.kill() - - show_backup = self.show_pb(backup_dir, 'node')[0] - - self.assertEqual(show_backup['status'], "OK") - - # @unittest.skip("skip") - def test_pg_11_adjusted_wal_segment_size(self): - """""" - if self.pg_config_version < self.version_to_num('11.0'): - self.skipTest('You need PostgreSQL >= 11 for this test') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=[ - '--data-checksums', - '--wal-segsize=64'], - pg_options={ - 'min_wal_size': '128MB'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=5) - - # FULL STREAM backup - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - pgbench = node.pgbench(options=['-T', '5', '-c', '2']) - pgbench.wait() - - # PAGE STREAM backup - self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=['--stream']) - - pgbench = node.pgbench(options=['-T', '5', '-c', '2']) - pgbench.wait() - - # DELTA STREAM backup - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['--stream']) - - pgbench = node.pgbench(options=['-T', '5', '-c', '2']) - pgbench.wait() - - # FULL ARCHIVE backup - self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench(options=['-T', '5', '-c', '2']) - pgbench.wait() - - # PAGE ARCHIVE backup - self.backup_node(backup_dir, 'node', node, backup_type='page') - - pgbench = node.pgbench(options=['-T', '5', '-c', '2']) - pgbench.wait() - - # DELTA ARCHIVE backup - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='delta') - pgdata = self.pgdata_content(node.data_dir) - - # delete - output = self.delete_pb( - backup_dir, 'node', - options=[ - '--expired', - '--delete-wal', - '--retention-redundancy=1']) - - # validate - self.validate_pb(backup_dir) - - # merge - 
self.merge_backup(backup_dir, 'node', backup_id=backup_id) - - # restore - node.cleanup() - self.restore_node( - backup_dir, 'node', node, backup_id=backup_id) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_sigint_handling(self): - """""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True, - options=['--stream', '--log-level-file=LOG']) - - gdb.set_breakpoint('backup_non_data_file') - gdb.run_until_break() - - gdb.continue_execution_until_break(20) - gdb.remove_all_breakpoints() - - gdb._execute('signal SIGINT') - gdb.continue_execution_until_error() - gdb.kill() - - backup_id = self.show_pb(backup_dir, 'node')[0]['id'] - - self.assertEqual( - 'ERROR', - self.show_pb(backup_dir, 'node', backup_id)['status'], - 'Backup STATUS should be "ERROR"') - - # @unittest.skip("skip") - def test_sigterm_handling(self): - """""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True, - options=['--stream', '--log-level-file=LOG']) - - gdb.set_breakpoint('backup_non_data_file') - gdb.run_until_break() - - gdb.continue_execution_until_break(20) - gdb.remove_all_breakpoints() - - gdb._execute('signal SIGTERM') - gdb.continue_execution_until_error() - - backup_id = self.show_pb(backup_dir, 'node')[0]['id'] - - self.assertEqual( - 'ERROR', - self.show_pb(backup_dir, 'node', backup_id)['status'], - 'Backup STATUS should be "ERROR"') - - # @unittest.skip("skip") - def test_sigquit_handling(self): - """""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True, options=['--stream']) - - gdb.set_breakpoint('backup_non_data_file') - gdb.run_until_break() - - gdb.continue_execution_until_break(20) - gdb.remove_all_breakpoints() - - gdb._execute('signal SIGQUIT') - gdb.continue_execution_until_error() - - backup_id = self.show_pb(backup_dir, 'node')[0]['id'] - - self.assertEqual( - 'ERROR', - self.show_pb(backup_dir, 'node', backup_id)['status'], - 'Backup STATUS should be "ERROR"') - - # @unittest.skip("skip") - def test_drop_table(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - 
self.add_instance(backup_dir, 'node', node) - node.slow_start() - - connect_1 = node.connect("postgres") - connect_1.execute( - "create table t_heap as select i" - " as id from generate_series(0,100) i") - connect_1.commit() - - connect_2 = node.connect("postgres") - connect_2.execute("SELECT * FROM t_heap") - connect_2.commit() - - # DROP table - connect_2.execute("DROP TABLE t_heap") - connect_2.commit() - - # FULL backup - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - # @unittest.skip("skip") - def test_basic_missing_file_permissions(self): - """""" - if os.name == 'nt': - self.skipTest('Skipped because it is POSIX only test') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - relative_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('pg_class')").decode('utf-8').rstrip() - - full_path = os.path.join(node.data_dir, relative_path) - - os.chmod(full_path, 000) - - try: - # FULL backup - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of missing permissions" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Cannot open file', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - os.chmod(full_path, 700) - - # @unittest.skip("skip") - def test_basic_missing_dir_permissions(self): - """""" - if os.name == 'nt': - self.skipTest('Skipped because it is POSIX only test') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - full_path = os.path.join(node.data_dir, 'pg_twophase') - - os.chmod(full_path, 000) - - try: - # FULL backup - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of missing permissions" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Cannot open directory', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - os.rmdir(full_path) - - # @unittest.skip("skip") - def test_backup_with_least_privileges_role(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums'], - pg_options={'archive_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - 'postgres', - 'CREATE DATABASE backupdb') - - if self.ptrack: - node.safe_psql( - "backupdb", - "CREATE SCHEMA ptrack; " - "CREATE EXTENSION ptrack 
WITH SCHEMA ptrack") - - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 10 && < 15 - elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 15 - else: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON 
DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - - if self.ptrack: - node.safe_psql( - "backupdb", - "GRANT USAGE ON SCHEMA ptrack TO backup") - - node.safe_psql( - "backupdb", - "GRANT EXECUTE ON FUNCTION ptrack.ptrack_get_pagemapset(pg_lsn) TO backup; " - "GRANT EXECUTE ON FUNCTION ptrack.ptrack_init_lsn() TO backup;") - - if ProbackupTest.enterprise: - node.safe_psql( - "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") - - # FULL backup - self.backup_node( - backup_dir, 'node', node, - datname='backupdb', options=['--stream', '-U', 'backup']) - self.backup_node( - backup_dir, 'node', node, - datname='backupdb', options=['-U', 'backup']) - - # PAGE - self.backup_node( - backup_dir, 'node', node, backup_type='page', - datname='backupdb', options=['-U', 'backup']) - self.backup_node( - backup_dir, 'node', node, backup_type='page', datname='backupdb', - options=['--stream', '-U', 'backup']) - - # DELTA - self.backup_node( - backup_dir, 'node', node, backup_type='delta', - datname='backupdb', options=['-U', 'backup']) - self.backup_node( - backup_dir, 'node', node, backup_type='delta', - datname='backupdb', options=['--stream', '-U', 'backup']) - - # PTRACK - if self.ptrack: - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', - 
datname='backupdb', options=['-U', 'backup'])
-            self.backup_node(
-                backup_dir, 'node', node, backup_type='ptrack',
-                datname='backupdb', options=['--stream', '-U', 'backup'])
-
-    # @unittest.skip("skip")
-    def test_parent_choosing(self):
-        """
-        PAGE3 <- RUNNING(parent should be FULL)
-        PAGE2 <- OK
-        PAGE1 <- CORRUPT
-        FULL
-        """
-        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
-        node = self.make_simple_node(
-            base_dir=os.path.join(self.module_name, self.fname, 'node'),
-            set_replication=True,
-            initdb_params=['--data-checksums'])
-
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        self.set_archiving(backup_dir, 'node', node)
-        node.slow_start()
-
-        full_id = self.backup_node(backup_dir, 'node', node)
-
-        # PAGE1
-        page1_id = self.backup_node(
-            backup_dir, 'node', node, backup_type='page')
-
-        # PAGE2
-        page2_id = self.backup_node(
-            backup_dir, 'node', node, backup_type='page')
-
-        # Change PAGE1 to ERROR
-        self.change_backup_status(backup_dir, 'node', page1_id, 'ERROR')
-
-        # PAGE3
-        page3_id = self.backup_node(
-            backup_dir, 'node', node,
-            backup_type='page', options=['--log-level-file=LOG'])
-
-        log_file_path = os.path.join(backup_dir, 'log', 'pg_probackup.log')
-        with open(log_file_path) as f:
-            log_file_content = f.read()
-
-        self.assertIn(
-            "WARNING: Backup {0} has invalid parent: {1}. "
-            "Cannot be a parent".format(page2_id, page1_id),
-            log_file_content)
-
-        self.assertIn(
-            "WARNING: Backup {0} has status: ERROR. "
-            "Cannot be a parent".format(page1_id),
-            log_file_content)
-
-        self.assertIn(
-            "Parent backup: {0}".format(full_id),
-            log_file_content)
-
-        self.assertEqual(
-            self.show_pb(
-                backup_dir, 'node', backup_id=page3_id)['parent-backup-id'],
-            full_id)
-
-    # @unittest.skip("skip")
-    def test_parent_choosing_1(self):
-        """
-        PAGE3 <- RUNNING(parent should be FULL)
-        PAGE2 <- OK
-        PAGE1 <- (missing)
-        FULL
-        """
-        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
-        node = self.make_simple_node(
-            base_dir=os.path.join(self.module_name, self.fname, 'node'),
-            set_replication=True,
-            initdb_params=['--data-checksums'])
-
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        self.set_archiving(backup_dir, 'node', node)
-        node.slow_start()
-
-        full_id = self.backup_node(backup_dir, 'node', node)
-
-        # PAGE1
-        page1_id = self.backup_node(
-            backup_dir, 'node', node, backup_type='page')
-
-        # PAGE2
-        page2_id = self.backup_node(
-            backup_dir, 'node', node, backup_type='page')
-
-        # Delete PAGE1
-        shutil.rmtree(
-            os.path.join(backup_dir, 'backups', 'node', page1_id))
-
-        # PAGE3
-        page3_id = self.backup_node(
-            backup_dir, 'node', node,
-            backup_type='page', options=['--log-level-file=LOG'])
-
-        log_file_path = os.path.join(backup_dir, 'log', 'pg_probackup.log')
-        with open(log_file_path) as f:
-            log_file_content = f.read()
-
-        self.assertIn(
-            "WARNING: Backup {0} has missing parent: {1}. 
" - "Cannot be a parent".format(page2_id, page1_id), - log_file_content) - - self.assertIn( - "Parent backup: {0}".format(full_id), - log_file_content) - - self.assertEqual( - self.show_pb( - backup_dir, 'node', backup_id=page3_id)['parent-backup-id'], - full_id) - - # @unittest.skip("skip") - def test_parent_choosing_2(self): - """ - PAGE3 <- RUNNING(backup should fail) - PAGE2 <- OK - PAGE1 <- OK - FULL <- (missing) - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - full_id = self.backup_node(backup_dir, 'node', node) - - # PAGE1 - page1_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGE2 - page2_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Delete FULL - shutil.rmtree( - os.path.join(backup_dir, 'backups', 'node', full_id)) - - # PAGE3 - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=['--log-level-file=LOG']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because FULL backup is missing" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'WARNING: Valid full backup on current timeline 1 is not found' in e.message and - 'ERROR: Create new full backup before an incremental one' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - self.show_pb( - backup_dir, 'node')[2]['status'], - 'ERROR') - - # @unittest.skip("skip") - def test_backup_with_less_privileges_role(self): - """ - check permissions correctness from documentation: - https://github.com/postgrespro/pg_probackup/blob/master/Documentation.md#configuring-the-database-cluster - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums'], - pg_options={ - 'archive_timeout': '30s', - 'archive_mode': 'always', - 'checkpoint_timeout': '60s', - 'wal_level': 'logical'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_config(backup_dir, 'node', options=['--archive-timeout=60s']) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - 'postgres', - 'CREATE DATABASE backupdb') - - if self.ptrack: - node.safe_psql( - 'backupdb', - 'CREATE EXTENSION ptrack') - - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) - # >= 10 && < 15 - elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) - # >= 15 - else: - node.safe_psql( - 'backupdb', - "BEGIN; " - "CREATE ROLE backup WITH LOGIN; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) - - # enable STREAM backup - node.safe_psql( - 'backupdb', - 'ALTER ROLE backup WITH REPLICATION;') - - # FULL backup - self.backup_node( - backup_dir, 'node', node, - datname='backupdb', options=['--stream', '-U', 'backup']) - self.backup_node( - backup_dir, 'node', node, - datname='backupdb', options=['-U', 'backup']) - - # PAGE - self.backup_node( - backup_dir, 'node', node, 
backup_type='page', - datname='backupdb', options=['-U', 'backup']) - self.backup_node( - backup_dir, 'node', node, backup_type='page', datname='backupdb', - options=['--stream', '-U', 'backup']) - - # DELTA - self.backup_node( - backup_dir, 'node', node, backup_type='delta', - datname='backupdb', options=['-U', 'backup']) - self.backup_node( - backup_dir, 'node', node, backup_type='delta', - datname='backupdb', options=['--stream', '-U', 'backup']) - - # PTRACK - if self.ptrack: - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', - datname='backupdb', options=['-U', 'backup']) - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', - datname='backupdb', options=['--stream', '-U', 'backup']) - - if self.get_version(node) < 90600: - return - - # Restore as replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.restore_node(backup_dir, 'node', replica) - self.set_replica(node, replica) - self.add_instance(backup_dir, 'replica', replica) - self.set_config( - backup_dir, 'replica', - options=['--archive-timeout=120s', '--log-level-console=LOG']) - self.set_archiving(backup_dir, 'replica', replica, replica=True) - self.set_auto_conf(replica, {'hot_standby': 'on'}) - - # freeze bgwriter to get rid of RUNNING XACTS records - # bgwriter_pid = node.auxiliary_pids[ProcessType.BackgroundWriter][0] - # gdb_checkpointer = self.gdb_attach(bgwriter_pid) - - copy_tree( - os.path.join(backup_dir, 'wal', 'node'), - os.path.join(backup_dir, 'wal', 'replica')) - - replica.slow_start(replica=True) - - # self.switch_wal_segment(node) - # self.switch_wal_segment(node) - - self.backup_node( - backup_dir, 'replica', replica, - datname='backupdb', options=['-U', 'backup']) - - # stream full backup from replica - self.backup_node( - backup_dir, 'replica', replica, - datname='backupdb', options=['--stream', '-U', 'backup']) - -# self.switch_wal_segment(node) - - # PAGE backup from replica - self.switch_wal_segment(node) - self.backup_node( - backup_dir, 'replica', replica, backup_type='page', - datname='backupdb', options=['-U', 'backup', '--archive-timeout=30s']) - - self.backup_node( - backup_dir, 'replica', replica, backup_type='page', - datname='backupdb', options=['--stream', '-U', 'backup']) - - # DELTA backup from replica - self.switch_wal_segment(node) - self.backup_node( - backup_dir, 'replica', replica, backup_type='delta', - datname='backupdb', options=['-U', 'backup']) - self.backup_node( - backup_dir, 'replica', replica, backup_type='delta', - datname='backupdb', options=['--stream', '-U', 'backup']) - - # PTRACK backup from replica - if self.ptrack: - self.switch_wal_segment(node) - self.backup_node( - backup_dir, 'replica', replica, backup_type='ptrack', - datname='backupdb', options=['-U', 'backup']) - self.backup_node( - backup_dir, 'replica', replica, backup_type='ptrack', - datname='backupdb', options=['--stream', '-U', 'backup']) - - @unittest.skip("skip") - def test_issue_132(self): - """ - https://github.com/postgrespro/pg_probackup/issues/132 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - with node.connect("postgres") as conn: - for i in range(50000): - conn.execute( 
- "CREATE TABLE t_{0} as select 1".format(i)) - conn.commit() - - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - pgdata = self.pgdata_content(node.data_dir) - - node.cleanup() - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - exit(1) - - @unittest.skip("skip") - def test_issue_132_1(self): - """ - https://github.com/postgrespro/pg_probackup/issues/132 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - # TODO: check version of old binary, it should be 2.1.4, 2.1.5 or 2.2.1 - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - with node.connect("postgres") as conn: - for i in range(30000): - conn.execute( - "CREATE TABLE t_{0} as select 1".format(i)) - conn.commit() - - full_id = self.backup_node( - backup_dir, 'node', node, options=['--stream'], old_binary=True) - - delta_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--stream'], old_binary=True) - - node.cleanup() - - # make sure that new binary can detect corruption - try: - self.validate_pb(backup_dir, 'node', backup_id=full_id) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because FULL backup is CORRUPT" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} is a victim of metadata corruption'.format(full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.validate_pb(backup_dir, 'node', backup_id=delta_id) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because FULL backup is CORRUPT" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} is a victim of metadata corruption'.format(full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'CORRUPT', self.show_pb(backup_dir, 'node', full_id)['status'], - 'Backup STATUS should be "CORRUPT"') - - self.assertEqual( - 'ORPHAN', self.show_pb(backup_dir, 'node', delta_id)['status'], - 'Backup STATUS should be "ORPHAN"') - - # check that revalidation is working correctly - try: - self.restore_node( - backup_dir, 'node', node, backup_id=delta_id) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because FULL backup is CORRUPT" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} is a victim of metadata corruption'.format(full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'CORRUPT', self.show_pb(backup_dir, 'node', full_id)['status'], - 'Backup STATUS should be "CORRUPT"') - - self.assertEqual( - 'ORPHAN', self.show_pb(backup_dir, 'node', delta_id)['status'], - 'Backup STATUS should be "ORPHAN"') - - # check that '--no-validate' do not allow to restore ORPHAN backup -# try: -# 
self.restore_node( -# backup_dir, 'node', node, backup_id=delta_id, -# options=['--no-validate']) -# # we should die here because exception is what we expect to happen -# self.assertEqual( -# 1, 0, -# "Expecting Error because FULL backup is CORRUPT" -# "\n Output: {0} \n CMD: {1}".format( -# repr(self.output), self.cmd)) -# except ProbackupException as e: -# self.assertIn( -# 'Insert data', -# e.message, -# '\n Unexpected Error Message: {0}\n CMD: {1}'.format( -# repr(e.message), self.cmd)) - - node.cleanup() - - output = self.restore_node( - backup_dir, 'node', node, backup_id=full_id, options=['--force']) - - self.assertIn( - 'WARNING: Backup {0} has status: CORRUPT'.format(full_id), - output) - - self.assertIn( - 'WARNING: Backup {0} is corrupt.'.format(full_id), - output) - - self.assertIn( - 'WARNING: Backup {0} is not valid, restore is forced'.format(full_id), - output) - - self.assertIn( - 'INFO: Restore of backup {0} completed.'.format(full_id), - output) - - node.cleanup() - - output = self.restore_node( - backup_dir, 'node', node, backup_id=delta_id, options=['--force']) - - self.assertIn( - 'WARNING: Backup {0} is orphan.'.format(delta_id), - output) - - self.assertIn( - 'WARNING: Backup {0} is not valid, restore is forced'.format(full_id), - output) - - self.assertIn( - 'WARNING: Backup {0} is not valid, restore is forced'.format(delta_id), - output) - - self.assertIn( - 'INFO: Restore of backup {0} completed.'.format(delta_id), - output) - - def test_note_sanity(self): - """ - test that adding note to backup works as expected - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--log-level-file=LOG', '--note=test_note']) - - show_backups = self.show_pb(backup_dir, 'node') - - print(self.show_pb(backup_dir, as_text=True, as_json=True)) - - self.assertEqual(show_backups[0]['note'], "test_note") - - self.set_backup(backup_dir, 'node', backup_id, options=['--note=none']) - - backup_meta = self.show_pb(backup_dir, 'node', backup_id) - - self.assertNotIn( - 'note', - backup_meta) - - # @unittest.skip("skip") - def test_parent_backup_made_by_newer_version(self): - """incremental backup with parent made by newer version""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - backup_id = self.backup_node(backup_dir, 'node', node) - - control_file = os.path.join( - backup_dir, "backups", "node", backup_id, - "backup.control") - - version = self.probackup_version - fake_new_version = str(int(version.split('.')[0]) + 1) + '.0.0' - - with open(control_file, 'r') as f: - data = f.read(); - - data = data.replace(version, fake_new_version) - - with open(control_file, 'w') as f: - f.write(data); - - try: - self.backup_node(backup_dir, 'node', node, backup_type="page") - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 
0, - "Expecting Error because incremental backup should not be possible " - "if parent made by newer version.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "pg_probackup do not guarantee to be forward compatible. " - "Please upgrade pg_probackup binary.", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['status'], "ERROR") - - # @unittest.skip("skip") - def test_issue_289(self): - """ - https://github.com/postgrespro/pg_probackup/issues/289 - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - - node.slow_start() - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=['--archive-timeout=10s']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because full backup is missing" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertNotIn( - "INFO: Wait for WAL segment", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - "ERROR: Create new full backup before an incremental one", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['status'], "ERROR") - - # @unittest.skip("skip") - def test_issue_290(self): - """ - https://github.com/postgrespro/pg_probackup/issues/290 - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - - os.rmdir( - os.path.join(backup_dir, "wal", "node")) - - node.slow_start() - - try: - self.backup_node( - backup_dir, 'node', node, - options=['--archive-timeout=10s']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because full backup is missing" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertNotIn( - "INFO: Wait for WAL segment", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - "WAL archive directory is not accessible", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['status'], "ERROR") - - @unittest.skip("skip") - def test_issue_203(self): - """ - https://github.com/postgrespro/pg_probackup/issues/203 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - with 
node.connect("postgres") as conn: - for i in range(1000000): - conn.execute( - "CREATE TABLE t_{0} as select 1".format(i)) - conn.commit() - - full_id = self.backup_node( - backup_dir, 'node', node, options=['--stream', '-j2']) - - pgdata = self.pgdata_content(node.data_dir) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node(backup_dir, 'node', - node_restored, data_dir=node_restored.data_dir) - - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_issue_231(self): - """ - https://github.com/postgrespro/pg_probackup/issues/231 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - datadir = os.path.join(node.data_dir, '123') - - try: - self.backup_node( - backup_dir, 'node', node, data_dir='{0}'.format(datadir)) - except: - pass - - out = self.backup_node(backup_dir, 'node', node, options=['--stream'], return_id=False) - - # it is a bit racy - self.assertIn("WARNING: Cannot create directory", out) - - def test_incr_backup_filenode_map(self): - """ - https://github.com/postgrespro/pg_probackup/issues/320 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), - initdb_params=['--data-checksums']) - node1.cleanup() - - node.pgbench_init(scale=5) - - # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '10', '-c', '1']) - - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='delta') - - node.safe_psql( - 'postgres', - 'reindex index pg_type_oid_index') - - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - # incremental restore into node1 - node.cleanup() - - self.restore_node(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - 'postgres', - 'select 1') - - # @unittest.skip("skip") - def test_missing_wal_segment(self): - """""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums'], - pg_options={'archive_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=10) - - node.safe_psql( - 'postgres', - 'CREATE DATABASE backupdb') - - # get segments in pg_wal, sort then and remove all but the latest - pg_wal_dir = os.path.join(node.data_dir, 'pg_wal') - - if node.major_version >= 10: - pg_wal_dir = 
os.path.join(node.data_dir, 'pg_wal') - else: - pg_wal_dir = os.path.join(node.data_dir, 'pg_xlog') - - # Full backup in streaming mode - gdb = self.backup_node( - backup_dir, 'node', node, datname='backupdb', - options=['--stream', '--log-level-file=INFO'], gdb=True) - - # break at streaming start - gdb.set_breakpoint('start_WAL_streaming') - gdb.run_until_break() - - # generate some more data - node.pgbench_init(scale=3) - - # remove redundant WAL segments in pg_wal - files = os.listdir(pg_wal_dir) - files.sort(reverse=True) - - # leave first two files in list - del files[:2] - for filename in files: - os.remove(os.path.join(pg_wal_dir, filename)) - - gdb.continue_execution_until_exit() - - self.assertIn( - 'unexpected termination of replication stream: ERROR: requested WAL segment', - gdb.output) - - self.assertIn( - 'has already been removed', - gdb.output) - - self.assertIn( - 'ERROR: Interrupted during waiting for WAL streaming', - gdb.output) - - self.assertIn( - 'WARNING: backup in progress, stop backup', - gdb.output) - - # TODO: check the same for PAGE backup - - # @unittest.skip("skip") - def test_missing_replication_permission(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) -# self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - self.restore_node(backup_dir, 'node', replica) - - # Settings for Replica - self.set_replica(node, replica) - replica.slow_start(replica=True) - - node.safe_psql( - 'postgres', - 'CREATE DATABASE backupdb') - - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT 
ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # >= 10 && < 15 - elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 15 - else: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT 
EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - - if ProbackupTest.enterprise: - node.safe_psql( - "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") - - sleep(2) - replica.promote() - - # Delta backup - try: - self.backup_node( - backup_dir, 'node', replica, backup_type='delta', - data_dir=replica.data_dir, datname='backupdb', options=['--stream', '-U', 'backup']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because incremental backup should not be possible " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - # 9.5: ERROR: must be superuser or replication role to run a backup - # >=9.6: FATAL: must be superuser or replication role to start walsender - self.assertRegex( - e.message, - "ERROR: must be superuser or replication role to run a backup|FATAL: must be superuser or replication role to start walsender", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_missing_replication_permission_1(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - self.restore_node(backup_dir, 'node', replica) - - # Settings for Replica - self.set_replica(node, replica) - replica.slow_start(replica=True) - - node.safe_psql( - 'postgres', - 'CREATE DATABASE backupdb') - - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 
100000: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 10 && < 15 - elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # > 15 - else: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE 
ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - - if ProbackupTest.enterprise: - node.safe_psql( - "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") - - replica.promote() - - # PAGE - output = self.backup_node( - backup_dir, 'node', replica, backup_type='page', - data_dir=replica.data_dir, datname='backupdb', options=['-U', 'backup'], - return_id=False) - - self.assertIn( - 'WARNING: Valid full backup on current timeline 2 is not found, trying to look up on previous timelines', - output) - - # Messages before 14 - # 'WARNING: could not connect to database backupdb: FATAL: must be superuser or replication role to start walsender' - # Messages for >=14 - # 'WARNING: could not connect to database backupdb: connection to server on socket "/tmp/.s.PGSQL.30983" failed: FATAL: must be superuser or replication role to start walsender' - # 'WARNING: could not connect to database backupdb: connection to server at "localhost" (127.0.0.1), port 29732 failed: FATAL: must be superuser or replication role to start walsender' - self.assertRegex( - output, - r'WARNING: could not connect to database backupdb: (connection to server (on socket "/tmp/.s.PGSQL.\d+"|at "localhost" \(127.0.0.1\), port \d+) failed: ){0,1}' - 'FATAL: must be superuser or replication role to start walsender') - - # @unittest.skip("skip") - def test_basic_backup_default_transaction_read_only(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'default_transaction_read_only': 'on'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - try: - node.safe_psql( - 'postgres', - 'create temp table t1()') - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because incremental backup should not be possible " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except QueryException as e: - self.assertIn( - "cannot execute CREATE TABLE in a read-only transaction", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - # FULL backup - self.backup_node( - backup_dir, 'node', node, - options=['--stream']) - - # DELTA backup - self.backup_node( - backup_dir, 'node', node, backup_type='delta', options=['--stream']) - - # PAGE backup - self.backup_node(backup_dir, 'node', node, backup_type='page') - - # @unittest.skip("skip") - def test_backup_atexit(self): - """""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, 
self.fname, 'node'), - set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=5) - - # Full backup in streaming mode - gdb = self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--log-level-file=VERBOSE'], gdb=True) - - # break at streaming start - gdb.set_breakpoint('backup_data_file') - gdb.run_until_break() - - gdb.remove_all_breakpoints() - gdb._execute('signal SIGINT') - sleep(1) - - self.assertEqual( - self.show_pb( - backup_dir, 'node')[0]['status'], 'ERROR') - - with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: - log_content = f.read() - #print(log_content) - self.assertIn( - 'WARNING: backup in progress, stop backup', - log_content) - - if self.get_version(node) < 150000: - self.assertIn( - 'FROM pg_catalog.pg_stop_backup', - log_content) - else: - self.assertIn( - 'FROM pg_catalog.pg_backup_stop', - log_content) - - self.assertIn( - 'setting its status to ERROR', - log_content) - - # @unittest.skip("skip") - def test_pg_stop_backup_missing_permissions(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=5) - - self.simple_bootstrap(node, 'backup') - - if self.get_version(node) < 90600: - node.safe_psql( - 'postgres', - 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() FROM backup') - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'postgres', - 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) FROM backup') - elif self.get_version(node) < 150000: - node.safe_psql( - 'postgres', - 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) FROM backup') - else: - node.safe_psql( - 'postgres', - 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) FROM backup') - - - # Full backup in streaming mode - try: - self.backup_node( - backup_dir, 'node', node, - options=['--stream', '-U', 'backup']) - # we should die here because exception is what we expect to happen - if self.get_version(node) < 150000: - self.assertEqual( - 1, 0, - "Expecting Error because of missing permissions on pg_stop_backup " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - else: - self.assertEqual( - 1, 0, - "Expecting Error because of missing permissions on pg_backup_stop " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - if self.get_version(node) < 150000: - self.assertIn( - "ERROR: permission denied for function pg_stop_backup", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - else: - self.assertIn( - "ERROR: permission denied for function pg_backup_stop", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - "query was: SELECT pg_catalog.txid_snapshot_xmax", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def 
test_start_time(self): - """Test, that option --start-time allows to set backup_id and restore""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - startTime = int(time()) - self.backup_node( - backup_dir, 'node', node, backup_type='full', - options=['--stream', '--start-time={0}'.format(str(startTime))]) - # restore FULL backup by backup_id calculated from start-time - self.restore_node( - backup_dir, 'node', - data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_full'), - backup_id=base36enc(startTime)) - - #FULL backup with incorrect start time - try: - startTime = str(int(time()-100000)) - self.backup_node( - backup_dir, 'node', node, backup_type='full', - options=['--stream', '--start-time={0}'.format(startTime)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - 'Expecting Error because start time for new backup must be newer ' - '\n Output: {0} \n CMD: {1}'.format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - r"ERROR: Can't assign backup_id from requested start_time \(\w*\), this time must be later that backup \w*\n", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - # DELTA backup - startTime = int(time()) - self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--stream', '--start-time={0}'.format(str(startTime))]) - # restore DELTA backup by backup_id calculated from start-time - self.restore_node( - backup_dir, 'node', - data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_delta'), - backup_id=base36enc(startTime)) - - # PAGE backup - startTime = int(time()) - self.backup_node( - backup_dir, 'node', node, backup_type='page', - options=['--stream', '--start-time={0}'.format(str(startTime))]) - # restore PAGE backup by backup_id calculated from start-time - self.restore_node( - backup_dir, 'node', - data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_page'), - backup_id=base36enc(startTime)) - - # PTRACK backup - if self.ptrack: - node.safe_psql( - 'postgres', - 'create extension ptrack') - - startTime = int(time()) - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', - options=['--stream', '--start-time={0}'.format(str(startTime))]) - # restore PTRACK backup by backup_id calculated from start-time - self.restore_node( - backup_dir, 'node', - data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_ptrack'), - backup_id=base36enc(startTime)) - - # @unittest.skip("skip") - def test_start_time_few_nodes(self): - """Test, that we can synchronize backup_id's for different DBs""" - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), - set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - backup_dir1 = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup1') - self.init_pb(backup_dir1) - self.add_instance(backup_dir1, 'node1', node1) - self.set_archiving(backup_dir1, 'node1', node1) - node1.slow_start() - - node2 = 
self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2'), - set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - backup_dir2 = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup2') - self.init_pb(backup_dir2) - self.add_instance(backup_dir2, 'node2', node2) - self.set_archiving(backup_dir2, 'node2', node2) - node2.slow_start() - - # FULL backup - startTime = str(int(time())) - self.backup_node( - backup_dir1, 'node1', node1, backup_type='full', - options=['--stream', '--start-time={0}'.format(startTime)]) - self.backup_node( - backup_dir2, 'node2', node2, backup_type='full', - options=['--stream', '--start-time={0}'.format(startTime)]) - show_backup1 = self.show_pb(backup_dir1, 'node1')[0] - show_backup2 = self.show_pb(backup_dir2, 'node2')[0] - self.assertEqual(show_backup1['id'], show_backup2['id']) - - # DELTA backup - startTime = str(int(time())) - self.backup_node( - backup_dir1, 'node1', node1, backup_type='delta', - options=['--stream', '--start-time={0}'.format(startTime)]) - self.backup_node( - backup_dir2, 'node2', node2, backup_type='delta', - options=['--stream', '--start-time={0}'.format(startTime)]) - show_backup1 = self.show_pb(backup_dir1, 'node1')[1] - show_backup2 = self.show_pb(backup_dir2, 'node2')[1] - self.assertEqual(show_backup1['id'], show_backup2['id']) - - # PAGE backup - startTime = str(int(time())) - self.backup_node( - backup_dir1, 'node1', node1, backup_type='page', - options=['--stream', '--start-time={0}'.format(startTime)]) - self.backup_node( - backup_dir2, 'node2', node2, backup_type='page', - options=['--stream', '--start-time={0}'.format(startTime)]) - show_backup1 = self.show_pb(backup_dir1, 'node1')[2] - show_backup2 = self.show_pb(backup_dir2, 'node2')[2] - self.assertEqual(show_backup1['id'], show_backup2['id']) - - # PTRACK backup - if self.ptrack: - node1.safe_psql( - 'postgres', - 'create extension ptrack') - node2.safe_psql( - 'postgres', - 'create extension ptrack') - - startTime = str(int(time())) - self.backup_node( - backup_dir1, 'node1', node1, backup_type='ptrack', - options=['--stream', '--start-time={0}'.format(startTime)]) - self.backup_node( - backup_dir2, 'node2', node2, backup_type='ptrack', - options=['--stream', '--start-time={0}'.format(startTime)]) - show_backup1 = self.show_pb(backup_dir1, 'node1')[3] - show_backup2 = self.show_pb(backup_dir2, 'node2')[3] - self.assertEqual(show_backup1['id'], show_backup2['id']) - diff --git a/tests/cfs_backup_test.py b/tests/cfs_backup_test.py deleted file mode 100644 index 28ef275df..000000000 --- a/tests/cfs_backup_test.py +++ /dev/null @@ -1,1235 +0,0 @@ -import os -import unittest -import random -import shutil - -from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException - -tblspace_name = 'cfs_tblspace' - - -class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): - # --- Begin --- # - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def setUp(self): - self.backup_dir = os.path.join( - self.tmp_path, self.module_name, self.fname, 'backup') - self.node = self.make_simple_node( - base_dir="{0}/{1}/node".format(self.module_name, self.fname), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'cfs_encryption': 'off', - 'max_wal_senders': '2', - 'shared_buffers': '200MB' - } - ) - - self.init_pb(self.backup_dir) - 
self.add_instance(self.backup_dir, 'node', self.node) - self.set_archiving(self.backup_dir, 'node', self.node) - - self.node.slow_start() - - self.node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.create_tblspace_in_node(self.node, tblspace_name, cfs=True) - - tblspace = self.node.safe_psql( - "postgres", - "SELECT * FROM pg_tablespace WHERE spcname='{0}'".format( - tblspace_name)) - - self.assertIn( - tblspace_name, str(tblspace), - "ERROR: The tablespace not created " - "or it create without compressions") - - self.assertIn( - "compression=true", str(tblspace), - "ERROR: The tablespace not created " - "or it create without compressions") - - self.assertTrue( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression']), - "ERROR: File pg_compression not found" - ) - - # --- Section: Full --- # - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_empty_tablespace(self): - """Case: Check fullbackup empty compressed tablespace""" - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Full backup status is not valid. \n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['pg_compression']), - "ERROR: File pg_compression not found in backup dir" - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_empty_tablespace_stream(self): - """Case: Check fullbackup empty compressed tablespace with options stream""" - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='full', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Full backup status is not valid. 
\n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['pg_compression']), - "ERROR: File pg_compression not found in backup dir" - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - # PGPRO-1018 invalid file size - def test_fullbackup_after_create_table(self): - """Case: Make full backup after created table in the tablespace""" - if not self.enterprise: - return - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "\n ERROR: {0}\n CMD: {1}".format( - repr(e.message), - repr(self.cmd) - ) - ) - return False - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Full backup status is not valid. \n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['pg_compression']), - "ERROR: File pg_compression not found in {0}".format( - os.path.join(self.backup_dir, 'node', backup_id)) - ) - - # check cfm size - cfms = find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']) - self.assertTrue(cfms, "ERROR: .cfm files not found in backup dir") - for cfm in cfms: - size = os.stat(cfm).st_size - self.assertLessEqual(size, 4096, - "ERROR: {0} is not truncated (has size {1} > 4096)".format( - cfm, size - )) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - # PGPRO-1018 invalid file size - def test_fullbackup_after_create_table_stream(self): - """ - Case: Make full backup after created table in the tablespace with option --stream - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='full', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Full backup status is not valid. 
\n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['pg_compression']), - "ERROR: File pg_compression not found in backup dir" - ) - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) - - # --- Section: Incremental from empty tablespace --- # - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_empty_tablespace_ptrack_after_create_table(self): - """ - Case: Make full backup before created table in the tablespace. - Make ptrack backup after create table - """ - - try: - self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='ptrack') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Incremental backup status is not valid. \n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression']), - "ERROR: File pg_compression not found" - ) - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_empty_tablespace_ptrack_after_create_table_stream(self): - """ - Case: Make full backup before created table in the tablespace. - Make ptrack backup after create table - """ - - try: - self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='full', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='ptrack', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Incremental backup status is not valid. 
\n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression']), - "ERROR: File pg_compression not found" - ) - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) - self.assertFalse( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['_ptrack']), - "ERROR: _ptrack files was found in backup dir" - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_empty_tablespace_page_after_create_table(self): - """ - Case: Make full backup before created table in the tablespace. - Make page backup after create table - """ - - try: - self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='page') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Incremental backup status is not valid. \n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression']), - "ERROR: File pg_compression not found" - ) - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) - - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_page_doesnt_store_unchanged_cfm(self): - """ - Case: Test page backup doesn't store cfm file if table were not modified - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id_full)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) - - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='page') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Incremental backup status is not valid. 
\n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression']), - "ERROR: File pg_compression not found" - ) - self.assertFalse( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files is found in backup dir" - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_empty_tablespace_page_after_create_table_stream(self): - """ - Case: Make full backup before created table in the tablespace. - Make page backup after create table - """ - - try: - self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='full', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='page', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Incremental backup status is not valid. \n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression']), - "ERROR: File pg_compression not found" - ) - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) - self.assertFalse( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['_ptrack']), - "ERROR: _ptrack files was found in backup dir" - ) - - # --- Section: Incremental from fill tablespace --- # - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_after_create_table_ptrack_after_create_table(self): - """ - Case: Make full backup before created table in the tablespace. - Make ptrack backup after create table. 
- Check: incremental backup will not greater as full - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format('t1', tblspace_name) - ) - - backup_id_full = None - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,10) i".format('t2', tblspace_name) - ) - - backup_id_ptrack = None - try: - backup_id_ptrack = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='ptrack') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup_full = self.show_pb( - self.backup_dir, 'node', backup_id_full) - show_backup_ptrack = self.show_pb( - self.backup_dir, 'node', backup_id_ptrack) - self.assertGreater( - show_backup_full["data-bytes"], - show_backup_ptrack["data-bytes"], - "ERROR: Size of incremental backup greater than full. \n " - "INFO: {0} >{1}".format( - show_backup_ptrack["data-bytes"], - show_backup_full["data-bytes"] - ) - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_after_create_table_ptrack_after_create_table_stream(self): - """ - Case: Make full backup before created table in the tablespace(--stream). - Make ptrack backup after create table(--stream). - Check: incremental backup size should not be greater than full - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format('t1', tblspace_name) - ) - - backup_id_full = None - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='full', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,25) i".format('t2', tblspace_name) - ) - - backup_id_ptrack = None - try: - backup_id_ptrack = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='ptrack', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup_full = self.show_pb( - self.backup_dir, 'node', backup_id_full) - show_backup_ptrack = self.show_pb( - self.backup_dir, 'node', backup_id_ptrack) - self.assertGreater( - show_backup_full["data-bytes"], - show_backup_ptrack["data-bytes"], - "ERROR: Size of incremental backup greater than full. 
\n " - "INFO: {0} >{1}".format( - show_backup_ptrack["data-bytes"], - show_backup_full["data-bytes"] - ) - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_after_create_table_page_after_create_table(self): - """ - Case: Make full backup before created table in the tablespace. - Make ptrack backup after create table. - Check: incremental backup size should not be greater than full - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format('t1', tblspace_name) - ) - - backup_id_full = None - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,10) i".format('t2', tblspace_name) - ) - - backup_id_page = None - try: - backup_id_page = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='page') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup_full = self.show_pb( - self.backup_dir, 'node', backup_id_full) - show_backup_page = self.show_pb( - self.backup_dir, 'node', backup_id_page) - self.assertGreater( - show_backup_full["data-bytes"], - show_backup_page["data-bytes"], - "ERROR: Size of incremental backup greater than full. \n " - "INFO: {0} >{1}".format( - show_backup_page["data-bytes"], - show_backup_full["data-bytes"] - ) - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_multiple_segments(self): - """ - Case: Make full backup before created table in the tablespace. - Make ptrack backup after create table. 
- Check: incremental backup will not greater as full - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format( - 't_heap', tblspace_name) - ) - - full_result = self.node.safe_psql("postgres", "SELECT * FROM t_heap") - - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "INSERT INTO {0} " - "SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,10) i".format( - 't_heap') - ) - - page_result = self.node.safe_psql("postgres", "SELECT * FROM t_heap") - - try: - backup_id_page = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='page') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup_full = self.show_pb( - self.backup_dir, 'node', backup_id_full) - show_backup_page = self.show_pb( - self.backup_dir, 'node', backup_id_page) - self.assertGreater( - show_backup_full["data-bytes"], - show_backup_page["data-bytes"], - "ERROR: Size of incremental backup greater than full. \n " - "INFO: {0} >{1}".format( - show_backup_page["data-bytes"], - show_backup_full["data-bytes"] - ) - ) - - # CHECK FULL BACKUP - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - self.restore_node( - self.backup_dir, 'node', self.node, backup_id=backup_id_full, - options=[ - "-j", "4", - "--recovery-target=immediate", - "--recovery-target-action=promote"]) - - self.node.slow_start() - self.assertEqual( - full_result, - self.node.safe_psql("postgres", "SELECT * FROM t_heap"), - 'Lost data after restore') - - # CHECK PAGE BACKUP - self.node.stop() - self.node.cleanup() - shutil.rmtree( - self.get_tblspace_path(self.node, tblspace_name), - ignore_errors=True) - self.restore_node( - self.backup_dir, 'node', self.node, backup_id=backup_id_page, - options=[ - "-j", "4", - "--recovery-target=immediate", - "--recovery-target-action=promote"]) - - self.node.slow_start() - self.assertEqual( - page_result, - self.node.safe_psql("postgres", "SELECT * FROM t_heap"), - 'Lost data after restore') - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_multiple_segments_in_multiple_tablespaces(self): - """ - Case: Make full backup before created table in the tablespace. - Make ptrack backup after create table. 
- Check: incremental backup will not greater as full - """ - tblspace_name_1 = 'tblspace_name_1' - tblspace_name_2 = 'tblspace_name_2' - - self.create_tblspace_in_node(self.node, tblspace_name_1, cfs=True) - self.create_tblspace_in_node(self.node, tblspace_name_2, cfs=True) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format( - 't_heap_1', tblspace_name_1)) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format( - 't_heap_2', tblspace_name_2)) - - full_result_1 = self.node.safe_psql( - "postgres", "SELECT * FROM t_heap_1") - full_result_2 = self.node.safe_psql( - "postgres", "SELECT * FROM t_heap_2") - - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "INSERT INTO {0} " - "SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,10) i".format( - 't_heap_1') - ) - - self.node.safe_psql( - "postgres", - "INSERT INTO {0} " - "SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,10) i".format( - 't_heap_2') - ) - - page_result_1 = self.node.safe_psql( - "postgres", "SELECT * FROM t_heap_1") - page_result_2 = self.node.safe_psql( - "postgres", "SELECT * FROM t_heap_2") - - try: - backup_id_page = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='page') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup_full = self.show_pb( - self.backup_dir, 'node', backup_id_full) - show_backup_page = self.show_pb( - self.backup_dir, 'node', backup_id_page) - self.assertGreater( - show_backup_full["data-bytes"], - show_backup_page["data-bytes"], - "ERROR: Size of incremental backup greater than full. 
\n " - "INFO: {0} >{1}".format( - show_backup_page["data-bytes"], - show_backup_full["data-bytes"] - ) - ) - - # CHECK FULL BACKUP - self.node.stop() - - self.restore_node( - self.backup_dir, 'node', self.node, - backup_id=backup_id_full, - options=[ - "-j", "4", "--incremental-mode=checksum", - "--recovery-target=immediate", - "--recovery-target-action=promote"]) - self.node.slow_start() - - self.assertEqual( - full_result_1, - self.node.safe_psql("postgres", "SELECT * FROM t_heap_1"), - 'Lost data after restore') - self.assertEqual( - full_result_2, - self.node.safe_psql("postgres", "SELECT * FROM t_heap_2"), - 'Lost data after restore') - - # CHECK PAGE BACKUP - self.node.stop() - - self.restore_node( - self.backup_dir, 'node', self.node, - backup_id=backup_id_page, - options=[ - "-j", "4", "--incremental-mode=checksum", - "--recovery-target=immediate", - "--recovery-target-action=promote"]) - self.node.slow_start() - - self.assertEqual( - page_result_1, - self.node.safe_psql("postgres", "SELECT * FROM t_heap_1"), - 'Lost data after restore') - self.assertEqual( - page_result_2, - self.node.safe_psql("postgres", "SELECT * FROM t_heap_2"), - 'Lost data after restore') - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_after_create_table_page_after_create_table_stream(self): - """ - Case: Make full backup before created table in the tablespace(--stream). - Make ptrack backup after create table(--stream). - Check: incremental backup will not greater as full - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format('t1', tblspace_name) - ) - - backup_id_full = None - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='full', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,10) i".format('t2', tblspace_name) - ) - - backup_id_page = None - try: - backup_id_page = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='page', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup_full = self.show_pb( - self.backup_dir, 'node', backup_id_full) - show_backup_page = self.show_pb( - self.backup_dir, 'node', backup_id_page) - self.assertGreater( - show_backup_full["data-bytes"], - show_backup_page["data-bytes"], - "ERROR: Size of incremental backup greater than full. 
\n " - "INFO: {0} >{1}".format( - show_backup_page["data-bytes"], - show_backup_full["data-bytes"] - ) - ) - - # --- Make backup with not valid data(broken .cfm) --- # - @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_delete_random_cfm_file_from_tablespace_dir(self): - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - self.node.safe_psql( - "postgres", - "CHECKPOINT" - ) - - list_cmf = find_by_extensions( - [self.get_tblspace_path(self.node, tblspace_name)], - ['.cfm']) - self.assertTrue( - list_cmf, - "ERROR: .cfm-files not found into tablespace dir" - ) - - os.remove(random.choice(list_cmf)) - - self.assertRaises( - ProbackupException, - self.backup_node, - self.backup_dir, - 'node', - self.node, - backup_type='full' - ) - - @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_delete_file_pg_compression_from_tablespace_dir(self): - os.remove( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression'])[0]) - - self.assertRaises( - ProbackupException, - self.backup_node, - self.backup_dir, - 'node', - self.node, - backup_type='full' - ) - - @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_delete_random_data_file_from_tablespace_dir(self): - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - self.node.safe_psql( - "postgres", - "CHECKPOINT" - ) - - list_data_files = find_by_pattern( - [self.get_tblspace_path(self.node, tblspace_name)], - '^.*/\d+$') - self.assertTrue( - list_data_files, - "ERROR: Files of data not found into tablespace dir" - ) - - os.remove(random.choice(list_data_files)) - - self.assertRaises( - ProbackupException, - self.backup_node, - self.backup_dir, - 'node', - self.node, - backup_type='full' - ) - - @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_broken_random_cfm_file_into_tablespace_dir(self): - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - list_cmf = find_by_extensions( - [self.get_tblspace_path(self.node, tblspace_name)], - ['.cfm']) - self.assertTrue( - list_cmf, - "ERROR: .cfm-files not found into tablespace dir" - ) - - corrupt_file(random.choice(list_cmf)) - - self.assertRaises( - ProbackupException, - self.backup_node, - self.backup_dir, - 'node', - self.node, - backup_type='full' - ) - - @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_broken_random_data_file_into_tablespace_dir(self): - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - list_data_files = find_by_pattern( - [self.get_tblspace_path(self.node, tblspace_name)], - 
'^.*/\d+$') - self.assertTrue( - list_data_files, - "ERROR: Files of data not found into tablespace dir" - ) - - corrupt_file(random.choice(list_data_files)) - - self.assertRaises( - ProbackupException, - self.backup_node, - self.backup_dir, - 'node', - self.node, - backup_type='full' - ) - - @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_broken_file_pg_compression_into_tablespace_dir(self): - - corrupted_file = find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression'])[0] - - self.assertTrue( - corrupt_file(corrupted_file), - "ERROR: File is not corrupted or it missing" - ) - - self.assertRaises( - ProbackupException, - self.backup_node, - self.backup_dir, - 'node', - self.node, - backup_type='full' - ) - -# # --- End ---# - - -#class CfsBackupEncTest(CfsBackupNoEncTest): -# # --- Begin --- # -# def setUp(self): -# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key" -# super(CfsBackupEncTest, self).setUp() diff --git a/tests/cfs_catchup_test.py b/tests/cfs_catchup_test.py deleted file mode 100644 index 43c3f18f1..000000000 --- a/tests/cfs_catchup_test.py +++ /dev/null @@ -1,117 +0,0 @@ -import os -import unittest -import random -import shutil - -from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException - - -class CfsCatchupNoEncTest(ProbackupTest, unittest.TestCase): - - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_full_catchup_with_tablespace(self): - """ - Test tablespace transfers - """ - # preparation - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), - set_replication = True - ) - src_pg.slow_start() - tblspace1_old_path = self.get_tblspace_path(src_pg, 'tblspace1_old') - self.create_tblspace_in_node(src_pg, 'tblspace1', tblspc_path = tblspace1_old_path, cfs=True) - src_pg.safe_psql( - "postgres", - "CREATE TABLE ultimate_question TABLESPACE tblspace1 AS SELECT 42 AS answer") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") - src_pg.safe_psql( - "postgres", - "CHECKPOINT") - - # do full catchup with tablespace mapping - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - tblspace1_new_path = self.get_tblspace_path(dst_pg, 'tblspace1_new') - self.catchup_node( - backup_mode = 'FULL', - source_pgdata = src_pg.data_dir, - destination_node = dst_pg, - options = [ - '-d', 'postgres', - '-p', str(src_pg.port), - '--stream', - '-T', '{0}={1}'.format(tblspace1_old_path, tblspace1_new_path) - ] - ) - - # 1st check: compare data directories - self.compare_pgdata( - self.pgdata_content(src_pg.data_dir), - self.pgdata_content(dst_pg.data_dir) - ) - - # check cfm size - cfms = find_by_extensions([os.path.join(dst_pg.data_dir)], ['.cfm']) - self.assertTrue(cfms, "ERROR: .cfm files not found in backup dir") - for cfm in cfms: - size = os.stat(cfm).st_size - self.assertLessEqual(size, 4096, - "ERROR: {0} is not truncated (has size {1} > 4096)".format( - cfm, size - )) - - # make changes in master tablespace - src_pg.safe_psql( - "postgres", - "UPDATE ultimate_question SET answer = -1") - src_pg.safe_psql( - "postgres", - "CHECKPOINT") - - # run&recover catchup'ed instance - dst_options = {} - dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) - dst_pg.slow_start() - - # 2nd check: run verification query - dst_query_result 
= dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") - self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') - - # and now delta backup - dst_pg.stop() - - self.catchup_node( - backup_mode = 'DELTA', - source_pgdata = src_pg.data_dir, - destination_node = dst_pg, - options = [ - '-d', 'postgres', - '-p', str(src_pg.port), - '--stream', - '-T', '{0}={1}'.format(tblspace1_old_path, tblspace1_new_path) - ] - ) - - # check cfm size again - cfms = find_by_extensions([os.path.join(dst_pg.data_dir)], ['.cfm']) - self.assertTrue(cfms, "ERROR: .cfm files not found in backup dir") - for cfm in cfms: - size = os.stat(cfm).st_size - self.assertLessEqual(size, 4096, - "ERROR: {0} is not truncated (has size {1} > 4096)".format( - cfm, size - )) - - # run&recover catchup'ed instance - dst_options = {} - dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) - dst_pg.slow_start() - - - # 3rd check: run verification query - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") - self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') diff --git a/tests/cfs_restore_test.py b/tests/cfs_restore_test.py deleted file mode 100644 index 6b69b4ffe..000000000 --- a/tests/cfs_restore_test.py +++ /dev/null @@ -1,450 +0,0 @@ -""" -restore - Syntax: - - pg_probackup restore -B backupdir --instance instance_name - [-D datadir] - [ -i backup_id | [{--time=time | --xid=xid | --lsn=lsn } [--inclusive=boolean]]][--timeline=timeline] [-T OLDDIR=NEWDIR] - [-j num_threads] [--progress] [-q] [-v] - -""" -import os -import unittest -import shutil - -from .helpers.cfs_helpers import find_by_name -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException - -tblspace_name = 'cfs_tblspace' -tblspace_name_new = 'cfs_tblspace_new' - - -class CfsRestoreBase(ProbackupTest, unittest.TestCase): - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def setUp(self): - self.backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.node = self.make_simple_node( - base_dir="{0}/{1}/node".format(self.module_name, self.fname), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ -# 'ptrack_enable': 'on', - 'cfs_encryption': 'off', - } - ) - - self.init_pb(self.backup_dir) - self.add_instance(self.backup_dir, 'node', self.node) - self.set_archiving(self.backup_dir, 'node', self.node) - - self.node.slow_start() - self.create_tblspace_in_node(self.node, tblspace_name, cfs=True) - - self.add_data_in_cluster() - - self.backup_id = None - try: - self.backup_id = self.backup_node(self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - def add_data_in_cluster(self): - pass - - -class CfsRestoreNoencEmptyTablespaceTest(CfsRestoreBase): - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_empty_tablespace_from_fullbackup(self): - """ - Case: Restore empty tablespace from valid full backup. 
- """ - self.node.stop(["-m", "immediate"]) - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - try: - self.restore_node(self.backup_dir, 'node', self.node, backup_id=self.backup_id) - except ProbackupException as e: - self.fail( - "ERROR: Restore failed. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ["pg_compression"]), - "ERROR: Restored data is not valid. pg_compression not found in tablespace dir." - ) - - try: - self.node.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - tblspace = self.node.safe_psql( - "postgres", - "SELECT * FROM pg_tablespace WHERE spcname='{0}'".format(tblspace_name) - ).decode("UTF-8") - self.assertTrue( - tblspace_name in tblspace and "compression=true" in tblspace, - "ERROR: The tablespace not restored or it restored without compressions" - ) - - -class CfsRestoreNoencTest(CfsRestoreBase): - def add_data_in_cluster(self): - self.node.safe_psql( - "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,1e5) i'.format('t1', tblspace_name) - ) - self.table_t1 = self.node.safe_psql( - "postgres", - "SELECT * FROM t1" - ) - - # --- Restore from full backup ---# - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_from_fullbackup_to_old_location(self): - """ - Case: Restore instance from valid full backup to old location. - """ - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - try: - self.restore_node(self.backup_dir, 'node', self.node, backup_id=self.backup_id) - except ProbackupException as e: - self.fail( - "ERROR: Restore from full backup failed. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), - "ERROR: File pg_compression not found in tablespace dir" - ) - try: - self.node.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertEqual( - repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')), - repr(self.table_t1) - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_from_fullbackup_to_old_location_3_jobs(self): - """ - Case: Restore instance from valid full backup to old location. - """ - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - try: - self.restore_node(self.backup_dir, 'node', self.node, backup_id=self.backup_id, options=['-j', '3']) - except ProbackupException as e: - self.fail( - "ERROR: Restore from full backup failed. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), - "ERROR: File pg_compression not found in backup dir" - ) - try: - self.node.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. 
\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertEqual( - repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')), - repr(self.table_t1) - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_from_fullbackup_to_new_location(self): - """ - Case: Restore instance from valid full backup to new location. - """ - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(self.module_name, self.fname)) - node_new.cleanup() - - try: - self.restore_node(self.backup_dir, 'node', node_new, backup_id=self.backup_id) - self.set_auto_conf(node_new, {'port': node_new.port}) - except ProbackupException as e: - self.fail( - "ERROR: Restore from full backup failed. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), - "ERROR: File pg_compression not found in backup dir" - ) - try: - node_new.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertEqual( - repr(node_new.safe_psql("postgres", "SELECT * FROM %s" % 't1')), - repr(self.table_t1) - ) - node_new.cleanup() - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_from_fullbackup_to_new_location_5_jobs(self): - """ - Case: Restore instance from valid full backup to new location. - """ - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(self.module_name, self.fname)) - node_new.cleanup() - - try: - self.restore_node(self.backup_dir, 'node', node_new, backup_id=self.backup_id, options=['-j', '5']) - self.set_auto_conf(node_new, {'port': node_new.port}) - except ProbackupException as e: - self.fail( - "ERROR: Restore from full backup failed. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), - "ERROR: File pg_compression not found in backup dir" - ) - try: - node_new.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertEqual( - repr(node_new.safe_psql("postgres", "SELECT * FROM %s" % 't1')), - repr(self.table_t1) - ) - node_new.cleanup() - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_from_fullbackup_to_old_location_tablespace_new_location(self): - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - os.mkdir(self.get_tblspace_path(self.node, tblspace_name_new)) - - try: - self.restore_node( - self.backup_dir, - 'node', self.node, - backup_id=self.backup_id, - options=["-T", "{0}={1}".format( - self.get_tblspace_path(self.node, tblspace_name), - self.get_tblspace_path(self.node, tblspace_name_new) - ) - ] - ) - except ProbackupException as e: - self.fail( - "ERROR: Restore from full backup failed. 
\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name_new)], ['pg_compression']), - "ERROR: File pg_compression not found in new tablespace location" - ) - try: - self.node.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertEqual( - repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')), - repr(self.table_t1) - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_from_fullbackup_to_old_location_tablespace_new_location_3_jobs(self): - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - os.mkdir(self.get_tblspace_path(self.node, tblspace_name_new)) - - try: - self.restore_node( - self.backup_dir, - 'node', self.node, - backup_id=self.backup_id, - options=["-j", "3", "-T", "{0}={1}".format( - self.get_tblspace_path(self.node, tblspace_name), - self.get_tblspace_path(self.node, tblspace_name_new) - ) - ] - ) - except ProbackupException as e: - self.fail( - "ERROR: Restore from full backup failed. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name_new)], ['pg_compression']), - "ERROR: File pg_compression not found in new tablespace location" - ) - try: - self.node.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertEqual( - repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')), - repr(self.table_t1) - ) - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_fullbackup_to_new_location_tablespace_new_location(self): - pass - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_fullbackup_to_new_location_tablespace_new_location_5_jobs(self): - pass - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_ptrack(self): - """ - Case: Restore from backup to old location - """ - pass - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_ptrack_jobs(self): - """ - Case: Restore from backup to old location, four jobs - """ - pass - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_ptrack_new_jobs(self): - pass - -# --------------------------------------------------------- # - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_page(self): - """ - Case: Restore from backup to old location - """ - pass - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_page_jobs(self): - """ - Case: Restore from backup to old location, four jobs - """ - pass - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_page_new_jobs(self): - """ - Case: Restore from backup to new location, four jobs - """ - pass - - -#class CfsRestoreEncEmptyTablespaceTest(CfsRestoreNoencEmptyTablespaceTest): -# # --- Begin --- # -# def setUp(self): -# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key" -# super(CfsRestoreNoencEmptyTablespaceTest, self).setUp() -# -# -#class CfsRestoreEncTest(CfsRestoreNoencTest): -# # --- Begin --- # -# def setUp(self): -# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key" -# 
super(CfsRestoreNoencTest, self).setUp() diff --git a/tests/cfs_validate_backup_test.py b/tests/cfs_validate_backup_test.py deleted file mode 100644 index 343020dfc..000000000 --- a/tests/cfs_validate_backup_test.py +++ /dev/null @@ -1,24 +0,0 @@ -import os -import unittest -import random - -from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException - -tblspace_name = 'cfs_tblspace' - - -class CfsValidateBackupNoenc(ProbackupTest,unittest.TestCase): - def setUp(self): - pass - - def test_validate_fullbackup_empty_tablespace_after_delete_pg_compression(self): - pass - - def tearDown(self): - pass - - -#class CfsValidateBackupNoenc(CfsValidateBackupNoenc): -# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key" -# super(CfsValidateBackupNoenc).setUp() diff --git a/tests/checkdb_test.py b/tests/checkdb_test.py deleted file mode 100644 index 2caf4fcb2..000000000 --- a/tests/checkdb_test.py +++ /dev/null @@ -1,849 +0,0 @@ -import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from datetime import datetime, timedelta -import subprocess -from testgres import QueryException -import shutil -import sys -import time - - -class CheckdbTest(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - def test_checkdb_amcheck_only_sanity(self): - """""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir="{0}/{1}/node".format(self.module_name, self.fname), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "create table t_heap as select i" - " as id from generate_series(0,100) i") - - node.safe_psql( - "postgres", - "create index on t_heap(id)") - - node.safe_psql( - "postgres", - "create table idxpart (a int) " - "partition by range (a)") - - node.safe_psql( - "postgres", - "create index on idxpart(a)") - - try: - node.safe_psql( - "postgres", - "create extension amcheck") - except QueryException as e: - node.safe_psql( - "postgres", - "create extension amcheck_next") - - log_file_path = os.path.join( - backup_dir, 'log', 'pg_probackup.log') - - # simple sanity - try: - self.checkdb_node( - options=['--skip-block-validation']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because --amcheck option is missing\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Option '--skip-block-validation' must be " - "used with '--amcheck' option", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - # simple sanity - output = self.checkdb_node( - options=[ - '--amcheck', - '--skip-block-validation', - '-d', 'postgres', '-p', str(node.port)]) - - self.assertIn( - 'INFO: checkdb --amcheck finished successfully', - output) - self.assertIn( - 'All checked indexes are valid', - output) - - # logging to file sanity - try: - self.checkdb_node( - options=[ - '--amcheck', - '--skip-block-validation', - '--log-level-file=verbose', - '-d', 'postgres', '-p', str(node.port)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because log_directory missing\n" 
- " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Cannot save checkdb logs to a file. " - "You must specify --log-directory option when " - "running checkdb with --log-level-file option enabled", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - # If backup_dir provided, then instance name must be - # provided too - try: - self.checkdb_node( - backup_dir, - options=[ - '--amcheck', - '--skip-block-validation', - '--log-level-file=verbose', - '-d', 'postgres', '-p', str(node.port)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because log_directory missing\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: required parameter not specified: --instance", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - # checkdb can use default or set in config values, - # if backup_dir and instance name are provided - self.checkdb_node( - backup_dir, - 'node', - options=[ - '--amcheck', - '--skip-block-validation', - '--log-level-file=verbose', - '-d', 'postgres', '-p', str(node.port)]) - - # check that file present and full of messages - os.path.isfile(log_file_path) - with open(log_file_path) as f: - log_file_content = f.read() - self.assertIn( - 'INFO: checkdb --amcheck finished successfully', - log_file_content) - self.assertIn( - 'VERBOSE: (query)', - log_file_content) - os.unlink(log_file_path) - - # log-level-file and log-directory are provided - self.checkdb_node( - backup_dir, - 'node', - options=[ - '--amcheck', - '--skip-block-validation', - '--log-level-file=verbose', - '--log-directory={0}'.format( - os.path.join(backup_dir, 'log')), - '-d', 'postgres', '-p', str(node.port)]) - - # check that file present and full of messages - os.path.isfile(log_file_path) - with open(log_file_path) as f: - log_file_content = f.read() - self.assertIn( - 'INFO: checkdb --amcheck finished successfully', - log_file_content) - self.assertIn( - 'VERBOSE: (query)', - log_file_content) - os.unlink(log_file_path) - - gdb = self.checkdb_node( - gdb=True, - options=[ - '--amcheck', - '--skip-block-validation', - '--log-level-file=verbose', - '--log-directory={0}'.format( - os.path.join(backup_dir, 'log')), - '-d', 'postgres', '-p', str(node.port)]) - - gdb.set_breakpoint('amcheck_one_index') - gdb.run_until_break() - - node.safe_psql( - "postgres", - "drop table t_heap") - - gdb.remove_all_breakpoints() - - gdb.continue_execution_until_exit() - - # check that message about missing index is present - with open(log_file_path) as f: - log_file_content = f.read() - self.assertIn( - 'ERROR: checkdb --amcheck finished with failure', - log_file_content) - self.assertIn( - "WARNING: Thread [1]. 
Amcheck failed in database 'postgres' " - "for index: 'public.t_heap_id_idx':", - log_file_content) - self.assertIn( - 'ERROR: could not open relation with OID', - log_file_content) - - # Clean after yourself - gdb.kill() - node.stop() - - # @unittest.skip("skip") - def test_basic_checkdb_amcheck_only_sanity(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir="{0}/{1}/node".format(self.module_name, self.fname), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # create two databases - node.safe_psql("postgres", "create database db1") - try: - node.safe_psql( - "db1", - "create extension amcheck") - except QueryException as e: - node.safe_psql( - "db1", - "create extension amcheck_next") - - node.safe_psql("postgres", "create database db2") - try: - node.safe_psql( - "db2", - "create extension amcheck") - except QueryException as e: - node.safe_psql( - "db2", - "create extension amcheck_next") - - # init pgbench in two databases and corrupt both indexes - node.pgbench_init(scale=5, dbname='db1') - node.pgbench_init(scale=5, dbname='db2') - - node.safe_psql( - "db2", - "alter index pgbench_accounts_pkey rename to some_index") - - index_path_1 = os.path.join( - node.data_dir, - node.safe_psql( - "db1", - "select pg_relation_filepath('pgbench_accounts_pkey')").decode('utf-8').rstrip()) - - index_path_2 = os.path.join( - node.data_dir, - node.safe_psql( - "db2", - "select pg_relation_filepath('some_index')").decode('utf-8').rstrip()) - - try: - self.checkdb_node( - options=[ - '--amcheck', - '--skip-block-validation', - '-d', 'postgres', '-p', str(node.port)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because some db was not amchecked" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Some databases were not amchecked", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - node.stop() - - # Let`s do index corruption - with open(index_path_1, "rb+", 0) as f: - f.seek(42000) - f.write(b"blablahblahs") - f.flush() - f.close - - with open(index_path_2, "rb+", 0) as f: - f.seek(42000) - f.write(b"blablahblahs") - f.flush() - f.close - - node.slow_start() - - log_file_path = os.path.join( - backup_dir, 'log', 'pg_probackup.log') - - try: - self.checkdb_node( - options=[ - '--amcheck', - '--skip-block-validation', - '--log-level-file=verbose', - '--log-directory={0}'.format( - os.path.join(backup_dir, 'log')), - '-d', 'postgres', '-p', str(node.port)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because some db was not amchecked" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: checkdb --amcheck finished with failure", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - # corruption of both indexes in db1 and db2 must be detected - # also the that amcheck is not installed in 'postgres' - # should be logged - with open(log_file_path) as f: - log_file_content = f.read() - self.assertIn( - "WARNING: Thread [1]. 
Amcheck failed in database 'db1' " - "for index: 'public.pgbench_accounts_pkey':", - log_file_content) - - self.assertIn( - "WARNING: Thread [1]. Amcheck failed in database 'db2' " - "for index: 'public.some_index':", - log_file_content) - - self.assertIn( - "ERROR: checkdb --amcheck finished with failure", - log_file_content) - - # Clean after yourself - node.stop() - - # @unittest.skip("skip") - def test_checkdb_block_validation_sanity(self): - """make node, corrupt some pages, check that checkdb failed""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "create table t_heap as select 1 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1000) i") - node.safe_psql( - "postgres", - "CHECKPOINT;") - - heap_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - - # sanity - try: - self.checkdb_node() - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because pgdata must be specified\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: required parameter not specified: PGDATA (-D, --pgdata)", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.checkdb_node( - data_dir=node.data_dir, - options=['-d', 'postgres', '-p', str(node.port)]) - - self.checkdb_node( - backup_dir, 'node', - options=['-d', 'postgres', '-p', str(node.port)]) - - heap_full_path = os.path.join(node.data_dir, heap_path) - - with open(heap_full_path, "rb+", 0) as f: - f.seek(9000) - f.write(b"bla") - f.flush() - f.close - - with open(heap_full_path, "rb+", 0) as f: - f.seek(42000) - f.write(b"bla") - f.flush() - f.close - - try: - self.checkdb_node( - backup_dir, 'node', - options=['-d', 'postgres', '-p', str(node.port)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of data corruption\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Checkdb failed", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Corruption detected in file "{0}", block 1'.format( - os.path.normpath(heap_full_path)), - e.message) - - self.assertIn( - 'WARNING: Corruption detected in file "{0}", block 5'.format( - os.path.normpath(heap_full_path)), - e.message) - - # Clean after yourself - node.stop() - - def test_checkdb_checkunique(self): - """Test checkunique parameter of amcheck.bt_index_check function""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - node.slow_start() - - try: - node.safe_psql( - "postgres", - "create extension amcheck") - except QueryException as e: - node.safe_psql( - "postgres", - "create extension amcheck_next") - - # Part of https://commitfest.postgresql.org/32/2976/ patch test - 
node.safe_psql( - "postgres", - "CREATE TABLE bttest_unique(a varchar(50), b varchar(1500), c bytea, d varchar(50)); " - "ALTER TABLE bttest_unique SET (autovacuum_enabled = false); " - "CREATE UNIQUE INDEX bttest_unique_idx ON bttest_unique(a,b); " - "UPDATE pg_catalog.pg_index SET indisunique = false " - "WHERE indrelid = (SELECT oid FROM pg_catalog.pg_class WHERE relname = 'bttest_unique'); " - "INSERT INTO bttest_unique " - " SELECT i::text::varchar, " - " array_to_string(array( " - " SELECT substr('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789', ((random()*(36-1)+1)::integer), 1) " - " FROM generate_series(1,1300)),'')::varchar, " - " i::text::bytea, i::text::varchar " - " FROM generate_series(0,1) AS i, generate_series(0,30) AS x; " - "UPDATE pg_catalog.pg_index SET indisunique = true " - "WHERE indrelid = (SELECT oid FROM pg_catalog.pg_class WHERE relname = 'bttest_unique'); " - "DELETE FROM bttest_unique WHERE ctid::text='(0,2)'; " - "DELETE FROM bttest_unique WHERE ctid::text='(4,2)'; " - "DELETE FROM bttest_unique WHERE ctid::text='(4,3)'; " - "DELETE FROM bttest_unique WHERE ctid::text='(9,3)';") - - # run without checkunique option (error will not detected) - output = self.checkdb_node( - options=[ - '--amcheck', - '--skip-block-validation', - '-d', 'postgres', '-p', str(node.port)]) - - self.assertIn( - 'INFO: checkdb --amcheck finished successfully', - output) - self.assertIn( - 'All checked indexes are valid', - output) - - # run with checkunique option - try: - self.checkdb_node( - options=[ - '--amcheck', - '--skip-block-validation', - '--checkunique', - '-d', 'postgres', '-p', str(node.port)]) - if (ProbackupTest.enterprise and - (self.get_version(node) >= 111300 and self.get_version(node) < 120000 - or self.get_version(node) >= 120800 and self.get_version(node) < 130000 - or self.get_version(node) >= 130400)): - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of index corruption\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - else: - self.assertRegex( - self.output, - r"WARNING: Extension 'amcheck(|_next)' version [\d.]* in schema 'public' do not support 'checkunique' parameter") - except ProbackupException as e: - self.assertIn( - "ERROR: checkdb --amcheck finished with failure. Not all checked indexes are valid. All databases were amchecked.", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - "Amcheck failed in database 'postgres' for index: 'public.bttest_unique_idx': ERROR: index \"bttest_unique_idx\" is corrupted. 
There are tuples violating UNIQUE constraint", - e.message) - - # Clean after yourself - node.stop() - - # @unittest.skip("skip") - def test_checkdb_sigint_handling(self): - """""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - try: - node.safe_psql( - "postgres", - "create extension amcheck") - except QueryException as e: - node.safe_psql( - "postgres", - "create extension amcheck_next") - - # FULL backup - gdb = self.checkdb_node( - backup_dir, 'node', gdb=True, - options=[ - '-d', 'postgres', '-j', '2', - '--skip-block-validation', - '--progress', - '--amcheck', '-p', str(node.port)]) - - gdb.set_breakpoint('amcheck_one_index') - gdb.run_until_break() - - gdb.continue_execution_until_break(20) - gdb.remove_all_breakpoints() - - gdb._execute('signal SIGINT') - gdb.continue_execution_until_error() - - with open(node.pg_log_file, 'r') as f: - output = f.read() - - self.assertNotIn('could not receive data from client', output) - self.assertNotIn('could not send data to client', output) - self.assertNotIn('connection to client lost', output) - - # Clean after yourself - gdb.kill() - node.stop() - - # @unittest.skip("skip") - def test_checkdb_with_least_privileges(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - 'postgres', - 'CREATE DATABASE backupdb') - - try: - node.safe_psql( - "backupdb", - "create extension amcheck") - except QueryException as e: - node.safe_psql( - "backupdb", - "create extension amcheck_next") - - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC;") - - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - 'CREATE ROLE backup WITH LOGIN; ' - 'GRANT CONNECT ON DATABASE backupdb to backup; ' - 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' - 'GRANT USAGE ON SCHEMA public TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' - 'GRANT SELECT ON TABLE 
pg_catalog.pg_namespace TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;') # amcheck-next function - - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - 'CREATE ROLE backup WITH LOGIN; ' - 'GRANT CONNECT ON DATABASE backupdb to backup; ' - 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' - 'GRANT USAGE ON SCHEMA public TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' -# 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;') - - # PG 10 - elif self.get_version(node) > 100000 and self.get_version(node) < 110000: - node.safe_psql( - 'backupdb', - 'CREATE ROLE backup WITH LOGIN; ' - 'GRANT CONNECT ON DATABASE backupdb to backup; ' - 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' - 'GRANT USAGE ON SCHEMA public TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION 
pg_catalog.set_config(text, text, boolean) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup;' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup;') - - if ProbackupTest.enterprise: - # amcheck-1.1 - node.safe_psql( - 'backupdb', - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup') - else: - # amcheck-1.0 - node.safe_psql( - 'backupdb', - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup') - # >= 11 < 14 - elif self.get_version(node) > 110000 and self.get_version(node) < 140000: - node.safe_psql( - 'backupdb', - 'CREATE ROLE backup WITH LOGIN; ' - 'GRANT CONNECT ON DATABASE backupdb to backup; ' - 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' - 'GRANT USAGE ON SCHEMA public TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;') - - # checkunique parameter - if ProbackupTest.enterprise: - if (self.get_version(node) >= 111300 and self.get_version(node) < 120000 - or self.get_version(node) >= 120800 and self.get_version(node) < 130000 - or self.get_version(node) >= 130400): - node.safe_psql( - "backupdb", - "GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool, bool) TO backup") - # >= 14 - else: - node.safe_psql( - 'backupdb', - 'CREATE ROLE backup WITH LOGIN; ' - 'GRANT CONNECT ON DATABASE backupdb to backup; ' - 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' - 'GRANT USAGE ON SCHEMA public TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' - 
'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anycompatiblearray, anycompatible) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;') - - # checkunique parameter - if ProbackupTest.enterprise: - node.safe_psql( - "backupdb", - "GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool, bool) TO backup") - - if ProbackupTest.enterprise: - node.safe_psql( - 'backupdb', - 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;') - - # checkdb - try: - self.checkdb_node( - backup_dir, 'node', - options=[ - '--amcheck', '-U', 'backup', - '-d', 'backupdb', '-p', str(node.port)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because permissions are missing\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "INFO: Amcheck succeeded for database 'backupdb'", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - "WARNING: Extension 'amcheck' or 'amcheck_next' are " - "not installed in database postgres", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - "ERROR: Some databases were not amchecked", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - # Clean after yourself - node.stop() diff --git a/tests/compatibility_test.py b/tests/compatibility_test.py deleted file mode 100644 index 591afb069..000000000 --- a/tests/compatibility_test.py +++ /dev/null @@ -1,1500 +0,0 @@ -import unittest -import subprocess -import os -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from sys import exit -import shutil - - -def check_manual_tests_enabled(): - return 'PGPROBACKUP_MANUAL' in os.environ and os.environ['PGPROBACKUP_MANUAL'] == 'ON' - - -def check_ssh_agent_path_exists(): - return 'PGPROBACKUP_SSH_AGENT_PATH' in os.environ - - -class CompatibilityTest(ProbackupTest, unittest.TestCase): - - def setUp(self): - self.fname = self.id().split('.')[3] - - # @unittest.expectedFailure - @unittest.skipUnless(check_manual_tests_enabled(), 
'skip manual test') - @unittest.skipUnless(check_ssh_agent_path_exists(), 'skip no ssh agent path exist') - # @unittest.skip("skip") - def test_catchup_with_different_remote_major_pg(self): - """ - Description in jira issue PBCKP-236 - This test exposes the ticket error using pg_probackup builds for both PGPROEE11 and PGPROEE9_6 - - Prerequisites: - - pg_probackup git tag for PBCKP 2.5.1 - - master pg_probackup build should be made for PGPROEE11 - - agent pg_probackup build should be made for PGPROEE9_6 - - Calling probackup PGPROEE9_6 pg_probackup agent from PGPROEE11 pg_probackup master for DELTA backup causes - the PBCKP-236 problem - - Please give env variables PGPROBACKUP_MANUAL=ON;PGPROBACKUP_SSH_AGENT_PATH= - for the test - - Please make path for agent's pgprobackup_ssh_agent_path = '/home/avaness/postgres/postgres.build.ee.9.6/bin/' - without pg_probackup executable - """ - - self.verbose = True - self.remote = True - # please use your own local path like - # pgprobackup_ssh_agent_path = '/home/avaness/postgres/postgres.build.clean/bin/' - pgprobackup_ssh_agent_path = os.environ['PGPROBACKUP_SSH_AGENT_PATH'] - - src_pg = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'src'), - set_replication=True, - ) - src_pg.slow_start() - src_pg.safe_psql( - "postgres", - "CREATE TABLE ultimate_question AS SELECT 42 AS answer") - - # do full catchup - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( - backup_mode='FULL', - source_pgdata=src_pg.data_dir, - destination_node=dst_pg, - options=['-d', 'postgres', '-p', str(src_pg.port), '--stream'] - ) - - dst_options = {'port': str(dst_pg.port)} - self.set_auto_conf(dst_pg, dst_options) - dst_pg.slow_start() - dst_pg.stop() - - src_pg.safe_psql( - "postgres", - "CREATE TABLE ultimate_question2 AS SELECT 42 AS answer") - - # do delta catchup with remote pg_probackup agent with another postgres major version - # this DELTA backup should fail without PBCKP-236 patch.
- self.catchup_node( - backup_mode='DELTA', - source_pgdata=src_pg.data_dir, - destination_node=dst_pg, - # here's substitution of --remoge-path pg_probackup agent compiled with another postgres version - options=['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--remote-path=' + pgprobackup_ssh_agent_path] - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_backward_compatibility_page(self): - """Description in jira issue PGPRO-434""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir, old_binary=True) - self.show_pb(backup_dir) - - self.add_instance(backup_dir, 'node', node, old_binary=True) - self.show_pb(backup_dir) - - self.set_archiving(backup_dir, 'node', node, old_binary=True) - node.slow_start() - - node.pgbench_init(scale=10) - - # FULL backup with old binary - self.backup_node( - backup_dir, 'node', node, old_binary=True) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - self.show_pb(backup_dir) - - self.validate_pb(backup_dir) - - # RESTORE old FULL with new binary - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # Page BACKUP with old binary - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "4", "-T", "20"] - ) - pgbench.wait() - pgbench.stdout.close() - - self.backup_node( - backup_dir, 'node', node, backup_type='page', - old_binary=True) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # Page BACKUP with new binary - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "4", "-T", "20"]) - - pgbench.wait() - pgbench.stdout.close() - - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - node.safe_psql( - 'postgres', - 'create table tmp as select * from pgbench_accounts where aid < 1000') - - node.safe_psql( - 'postgres', - 'delete from pgbench_accounts') - - node.safe_psql( - 'postgres', - 'VACUUM') - - self.backup_node(backup_dir, 'node', node, backup_type='page') - - pgdata = self.pgdata_content(node.data_dir) - - node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) - - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - node.safe_psql( - 'postgres', - 'insert into pgbench_accounts select * from pgbench_accounts') - - self.backup_node(backup_dir, 'node', node, backup_type='page') - - pgdata = self.pgdata_content(node.data_dir) - - 
node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) - - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_backward_compatibility_delta(self): - """Description in jira issue PGPRO-434""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir, old_binary=True) - self.show_pb(backup_dir) - - self.add_instance(backup_dir, 'node', node, old_binary=True) - self.show_pb(backup_dir) - - self.set_archiving(backup_dir, 'node', node, old_binary=True) - node.slow_start() - - node.pgbench_init(scale=10) - - # FULL backup with old binary - self.backup_node( - backup_dir, 'node', node, old_binary=True) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - self.show_pb(backup_dir) - - self.validate_pb(backup_dir) - - # RESTORE old FULL with new binary - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # Delta BACKUP with old binary - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "4", "-T", "20"] - ) - pgbench.wait() - pgbench.stdout.close() - - self.backup_node( - backup_dir, 'node', node, backup_type='delta', - old_binary=True) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # Delta BACKUP with new binary - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "4", "-T", "20"] - ) - pgbench.wait() - pgbench.stdout.close() - - self.backup_node(backup_dir, 'node', node, backup_type='delta') - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - node.safe_psql( - 'postgres', - 'create table tmp as select * from pgbench_accounts where aid < 1000') - - node.safe_psql( - 'postgres', - 'delete from pgbench_accounts') - - node.safe_psql( - 'postgres', - 'VACUUM') - - self.backup_node(backup_dir, 'node', node, backup_type='delta') - - pgdata = self.pgdata_content(node.data_dir) - - node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) - - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - node.safe_psql( - 'postgres', - 'insert into pgbench_accounts select * from pgbench_accounts') - - self.backup_node(backup_dir, 'node', node, backup_type='delta') - - pgdata = self.pgdata_content(node.data_dir) - - node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", 
"4"]) - - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_backward_compatibility_ptrack(self): - """Description in jira issue PGPRO-434""" - - if not self.ptrack: - self.skipTest('Skipped because ptrack support is disabled') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir, old_binary=True) - self.show_pb(backup_dir) - - self.add_instance(backup_dir, 'node', node, old_binary=True) - self.show_pb(backup_dir) - - self.set_archiving(backup_dir, 'node', node, old_binary=True) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - node.pgbench_init(scale=10) - - # FULL backup with old binary - self.backup_node( - backup_dir, 'node', node, old_binary=True) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - self.show_pb(backup_dir) - - self.validate_pb(backup_dir) - - # RESTORE old FULL with new binary - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # ptrack BACKUP with old binary - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "4", "-T", "20"] - ) - pgbench.wait() - pgbench.stdout.close() - - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', - old_binary=True) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "--recovery-target=latest", - "--recovery-target-action=promote"]) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # Ptrack BACKUP with new binary - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "4", "-T", "20"] - ) - pgbench.wait() - pgbench.stdout.close() - - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack') - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "--recovery-target=latest", - "--recovery-target-action=promote"]) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_backward_compatibility_compression(self): - """Description in jira issue PGPRO-434""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) - - self.set_archiving(backup_dir, 'node', node, old_binary=True) - node.slow_start() - - node.pgbench_init(scale=10) - - # FULL backup with 
OLD binary - backup_id = self.backup_node( - backup_dir, 'node', node, - old_binary=True, - options=['--compress']) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # restore OLD FULL with new binary - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, - options=["-j", "4"]) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # PAGE backup with OLD binary - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "4", "-T", "10"]) - pgbench.wait() - pgbench.stdout.close() - - self.backup_node( - backup_dir, 'node', node, - backup_type='page', - old_binary=True, - options=['--compress']) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, - options=["-j", "4"]) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # PAGE backup with new binary - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "4", "-T", "10"]) - pgbench.wait() - pgbench.stdout.close() - - self.backup_node( - backup_dir, 'node', node, - backup_type='page', - options=['--compress']) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, - options=["-j", "4"]) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # Delta backup with old binary - self.delete_pb(backup_dir, 'node', backup_id) - - self.backup_node( - backup_dir, 'node', node, - old_binary=True, - options=['--compress']) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "4", "-T", "10"]) - - pgbench.wait() - pgbench.stdout.close() - - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=['--compress'], - old_binary=True) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, - options=["-j", "4"]) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # Delta backup with new binary - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "4", "-T", "10"]) - - pgbench.wait() - pgbench.stdout.close() - - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=['--compress']) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, - options=["-j", "4"]) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_backward_compatibility_merge(self): - """ - Create node, take FULL and PAGE backups with old binary, - merge them with new binary - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 
'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) - - self.set_archiving(backup_dir, 'node', node, old_binary=True) - node.slow_start() - - # FULL backup with OLD binary - self.backup_node( - backup_dir, 'node', node, - old_binary=True) - - node.pgbench_init(scale=1) - - # PAGE backup with OLD binary - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='page', old_binary=True) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - self.merge_backup(backup_dir, "node", backup_id) - - self.show_pb(backup_dir, as_text=True, as_json=False) - - # restore OLD FULL with new binary - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_backward_compatibility_merge_1(self): - """ - Create node, take FULL and PAGE backups with old binary, - merge them with new binary. - old binary version =< 2.2.7 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) - - self.set_archiving(backup_dir, 'node', node, old_binary=True) - node.slow_start() - - node.pgbench_init(scale=20) - - # FULL backup with OLD binary - self.backup_node(backup_dir, 'node', node, old_binary=True) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "1", "-T", "10", "--no-vacuum"]) - pgbench.wait() - pgbench.stdout.close() - - # PAGE1 backup with OLD binary - self.backup_node( - backup_dir, 'node', node, backup_type='page', old_binary=True) - - node.safe_psql( - 'postgres', - 'DELETE from pgbench_accounts') - - node.safe_psql( - 'postgres', - 'VACUUM pgbench_accounts') - - # PAGE2 backup with OLD binary - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', old_binary=True) - - pgdata = self.pgdata_content(node.data_dir) - - # merge chain created by old binary with new binary - output = self.merge_backup(backup_dir, "node", backup_id) - - # check that in-place is disabled - self.assertIn( - "WARNING: In-place merge is disabled " - "because of storage format incompatibility", output) - - # restore merged backup - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node(backup_dir, 'node', node_restored) - - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_backward_compatibility_merge_2(self): - """ - Create node, take FULL and PAGE backups with old binary, - merge them with new binary. 
- old binary version =< 2.2.7 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) - - self.set_archiving(backup_dir, 'node', node, old_binary=True) - node.slow_start() - - node.pgbench_init(scale=50) - - node.safe_psql( - 'postgres', - 'VACUUM pgbench_accounts') - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - # FULL backup with OLD binary - self.backup_node(backup_dir, 'node', node, old_binary=True) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "1", "-T", "10", "--no-vacuum"]) - pgbench.wait() - pgbench.stdout.close() - - # PAGE1 backup with OLD binary - page1 = self.backup_node( - backup_dir, 'node', node, - backup_type='page', old_binary=True) - - pgdata1 = self.pgdata_content(node.data_dir) - - node.safe_psql( - 'postgres', - "DELETE from pgbench_accounts where ctid > '(10,1)'") - - # PAGE2 backup with OLD binary - page2 = self.backup_node( - backup_dir, 'node', node, - backup_type='page', old_binary=True) - - pgdata2 = self.pgdata_content(node.data_dir) - - # PAGE3 backup with OLD binary - page3 = self.backup_node( - backup_dir, 'node', node, - backup_type='page', old_binary=True) - - pgdata3 = self.pgdata_content(node.data_dir) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "1", "-T", "10", "--no-vacuum"]) - pgbench.wait() - pgbench.stdout.close() - - # PAGE4 backup with NEW binary - page4 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - pgdata4 = self.pgdata_content(node.data_dir) - - # merge backups one by one and check data correctness - # merge PAGE1 - self.merge_backup( - backup_dir, "node", page1, options=['--log-level-file=VERBOSE']) - - # check data correctness for PAGE1 - node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, backup_id=page1, - options=['--log-level-file=VERBOSE']) - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata1, pgdata_restored) - - # merge PAGE2 - self.merge_backup(backup_dir, "node", page2) - - # check data correctness for PAGE2 - node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored, backup_id=page2) - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata2, pgdata_restored) - - # merge PAGE3 - self.show_pb(backup_dir, 'node', page3) - self.merge_backup(backup_dir, "node", page3) - self.show_pb(backup_dir, 'node', page3) - - # check data correctness for PAGE3 - node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored, backup_id=page3) - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata3, pgdata_restored) - - # merge PAGE4 - self.merge_backup(backup_dir, "node", page4) - - # check data correctness for PAGE4 - node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored, backup_id=page4) - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata4, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_backward_compatibility_merge_3(self): - """ - Create node, take FULL and PAGE backups with old 
binary, - merge them with new binary. - old binary version =< 2.2.7 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) - - self.set_archiving(backup_dir, 'node', node, old_binary=True) - node.slow_start() - - node.pgbench_init(scale=50) - - node.safe_psql( - 'postgres', - 'VACUUM pgbench_accounts') - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - # FULL backup with OLD binary - self.backup_node( - backup_dir, 'node', node, old_binary=True, options=['--compress']) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "1", "-T", "10", "--no-vacuum"]) - pgbench.wait() - pgbench.stdout.close() - - # PAGE1 backup with OLD binary - page1 = self.backup_node( - backup_dir, 'node', node, - backup_type='page', old_binary=True, options=['--compress']) - - pgdata1 = self.pgdata_content(node.data_dir) - - node.safe_psql( - 'postgres', - "DELETE from pgbench_accounts where ctid > '(10,1)'") - - # PAGE2 backup with OLD binary - page2 = self.backup_node( - backup_dir, 'node', node, - backup_type='page', old_binary=True, options=['--compress']) - - pgdata2 = self.pgdata_content(node.data_dir) - - # PAGE3 backup with OLD binary - page3 = self.backup_node( - backup_dir, 'node', node, - backup_type='page', old_binary=True, options=['--compress']) - - pgdata3 = self.pgdata_content(node.data_dir) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "1", "-T", "10", "--no-vacuum"]) - pgbench.wait() - pgbench.stdout.close() - - # PAGE4 backup with NEW binary - page4 = self.backup_node( - backup_dir, 'node', node, backup_type='page', options=['--compress']) - pgdata4 = self.pgdata_content(node.data_dir) - - # merge backups one by one and check data correctness - # merge PAGE1 - self.merge_backup( - backup_dir, "node", page1, options=['--log-level-file=VERBOSE']) - - # check data correctness for PAGE1 - node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, backup_id=page1, - options=['--log-level-file=VERBOSE']) - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata1, pgdata_restored) - - # merge PAGE2 - self.merge_backup(backup_dir, "node", page2) - - # check data correctness for PAGE2 - node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored, backup_id=page2) - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata2, pgdata_restored) - - # merge PAGE3 - self.show_pb(backup_dir, 'node', page3) - self.merge_backup(backup_dir, "node", page3) - self.show_pb(backup_dir, 'node', page3) - - # check data correctness for PAGE3 - node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored, backup_id=page3) - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata3, pgdata_restored) - - # merge PAGE4 - self.merge_backup(backup_dir, "node", page4) - - # check data correctness for PAGE4 - node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored, backup_id=page4) - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata4, pgdata_restored) - - 
# @unittest.expectedFailure - # @unittest.skip("skip") - def test_backward_compatibility_merge_4(self): - """ - Start merge between minor version, crash and retry it. - old binary version =< 2.4.0 - """ - if self.version_to_num(self.old_probackup_version) > self.version_to_num('2.4.0'): - self.assertTrue( - False, 'You need pg_probackup old_binary =< 2.4.0 for this test') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) - - self.set_archiving(backup_dir, 'node', node, old_binary=True) - node.slow_start() - - node.pgbench_init(scale=20) - - node.safe_psql( - 'postgres', - 'VACUUM pgbench_accounts') - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - # FULL backup with OLD binary - self.backup_node( - backup_dir, 'node', node, old_binary=True, options=['--compress']) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "1", "-T", "20", "--no-vacuum"]) - pgbench.wait() - pgbench.stdout.close() - - # PAGE backup with NEW binary - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', options=['--compress']) - pgdata = self.pgdata_content(node.data_dir) - - # merge PAGE4 - gdb = self.merge_backup(backup_dir, "node", page_id, gdb=True) - - gdb.set_breakpoint('rename') - gdb.run_until_break() - gdb.continue_execution_until_break(500) - gdb._execute('signal SIGKILL') - - try: - self.merge_backup(backup_dir, "node", page_id) - self.assertEqual( - 1, 0, - "Expecting Error because of format changes.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Retry of failed merge for backups with different " - "between minor versions is forbidden to avoid data corruption " - "because of storage format changes introduced in 2.4.0 version, " - "please take a new full backup", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_backward_compatibility_merge_5(self): - """ - Create node, take FULL and PAGE backups with old binary, - merge them with new binary. 
- old binary version >= STORAGE_FORMAT_VERSION (2.4.4) - """ - if self.version_to_num(self.old_probackup_version) < self.version_to_num('2.4.4'): - self.assertTrue( - False, 'OLD pg_probackup binary must be >= 2.4.4 for this test') - - self.assertNotEqual( - self.version_to_num(self.old_probackup_version), - self.version_to_num(self.probackup_version)) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) - - self.set_archiving(backup_dir, 'node', node, old_binary=True) - node.slow_start() - - node.pgbench_init(scale=20) - - # FULL backup with OLD binary - self.backup_node(backup_dir, 'node', node, old_binary=True) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "1", "-T", "10", "--no-vacuum"]) - pgbench.wait() - pgbench.stdout.close() - - # PAGE1 backup with OLD binary - self.backup_node( - backup_dir, 'node', node, backup_type='page', old_binary=True) - - node.safe_psql( - 'postgres', - 'DELETE from pgbench_accounts') - - node.safe_psql( - 'postgres', - 'VACUUM pgbench_accounts') - - # PAGE2 backup with OLD binary - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', old_binary=True) - - pgdata = self.pgdata_content(node.data_dir) - - # merge chain created by old binary with new binary - output = self.merge_backup(backup_dir, "node", backup_id) - - # check that in-place is disabled - self.assertNotIn( - "WARNING: In-place merge is disabled " - "because of storage format incompatibility", output) - - # restore merged backup - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node(backup_dir, 'node', node_restored) - - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_page_vacuum_truncate(self): - """ - make node, create table, take full backup, - delete all data, vacuum relation, - take page backup, insert some data, - take second page backup, - restore latest page backup using new binary - and check data correctness - old binary should be 2.2.x version - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) - self.set_archiving(backup_dir, 'node', node, old_binary=True) - node.slow_start() - - node.safe_psql( - "postgres", - "create sequence t_seq; " - "create table t_heap as select i as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1024) i") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - id1 = self.backup_node(backup_dir, 'node', node, old_binary=True) - pgdata1 = self.pgdata_content(node.data_dir) - - node.safe_psql( - "postgres", - "delete from t_heap") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - id2 = self.backup_node( - backup_dir, 'node', node, backup_type='page', old_binary=True) - pgdata2 = self.pgdata_content(node.data_dir) - - 
node.safe_psql( - "postgres", - "insert into t_heap select i as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1) i") - - id3 = self.backup_node( - backup_dir, 'node', node, backup_type='page', old_binary=True) - pgdata3 = self.pgdata_content(node.data_dir) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, - data_dir=node_restored.data_dir, backup_id=id1) - - # Physical comparison - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata1, pgdata_restored) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, - data_dir=node_restored.data_dir, backup_id=id2) - - # Physical comparison - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata2, pgdata_restored) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, - data_dir=node_restored.data_dir, backup_id=id3) - - # Physical comparison - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata3, pgdata_restored) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - node_restored.cleanup() - - # @unittest.skip("skip") - def test_page_vacuum_truncate_compression(self): - """ - make node, create table, take full backup, - delete all data, vacuum relation, - take page backup, insert some data, - take second page backup, - restore latest page backup using new binary - and check data correctness - old binary should be 2.2.x version - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) - self.set_archiving(backup_dir, 'node', node, old_binary=True) - node.slow_start() - - node.safe_psql( - "postgres", - "create sequence t_seq; " - "create table t_heap as select i as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1024) i") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - self.backup_node( - backup_dir, 'node',node, old_binary=True, options=['--compress']) - - node.safe_psql( - "postgres", - "delete from t_heap") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - self.backup_node( - backup_dir, 'node', node, backup_type='page', - old_binary=True, options=['--compress']) - - node.safe_psql( - "postgres", - "insert into t_heap select i as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1) i") - - self.backup_node( - backup_dir, 'node', node, backup_type='page', - old_binary=True, options=['--compress']) - - pgdata = self.pgdata_content(node.data_dir) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node(backup_dir, 'node', node_restored) - - # Physical comparison - pgdata_restored = 
self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - # @unittest.skip("skip") - def test_page_vacuum_truncate_compressed_1(self): - """ - make node, create table, take full backup, - delete all data, vacuum relation, - take page backup, insert some data, - take second page backup, - restore latest page backup using new binary - and check data correctness - old binary should be 2.2.x version - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) - self.set_archiving(backup_dir, 'node', node, old_binary=True) - node.slow_start() - - node.safe_psql( - "postgres", - "create sequence t_seq; " - "create table t_heap as select i as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1024) i") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - id1 = self.backup_node( - backup_dir, 'node', node, - old_binary=True, options=['--compress']) - pgdata1 = self.pgdata_content(node.data_dir) - - node.safe_psql( - "postgres", - "delete from t_heap") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - id2 = self.backup_node( - backup_dir, 'node', node, backup_type='page', - old_binary=True, options=['--compress']) - pgdata2 = self.pgdata_content(node.data_dir) - - node.safe_psql( - "postgres", - "insert into t_heap select i as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1) i") - - id3 = self.backup_node( - backup_dir, 'node', node, backup_type='page', - old_binary=True, options=['--compress']) - pgdata3 = self.pgdata_content(node.data_dir) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, - data_dir=node_restored.data_dir, backup_id=id1) - - # Physical comparison - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata1, pgdata_restored) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, - data_dir=node_restored.data_dir, backup_id=id2) - - # Physical comparison - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata2, pgdata_restored) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, - data_dir=node_restored.data_dir, backup_id=id3) - - # Physical comparison - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata3, pgdata_restored) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - node_restored.cleanup() - - # @unittest.skip("skip") - def test_hidden_files(self): - """ - old_version should be < 2.3.0 - Create hidden file in pgdata, take backup - with old binary, then try to delete backup - with new binary - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, 
self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) - node.slow_start() - - open(os.path.join(node.data_dir, ".hidden_stuff"), 'a').close() - - backup_id = self.backup_node( - backup_dir, 'node',node, old_binary=True, options=['--stream']) - - self.delete_pb(backup_dir, 'node', backup_id) - - # @unittest.skip("skip") - def test_compatibility_tablespace(self): - """ - https://github.com/postgrespro/pg_probackup/issues/348 - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node, old_binary=True) - node.slow_start() - - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=["-j", "4", "--stream"], old_binary=True) - - tblspace_old_path = self.get_tblspace_path(node, 'tblspace_old') - - self.create_tblspace_in_node( - node, 'tblspace', - tblspc_path=tblspace_old_path) - - node.safe_psql( - "postgres", - "create table t_heap_lame tablespace tblspace " - "as select 1 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1000) i") - - tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new') - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format( - tblspace_old_path, tblspace_new_path)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because tablespace mapping is incorrect" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Backup {0} has no tablespaceses, ' - 'nothing to remap'.format(backup_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=["-j", "4", "--stream"], old_binary=True) - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format( - tblspace_old_path, tblspace_new_path)]) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) diff --git a/tests/compression_test.py b/tests/compression_test.py deleted file mode 100644 index 94f2dffff..000000000 --- a/tests/compression_test.py +++ /dev/null @@ -1,495 +0,0 @@ -import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack -from datetime import datetime, timedelta -import subprocess - - -class CompressionTest(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_basic_compression_stream_zlib(self): - """ - make archive node, make full and page stream backups, - check data correctness in restored instance - """ - self.maxDiff = None - backup_dir = 
os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,256) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") - full_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='full', - options=[ - '--stream', - '--compress-algorithm=zlib']) - - # PAGE BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(256,512) i") - page_result = node.execute("postgres", "SELECT * FROM t_heap") - page_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', - options=[ - '--stream', '--compress-algorithm=zlib']) - - # DELTA BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(512,768) i") - delta_result = node.execute("postgres", "SELECT * FROM t_heap") - delta_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--stream', '--compress-algorithm=zlib']) - - # Drop Node - node.cleanup() - - # Check full backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(full_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=full_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(full_result, full_result_new) - node.cleanup() - - # Check page backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(page_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=page_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - - page_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(page_result, page_result_new) - node.cleanup() - - # Check delta backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(delta_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=delta_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - - delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(delta_result, delta_result_new) - - def test_compression_archive_zlib(self): - """ - make archive node, make full and page backups, - check data correctness in restored instance - """ - self.maxDiff = None - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - 
self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,1) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") - full_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='full', - options=["--compress-algorithm=zlib"]) - - # PAGE BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector " - "from generate_series(0,2) i") - page_result = node.execute("postgres", "SELECT * FROM t_heap") - page_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', - options=["--compress-algorithm=zlib"]) - - # DELTA BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,3) i") - delta_result = node.execute("postgres", "SELECT * FROM t_heap") - delta_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--compress-algorithm=zlib']) - - # Drop Node - node.cleanup() - - # Check full backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(full_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=full_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(full_result, full_result_new) - node.cleanup() - - # Check page backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(page_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=page_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - - page_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(page_result, page_result_new) - node.cleanup() - - # Check delta backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(delta_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=delta_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - - delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(delta_result, delta_result_new) - node.cleanup() - - def test_compression_stream_pglz(self): - """ - make archive node, make full and page stream backups, - check data correctness in restored instance - """ - self.maxDiff = None - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector 
" - "from generate_series(0,256) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") - full_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='full', - options=['--stream', '--compress-algorithm=pglz']) - - # PAGE BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(256,512) i") - page_result = node.execute("postgres", "SELECT * FROM t_heap") - page_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', - options=['--stream', '--compress-algorithm=pglz']) - - # DELTA BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(512,768) i") - delta_result = node.execute("postgres", "SELECT * FROM t_heap") - delta_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--stream', '--compress-algorithm=pglz']) - - # Drop Node - node.cleanup() - - # Check full backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(full_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=full_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(full_result, full_result_new) - node.cleanup() - - # Check page backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(page_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=page_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - - page_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(page_result, page_result_new) - node.cleanup() - - # Check delta backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(delta_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=delta_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - - delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(delta_result, delta_result_new) - node.cleanup() - - def test_compression_archive_pglz(self): - """ - make archive node, make full and page backups, - check data correctness in restored instance - """ - self.maxDiff = None - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector " - "from generate_series(0,100) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") - full_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='full', - options=['--compress-algorithm=pglz']) - - 
# PAGE BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector " - "from generate_series(100,200) i") - page_result = node.execute("postgres", "SELECT * FROM t_heap") - page_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', - options=['--compress-algorithm=pglz']) - - # DELTA BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector " - "from generate_series(200,300) i") - delta_result = node.execute("postgres", "SELECT * FROM t_heap") - delta_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--compress-algorithm=pglz']) - - # Drop Node - node.cleanup() - - # Check full backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(full_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=full_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(full_result, full_result_new) - node.cleanup() - - # Check page backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(page_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=page_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - - page_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(page_result, page_result_new) - node.cleanup() - - # Check delta backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(delta_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=delta_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - - delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(delta_result, delta_result_new) - node.cleanup() - - def test_compression_wrong_algorithm(self): - """ - make archive node, make full and page backups, - check data correctness in restored instance - """ - self.maxDiff = None - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='full', options=['--compress-algorithm=bla-blah']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because compress-algorithm is invalid.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertEqual( - e.message, - 'ERROR: invalid compress algorithm value "bla-blah"\n', - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_incompressible_pages(self): - """ - make archive node, create table with 
incompressible toast pages, - take backup with compression, make sure that page was not compressed, - restore backup and check data correctness - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Full - self.backup_node( - backup_dir, 'node', node, - options=[ - '--compress-algorithm=zlib', - '--compress-level=0']) - - node.pgbench_init(scale=3) - - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=[ - '--compress-algorithm=zlib', - '--compress-level=0']) - - pgdata = self.pgdata_content(node.data_dir) - - node.cleanup() - - self.restore_node(backup_dir, 'node', node) - - # Physical comparison - if self.paranoia: - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - node.slow_start() diff --git a/tests/config_test.py b/tests/config_test.py deleted file mode 100644 index b1a0f9295..000000000 --- a/tests/config_test.py +++ /dev/null @@ -1,113 +0,0 @@ -import unittest -import subprocess -import os -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from sys import exit -from shutil import copyfile - - -class ConfigTest(ProbackupTest, unittest.TestCase): - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_remove_instance_config(self): - """remove pg_probackup.conself.f""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.show_pb(backup_dir) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - conf_file = os.path.join( - backup_dir, 'backups','node', 'pg_probackup.conf') - - os.unlink(os.path.join(backup_dir, 'backups','node', 'pg_probackup.conf')) - - try: - self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.assertEqual( - 1, 0, - "Expecting Error because pg_probackup.conf is missing. 
" - ".\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: could not open file "{0}": ' - 'No such file or directory'.format(conf_file), - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_corrupt_backup_content(self): - """corrupt backup_content.control""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - full1_id = self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - 'postgres', - 'create table t1()') - - fulle2_id = self.backup_node(backup_dir, 'node', node) - - fulle1_conf_file = os.path.join( - backup_dir, 'backups','node', full1_id, 'backup_content.control') - - fulle2_conf_file = os.path.join( - backup_dir, 'backups','node', fulle2_id, 'backup_content.control') - - copyfile(fulle2_conf_file, fulle1_conf_file) - - try: - self.validate_pb(backup_dir, 'node') - self.assertEqual( - 1, 0, - "Expecting Error because pg_probackup.conf is missing. " - ".\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "WARNING: Invalid CRC of backup control file '{0}':".format(fulle1_conf_file), - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - "WARNING: Failed to get file list for backup {0}".format(full1_id), - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - "WARNING: Backup {0} file list is corrupted".format(full1_id), - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.show_pb(backup_dir, 'node', full1_id)['status'] - - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], 'CORRUPT') - self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], 'OK') diff --git a/tests/delete_test.py b/tests/delete_test.py deleted file mode 100644 index 10100887d..000000000 --- a/tests/delete_test.py +++ /dev/null @@ -1,822 +0,0 @@ -import unittest -import os -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -import subprocess - - -class DeleteTest(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_delete_full_backups(self): - """delete full backups""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # full backup - self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - pgbench.wait() - pgbench.stdout.close() - - self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - pgbench.wait() - pgbench.stdout.close() - - self.backup_node(backup_dir, 'node', node) - - show_backups = self.show_pb(backup_dir, 
'node') - id_1 = show_backups[0]['id'] - id_2 = show_backups[1]['id'] - id_3 = show_backups[2]['id'] - self.delete_pb(backup_dir, 'node', id_2) - show_backups = self.show_pb(backup_dir, 'node') - self.assertEqual(show_backups[0]['id'], id_1) - self.assertEqual(show_backups[1]['id'], id_3) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_del_instance_archive(self): - """delete full backups""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # full backup - self.backup_node(backup_dir, 'node', node) - - # full backup - self.backup_node(backup_dir, 'node', node) - - # restore - node.cleanup() - self.restore_node(backup_dir, 'node', node) - node.slow_start() - - # Delete instance - self.del_instance(backup_dir, 'node') - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_delete_archive_mix_compress_and_non_compressed_segments(self): - """delete full backups""" - node = self.make_simple_node( - base_dir="{0}/{1}/node".format(self.module_name, self.fname), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving( - backup_dir, 'node', node, compress=False) - node.slow_start() - - # full backup - self.backup_node(backup_dir, 'node', node) - - node.pgbench_init(scale=10) - - # Restart archiving with compression - self.set_archiving(backup_dir, 'node', node, compress=True) - - node.restart() - - # full backup - self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - pgbench.wait() - - self.backup_node( - backup_dir, 'node', node, - options=[ - '--retention-redundancy=3', - '--delete-expired']) - - pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - pgbench.wait() - - self.backup_node( - backup_dir, 'node', node, - options=[ - '--retention-redundancy=3', - '--delete-expired']) - - pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - pgbench.wait() - - self.backup_node( - backup_dir, 'node', node, - options=[ - '--retention-redundancy=3', - '--delete-expired']) - - # @unittest.skip("skip") - def test_delete_increment_page(self): - """delete increment and all after him""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # full backup mode - self.backup_node(backup_dir, 'node', node) - # page backup mode - self.backup_node(backup_dir, 'node', node, backup_type="page") - # page backup mode - self.backup_node(backup_dir, 'node', node, backup_type="page") - # full backup mode - self.backup_node(backup_dir, 'node', node) - - show_backups = self.show_pb(backup_dir, 'node') - self.assertEqual(len(show_backups), 4) - - # delete first page backup - self.delete_pb(backup_dir, 'node', show_backups[1]['id']) - - show_backups = self.show_pb(backup_dir, 'node') - self.assertEqual(len(show_backups), 2) - - 
self.assertEqual(show_backups[0]['backup-mode'], "FULL") - self.assertEqual(show_backups[0]['status'], "OK") - self.assertEqual(show_backups[1]['backup-mode'], "FULL") - self.assertEqual(show_backups[1]['status'], "OK") - - # @unittest.skip("skip") - def test_delete_increment_ptrack(self): - """delete increment and all after him""" - if not self.ptrack: - self.skipTest('Skipped because ptrack support is disabled') - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - 'postgres', - 'CREATE EXTENSION ptrack') - - # full backup mode - self.backup_node(backup_dir, 'node', node) - # ptrack backup mode - self.backup_node(backup_dir, 'node', node, backup_type="ptrack") - # ptrack backup mode - self.backup_node(backup_dir, 'node', node, backup_type="ptrack") - # full backup mode - self.backup_node(backup_dir, 'node', node) - - show_backups = self.show_pb(backup_dir, 'node') - self.assertEqual(len(show_backups), 4) - - # delete first page backup - self.delete_pb(backup_dir, 'node', show_backups[1]['id']) - - show_backups = self.show_pb(backup_dir, 'node') - self.assertEqual(len(show_backups), 2) - - self.assertEqual(show_backups[0]['backup-mode'], "FULL") - self.assertEqual(show_backups[0]['status'], "OK") - self.assertEqual(show_backups[1]['backup-mode'], "FULL") - self.assertEqual(show_backups[1]['status'], "OK") - - # @unittest.skip("skip") - def test_delete_orphaned_wal_segments(self): - """ - make archive node, make three full backups, - delete second backup without --wal option, - then delete orphaned wals via --wal option - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "create table t_heap as select 1 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,10000) i") - # first full backup - backup_1_id = self.backup_node(backup_dir, 'node', node) - # second full backup - backup_2_id = self.backup_node(backup_dir, 'node', node) - # third full backup - backup_3_id = self.backup_node(backup_dir, 'node', node) - node.stop() - - # Check wals - wals_dir = os.path.join(backup_dir, 'wal', 'node') - wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f))] - original_wal_quantity = len(wals) - - # delete second full backup - self.delete_pb(backup_dir, 'node', backup_2_id) - # check wal quantity - self.validate_pb(backup_dir) - self.assertEqual(self.show_pb(backup_dir, 'node', backup_1_id)['status'], "OK") - self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK") - # try to delete wals for second backup - self.delete_pb(backup_dir, 'node', options=['--wal']) - # check wal quantity - self.validate_pb(backup_dir) - self.assertEqual(self.show_pb(backup_dir, 'node', backup_1_id)['status'], "OK") - self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK") - - # delete first full backup - 
self.delete_pb(backup_dir, 'node', backup_1_id) - self.validate_pb(backup_dir) - self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK") - - result = self.delete_pb(backup_dir, 'node', options=['--wal']) - # delete useless wals - self.assertTrue('On timeline 1 WAL segments between ' in result - and 'will be removed' in result) - - self.validate_pb(backup_dir) - self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK") - - # Check quantity, it should be lower than original - wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f))] - self.assertTrue(original_wal_quantity > len(wals), "Number of wals not changed after 'delete --wal' which is illegal") - - # Delete last backup - self.delete_pb(backup_dir, 'node', backup_3_id, options=['--wal']) - wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f))] - self.assertEqual (0, len(wals), "Number of wals should be equal to 0") - - # @unittest.skip("skip") - def test_delete_wal_between_multiple_timelines(self): - """ - /-------B1-- - A1----------------A2---- - - delete A1 backup, check that WAL segments on [A1, A2) and - [A1, B1) are deleted and backups B1 and A2 keep - their WAL - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - A1 = self.backup_node(backup_dir, 'node', node) - - # load some data to node - node.pgbench_init(scale=3) - - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2')) - node2.cleanup() - - self.restore_node(backup_dir, 'node', node2) - self.set_auto_conf(node2, {'port': node2.port}) - node2.slow_start() - - # load some more data to node - node.pgbench_init(scale=3) - - # take A2 - A2 = self.backup_node(backup_dir, 'node', node) - - # load some more data to node2 - node2.pgbench_init(scale=2) - - B1 = self.backup_node( - backup_dir, 'node', - node2, data_dir=node2.data_dir) - - self.delete_pb(backup_dir, 'node', backup_id=A1, options=['--wal']) - - self.validate_pb(backup_dir) - - # @unittest.skip("skip") - def test_delete_backup_with_empty_control_file(self): - """ - take backup, truncate its control file, - try to delete it via 'delete' command - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - set_replication=True) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # full backup mode - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - # page backup mode - self.backup_node( - backup_dir, 'node', node, backup_type="delta", options=['--stream']) - # page backup mode - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="delta", options=['--stream']) - - with open( - os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.control'), - 'wt') as f: - f.flush() - f.close() - - show_backups = self.show_pb(backup_dir, 'node') - self.assertEqual(len(show_backups), 3) - - self.delete_pb(backup_dir, 'node', backup_id=backup_id) - - # @unittest.skip("skip") - def test_delete_interleaved_incremental_chains(self): - 
"""complicated case of interleaved backup chains""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Take FULL BACKUPs - backup_id_a = self.backup_node(backup_dir, 'node', node) - backup_id_b = self.backup_node(backup_dir, 'node', node) - - # Change FULLb to ERROR - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - # FULLb ERROR - # FULLa OK - - # Take PAGEa1 backup - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - # Change FULLb to OK - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - # Change PAGEa1 to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR') - - # PAGEa1 ERROR - # FULLb OK - # FULLa OK - - page_id_b1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEb1 OK - # PAGEa1 ERROR - # FULLb OK - # FULLa OK - - # Now we start to play with first generation of PAGE backups - # Change PAGEb1 and FULLb status to ERROR - self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - # Change PAGEa1 status to OK - self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK') - - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - page_id_a2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEa2 OK - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - # Change PAGEa2 and FULla to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') - - # Change PAGEb1 and FULlb to OK - self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - # PAGEa2 ERROR - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa ERROR - - page_id_b2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Change PAGEa2 and FULLa status to OK - self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - # PAGEb2 OK - # PAGEa2 OK - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa OK - - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - - # PAGEc1 OK - # FULLc OK - # PAGEb2 OK - # PAGEa2 OK - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa OK - - # Delete FULLb - self.delete_pb( - backup_dir, 'node', backup_id_b) - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 5) - - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) - - # @unittest.skip("skip") - def test_delete_multiple_descendants(self): - """ - PAGEb3 - | PAGEa3 - PAGEb2 / - | PAGEa2 / - PAGEb1 \ / - | PAGEa1 - FULLb | - FULLa should be deleted - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Take FULL BACKUPs - 
backup_id_a = self.backup_node(backup_dir, 'node', node) - backup_id_b = self.backup_node(backup_dir, 'node', node) - - # Change FULLb to ERROR - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Change FULLb to OK - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - # Change PAGEa1 backup status to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR') - - # PAGEa1 ERROR - # FULLb OK - # FULLa OK - - page_id_b1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEb1 OK - # PAGEa1 ERROR - # FULLb OK - # FULLa OK - - # Change PAGEa1 to OK - self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK') - - # Change PAGEb1 and FULLb backup status to ERROR - self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - page_id_a2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEa2 OK - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - # Change PAGEb1 and FULLb to OK - self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - # Change PAGEa2 and FULLa to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') - - # PAGEa2 ERROR - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa ERROR - - page_id_b2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEb2 OK - # PAGEa2 ERROR - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa ERROR - - # Change PAGEb2, PAGEb1 and FULLb to ERROR - self.change_backup_status(backup_dir, 'node', page_id_b2, 'ERROR') - self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - # Change FULLa to OK - self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') - - # PAGEb2 ERROR - # PAGEa2 ERROR - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - page_id_a3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEa3 OK - # PAGEb2 ERROR - # PAGEa2 ERROR - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - # Change PAGEa3 status to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a3, 'ERROR') - - # Change PAGEb2 and FULLb to OK - self.change_backup_status(backup_dir, 'node', page_id_b2, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - page_id_b3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEb3 OK - # PAGEa3 ERROR - # PAGEb2 OK - # PAGEa2 ERROR - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb OK - # FULLa OK - - # Change PAGEa3, PAGEa2 and PAGEb1 to OK - self.change_backup_status(backup_dir, 'node', page_id_a3, 'OK') - self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') - self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') - - # PAGEb3 OK - # PAGEa3 OK - # PAGEb2 OK - # PAGEa2 OK - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa OK - - # Check that page_id_a3 and page_id_a2 are both direct descendants of page_id_a1 - self.assertEqual( - self.show_pb(backup_dir, 'node', backup_id=page_id_a3)['parent-backup-id'], - page_id_a1) - - self.assertEqual( - self.show_pb(backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'], 
- page_id_a1) - - # Delete FULLa - self.delete_pb(backup_dir, 'node', backup_id_a) - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) - - # @unittest.skip("skip") - def test_delete_multiple_descendants_dry_run(self): - """ - PAGEa3 - PAGEa2 / - \ / - PAGEa1 (delete target) - | - FULLa - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Take FULL BACKUP - node.pgbench_init(scale=1) - backup_id_a = self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - pgbench.wait() - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - pgbench.wait() - page_id_a2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - - # Change PAGEa2 to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR') - - pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - pgbench.wait() - page_id_a3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Change PAGEa2 to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') - - # Delete PAGEa1 - output = self.delete_pb( - backup_dir, 'node', page_id_a1, - options=['--dry-run', '--log-level-console=LOG', '--delete-wal']) - - print(output) - self.assertIn( - 'LOG: Backup {0} can be deleted'.format(page_id_a3), - output) - self.assertIn( - 'LOG: Backup {0} can be deleted'.format(page_id_a2), - output) - self.assertIn( - 'LOG: Backup {0} can be deleted'.format(page_id_a1), - output) - - self.assertIn( - 'INFO: Resident data size to free by ' - 'delete of backup {0} :'.format(page_id_a1), - output) - - self.assertIn( - 'On timeline 1 WAL segments between 000000010000000000000001 ' - 'and 000000010000000000000003 can be removed', - output) - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) - - output = self.delete_pb( - backup_dir, 'node', page_id_a1, - options=['--log-level-console=LOG', '--delete-wal']) - - self.assertIn( - 'LOG: Backup {0} will be deleted'.format(page_id_a3), - output) - self.assertIn( - 'LOG: Backup {0} will be deleted'.format(page_id_a2), - output) - self.assertIn( - 'LOG: Backup {0} will be deleted'.format(page_id_a1), - output) - self.assertIn( - 'INFO: Resident data size to free by ' - 'delete of backup {0} :'.format(page_id_a1), - output) - - self.assertIn( - 'On timeline 1 WAL segments between 000000010000000000000001 ' - 'and 000000010000000000000003 will be removed', - output) - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 1) - - self.validate_pb(backup_dir, 'node') - - def test_delete_error_backups(self): - """delete increment and all after him""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # full backup mode - self.backup_node(backup_dir, 'node', node) - # page backup mode - self.backup_node(backup_dir, 'node', node, backup_type="page") - - # Take FULL BACKUP - backup_id_a = 
self.backup_node(backup_dir, 'node', node) - # Take PAGE BACKUP - backup_id_b = self.backup_node(backup_dir, 'node', node, backup_type="page") - - backup_id_c = self.backup_node(backup_dir, 'node', node, backup_type="page") - - backup_id_d = self.backup_node(backup_dir, 'node', node, backup_type="page") - - # full backup mode - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type="page") - backup_id_e = self.backup_node(backup_dir, 'node', node, backup_type="page") - self.backup_node(backup_dir, 'node', node, backup_type="page") - - # Change status to ERROR - self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_c, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_e, 'ERROR') - - print(self.show_pb(backup_dir, as_text=True, as_json=False)) - - show_backups = self.show_pb(backup_dir, 'node') - self.assertEqual(len(show_backups), 10) - - # delete error backups - output = self.delete_pb(backup_dir, 'node', options=['--status=ERROR', '--dry-run']) - show_backups = self.show_pb(backup_dir, 'node') - self.assertEqual(len(show_backups), 10) - - self.assertIn( - "Deleting all backups with status 'ERROR' in dry run mode", - output) - - self.assertIn( - "INFO: Backup {0} with status OK can be deleted".format(backup_id_d), - output) - - print(self.show_pb(backup_dir, as_text=True, as_json=False)) - - show_backups = self.show_pb(backup_dir, 'node') - output = self.delete_pb(backup_dir, 'node', options=['--status=ERROR']) - print(output) - show_backups = self.show_pb(backup_dir, 'node') - self.assertEqual(len(show_backups), 4) - - self.assertEqual(show_backups[0]['status'], "OK") - self.assertEqual(show_backups[1]['status'], "OK") - self.assertEqual(show_backups[2]['status'], "OK") - self.assertEqual(show_backups[3]['status'], "OK") diff --git a/tests/delta_test.py b/tests/delta_test.py deleted file mode 100644 index 23583fd93..000000000 --- a/tests/delta_test.py +++ /dev/null @@ -1,1201 +0,0 @@ -import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from datetime import datetime, timedelta -from testgres import QueryException -import subprocess -import time -from threading import Thread - - -class DeltaTest(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - def test_basic_delta_vacuum_truncate(self): - """ - make node, create table, take full backup, - delete last 3 pages, vacuum relation, - take delta backup, take second delta backup, - restore latest delta backup and check data correctness - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node_restored.cleanup() - node.slow_start() - - node.safe_psql( - "postgres", - "create sequence t_seq; " - "create table t_heap as select i as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1024) i;") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - node.safe_psql( - "postgres", - "delete from t_heap where 
ctid >= '(11,0)'") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - pgdata = self.pgdata_content(node.data_dir) - - self.restore_node( - backup_dir, 'node', node_restored) - - # Physical comparison - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - # @unittest.skip("skip") - def test_delta_vacuum_truncate_1(self): - """ - make node, create table, take full backup, - delete last 3 pages, vacuum relation, - take delta backup, take second delta backup, - restore latest delta backup and check data correctness - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - ) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node_restored.cleanup() - node.slow_start() - self.create_tblspace_in_node(node, 'somedata') - - node.safe_psql( - "postgres", - "create sequence t_seq; " - "create table t_heap tablespace somedata as select i as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1024) i;" - ) - - node.safe_psql( - "postgres", - "vacuum t_heap" - ) - - self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "delete from t_heap where ctid >= '(11,0)'" - ) - - node.safe_psql( - "postgres", - "vacuum t_heap" - ) - - self.backup_node( - backup_dir, 'node', node, backup_type='delta' - ) - - self.backup_node( - backup_dir, 'node', node, backup_type='delta' - ) - - pgdata = self.pgdata_content(node.data_dir) - - old_tablespace = self.get_tblspace_path(node, 'somedata') - new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') - - self.restore_node( - backup_dir, - 'node', - node_restored, - options=[ - "-T", "{0}={1}".format( - old_tablespace, new_tablespace)] - ) - - # Physical comparison - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - # @unittest.skip("skip") - def test_delta_vacuum_truncate_2(self): - """ - make node, create table, take full backup, - delete last 3 pages, vacuum relation, - take delta backup, take second delta backup, - restore latest delta backup and check data correctness - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - ) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node_restored.cleanup() - node.slow_start() - - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - 
"from generate_series(0,10100000) i;" - ) - filepath = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')" - ).decode('utf-8').rstrip() - - self.backup_node(backup_dir, 'node', node) - - print(os.path.join(node.data_dir, filepath + '.1')) - os.unlink(os.path.join(node.data_dir, filepath + '.1')) - - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - pgdata = self.pgdata_content(node.data_dir) - - self.restore_node( - backup_dir, 'node', node_restored) - - # Physical comparison - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - # @unittest.skip("skip") - def test_delta_stream(self): - """ - make archive node, take full and delta stream backups, - restore them and check data correctness - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s' - } - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector " - "from generate_series(0,100) i") - - full_result = node.execute("postgres", "SELECT * FROM t_heap") - full_backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='full', options=['--stream']) - - # delta BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector " - "from generate_series(100,200) i") - delta_result = node.execute("postgres", "SELECT * FROM t_heap") - delta_backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['--stream']) - - # Drop Node - node.cleanup() - - # Check full backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(full_backup_id), - self.restore_node( - backup_dir, 'node', node, - backup_id=full_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(self.output), self.cmd)) - node.slow_start() - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(full_result, full_result_new) - node.cleanup() - - # Check delta backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(delta_backup_id), - self.restore_node( - backup_dir, 'node', node, - backup_id=delta_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(self.output), self.cmd)) - node.slow_start() - delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(delta_result, delta_result_new) - node.cleanup() - - # @unittest.skip("skip") - def test_delta_archive(self): - """ - make archive node, take full and delta archive backups, - restore them and check data correctness - """ - self.maxDiff = None - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, 
self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,1) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") - full_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='full') - - # delta BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,2) i") - delta_result = node.execute("postgres", "SELECT * FROM t_heap") - delta_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - # Drop Node - node.cleanup() - - # Restore and check full backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(full_backup_id), - self.restore_node( - backup_dir, 'node', node, - backup_id=full_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(full_result, full_result_new) - node.cleanup() - - # Restore and check delta backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(delta_backup_id), - self.restore_node( - backup_dir, 'node', node, - backup_id=delta_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(delta_result, delta_result_new) - node.cleanup() - - # @unittest.skip("skip") - def test_delta_multiple_segments(self): - """ - Make node, create table with multiple segments, - write some data to it, check delta and data correctness - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'fsync': 'off', - 'shared_buffers': '1GB', - 'maintenance_work_mem': '1GB', - 'full_page_writes': 'off' - } - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - # self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.create_tblspace_in_node(node, 'somedata') - - # CREATE TABLE - node.pgbench_init( - scale=100, - options=['--tablespace=somedata', '--no-vacuum']) - # FULL BACKUP - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - # PGBENCH STUFF - pgbench = node.pgbench(options=['-T', '50', '-c', '1', '--no-vacuum']) - pgbench.wait() - node.safe_psql("postgres", "checkpoint") - - # GET LOGICAL CONTENT FROM NODE - result = node.safe_psql("postgres", "select count(*) from pgbench_accounts") - # delta BACKUP - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['--stream']) - # GET PHYSICAL CONTENT FROM NODE - pgdata = self.pgdata_content(node.data_dir) - - # RESTORE NODE - restored_node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'restored_node')) - restored_node.cleanup() - tblspc_path = 
self.get_tblspace_path(node, 'somedata') - tblspc_path_new = self.get_tblspace_path( - restored_node, 'somedata_restored') - - self.restore_node( - backup_dir, 'node', restored_node, - options=[ - "-j", "4", "-T", "{0}={1}".format( - tblspc_path, tblspc_path_new)]) - - # GET PHYSICAL CONTENT FROM NODE_RESTORED - pgdata_restored = self.pgdata_content(restored_node.data_dir) - - # START RESTORED NODE - self.set_auto_conf(restored_node, {'port': restored_node.port}) - restored_node.slow_start() - - result_new = restored_node.safe_psql( - "postgres", - "select count(*) from pgbench_accounts") - - # COMPARE RESTORED FILES - self.assertEqual(result, result_new, 'data is lost') - - if self.paranoia: - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_delta_vacuum_full(self): - """ - make node, make full and delta stream backups, - restore them and check data correctness - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - self.create_tblspace_in_node(node, 'somedata') - - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - node.safe_psql( - "postgres", - "create table t_heap tablespace somedata as select i" - " as id from generate_series(0,1000000) i" - ) - - pg_connect = node.connect("postgres", autocommit=True) - - gdb = self.gdb_attach(pg_connect.pid) - gdb.set_breakpoint('reform_and_rewrite_tuple') - - gdb.continue_execution_until_running() - - process = Thread( - target=pg_connect.execute, args=["VACUUM FULL t_heap"]) - process.start() - - while not gdb.stopped_in_breakpoint: - time.sleep(1) - - gdb.continue_execution_until_break(20) - - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['--stream']) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - gdb.remove_all_breakpoints() - gdb._execute('detach') - process.join() - - old_tablespace = self.get_tblspace_path(node, 'somedata') - new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') - - self.restore_node( - backup_dir, 'node', node_restored, - options=["-j", "4", "-T", "{0}={1}".format( - old_tablespace, new_tablespace)]) - - # Physical comparison - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - - node_restored.slow_start() - - # @unittest.skip("skip") - def test_create_db(self): - """ - Make node, take full backup, create database db1, take delta backup, - restore database and check it presense - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'max_wal_size': '10GB', - } - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - 
"md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - - node.safe_psql("postgres", "SELECT * FROM t_heap") - self.backup_node( - backup_dir, 'node', node, - options=["--stream"]) - - # CREATE DATABASE DB1 - node.safe_psql("postgres", "create database db1") - node.safe_psql( - "db1", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - - # DELTA BACKUP - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=["--stream"] - ) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored') - ) - - node_restored.cleanup() - self.restore_node( - backup_dir, - 'node', - node_restored, - backup_id=backup_id, - options=[ - "-j", "4", - "--immediate", - "--recovery-target-action=promote"]) - - # COMPARE PHYSICAL CONTENT - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - # DROP DATABASE DB1 - node.safe_psql( - "postgres", "drop database db1") - # SECOND DELTA BACKUP - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=["--stream"] - ) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # RESTORE SECOND DELTA BACKUP - node_restored.cleanup() - self.restore_node( - backup_dir, - 'node', - node_restored, - backup_id=backup_id, - options=[ - "-j", "4", - "--immediate", - "--recovery-target-action=promote"] - ) - - # COMPARE PHYSICAL CONTENT - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - try: - node_restored.safe_psql('db1', 'select 1') - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because we are connecting to deleted database" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except QueryException as e: - self.assertTrue( - 'FATAL: database "db1" does not exist' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_exists_in_previous_backup(self): - """ - Make node, take full backup, create table, take page backup, - take delta backup, check that file is no fully copied to delta backup - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'max_wal_size': '10GB', - 'checkpoint_timeout': '5min', - } - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - - node.safe_psql("postgres", "SELECT * FROM t_heap") - filepath = node.safe_psql( - "postgres", - "SELECT pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - 
self.backup_node( - backup_dir, - 'node', - node, - options=["--stream"]) - - # PAGE BACKUP - backup_id = self.backup_node( - backup_dir, - 'node', - node, - backup_type='page' - ) - - fullpath = os.path.join( - backup_dir, 'backups', 'node', backup_id, 'database', filepath) - self.assertFalse(os.path.exists(fullpath)) - -# if self.paranoia: -# pgdata_page = self.pgdata_content( -# os.path.join( -# backup_dir, 'backups', -# 'node', backup_id, 'database')) - - # DELTA BACKUP - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=["--stream"] - ) -# if self.paranoia: -# pgdata_delta = self.pgdata_content( -# os.path.join( -# backup_dir, 'backups', -# 'node', backup_id, 'database')) -# self.compare_pgdata( -# pgdata_page, pgdata_delta) - - fullpath = os.path.join( - backup_dir, 'backups', 'node', backup_id, 'database', filepath) - self.assertFalse(os.path.exists(fullpath)) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored') - ) - - node_restored.cleanup() - self.restore_node( - backup_dir, - 'node', - node_restored, - backup_id=backup_id, - options=[ - "-j", "4", - "--immediate", - "--recovery-target-action=promote"]) - - # COMPARE PHYSICAL CONTENT - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - # @unittest.skip("skip") - def test_alter_table_set_tablespace_delta(self): - """ - Make node, create tablespace with table, take full backup, - alter tablespace location, take delta backup, restore database. 
- """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s', - } - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - self.create_tblspace_in_node(node, 'somedata') - node.safe_psql( - "postgres", - "create table t_heap tablespace somedata as select i as id," - " md5(i::text) as text, md5(i::text)::tsvector as tsvector" - " from generate_series(0,100) i") - - # FULL backup - self.backup_node(backup_dir, 'node', node, options=["--stream"]) - - # ALTER TABLESPACE - self.create_tblspace_in_node(node, 'somedata_new') - node.safe_psql( - "postgres", - "alter table t_heap set tablespace somedata_new") - - # DELTA BACKUP - result = node.safe_psql( - "postgres", "select * from t_heap") - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=["--stream"]) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format( - self.get_tblspace_path(node, 'somedata'), - self.get_tblspace_path(node_restored, 'somedata') - ), - "-T", "{0}={1}".format( - self.get_tblspace_path(node, 'somedata_new'), - self.get_tblspace_path(node_restored, 'somedata_new') - ) - ] - ) - - # GET RESTORED PGDATA AND COMPARE - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - result_new = node_restored.safe_psql( - "postgres", "select * from t_heap") - - self.assertEqual(result, result_new, 'lost some data after restore') - - # @unittest.skip("skip") - def test_alter_database_set_tablespace_delta(self): - """ - Make node, take full backup, create database, - take delta backup, alter database tablespace location, - take delta backup restore last delta backup. 
- """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - self.create_tblspace_in_node(node, 'somedata') - - # FULL backup - self.backup_node(backup_dir, 'node', node, options=["--stream"]) - - # CREATE DATABASE DB1 - node.safe_psql( - "postgres", - "create database db1 tablespace = 'somedata'") - node.safe_psql( - "db1", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - - # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=["--stream"] - ) - - # ALTER TABLESPACE - self.create_tblspace_in_node(node, 'somedata_new') - node.safe_psql( - "postgres", - "alter database db1 set tablespace somedata_new" - ) - - # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=["--stream"] - ) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored') - ) - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format( - self.get_tblspace_path(node, 'somedata'), - self.get_tblspace_path(node_restored, 'somedata') - ), - "-T", "{0}={1}".format( - self.get_tblspace_path(node, 'somedata_new'), - self.get_tblspace_path(node_restored, 'somedata_new') - ) - ] - ) - - # GET RESTORED PGDATA AND COMPARE - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - # @unittest.skip("skip") - def test_delta_delete(self): - """ - Make node, create tablespace with table, take full backup, - alter tablespace location, take delta backup, restore database. 
- """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s', - } - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.create_tblspace_in_node(node, 'somedata') - - # FULL backup - self.backup_node(backup_dir, 'node', node, options=["--stream"]) - - node.safe_psql( - "postgres", - "create table t_heap tablespace somedata as select i as id," - " md5(i::text) as text, md5(i::text)::tsvector as tsvector" - " from generate_series(0,100) i" - ) - - node.safe_psql( - "postgres", - "delete from t_heap" - ) - - node.safe_psql( - "postgres", - "vacuum t_heap" - ) - - # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=["--stream"] - ) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored') - ) - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format( - self.get_tblspace_path(node, 'somedata'), - self.get_tblspace_path(node_restored, 'somedata') - ) - ] - ) - - # GET RESTORED PGDATA AND COMPARE - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - def test_delta_nullified_heap_page_backup(self): - """ - make node, take full backup, nullify some heap block, - take delta backup, restore, physically compare pgdata`s - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=1) - - file_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('pgbench_accounts')").decode('utf-8').rstrip() - - node.safe_psql( - "postgres", - "CHECKPOINT") - - self.backup_node( - backup_dir, 'node', node) - - # Nullify some block in PostgreSQL - file = os.path.join(node.data_dir, file_path).replace("\\", "/") - if os.name == 'nt': - file = file.replace("\\", "/") - - with open(file, 'r+b', 0) as f: - f.seek(8192) - f.write(b"\x00"*8192) - f.flush() - f.close - - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=["--log-level-file=verbose"]) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - if not self.remote: - log_file_path = os.path.join(backup_dir, "log", "pg_probackup.log") - with open(log_file_path) as f: - content = f.read() - - self.assertIn( - 'VERBOSE: File: "{0}" blknum 1, empty page'.format(file), - content) - self.assertNotIn( - "Skipping blknum 1 in file: {0}".format(file), - content) - - # Restore DELTA backup - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored) - - if self.paranoia: - pgdata_restored 
= self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - def test_delta_backup_from_past(self): - """ - make node, take FULL stream backup, take DELTA stream backup, - restore FULL backup, try to take second DELTA stream backup - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - node.pgbench_init(scale=3) - - # First DELTA - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['--stream']) - - # Restore FULL backup - node.cleanup() - self.restore_node(backup_dir, 'node', node, backup_id=backup_id) - node.slow_start() - - # Second DELTA backup - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because we are backing up an instance from the past" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: Current START LSN ' in e.message and - 'is lower than START LSN ' in e.message and - 'of previous backup ' in e.message and - 'It may indicate that we are trying ' - 'to backup PostgreSQL instance from the past' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - @unittest.skip("skip") - # @unittest.expectedFailure - def test_delta_pg_resetxlog(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'shared_buffers': '512MB', - 'max_wal_size': '3GB'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # Create table - node.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap " - "as select nextval('t_seq')::int as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " -# "from generate_series(0,25600) i") - "from generate_series(0,2560) i") - - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - node.safe_psql( - 'postgres', - "update t_heap set id = nextval('t_seq'), text = md5(text), " - "tsvector = md5(repeat(tsvector::text, 10))::tsvector") - - # kill the bastard - if self.verbose: - print('Killing postmaster. Losing Ptrack changes') - node.stop(['-m', 'immediate', '-D', node.data_dir]) - - # now smack it with sledgehammer - if node.major_version >= 10: - pg_resetxlog_path = self.get_bin_path('pg_resetwal') - wal_dir = 'pg_wal' - else: - pg_resetxlog_path = self.get_bin_path('pg_resetxlog') - wal_dir = 'pg_xlog' - - self.run_binary( - [ - pg_resetxlog_path, - '-D', - node.data_dir, - '-o 42', - '-f' - ], - asynchronous=False) - - if not node.status(): - node.slow_start() - else: - print("Die! Die! Why won't you die?... 
Why won't you die?") - exit(1) - - # take ptrack backup -# self.backup_node( -# backup_dir, 'node', node, -# backup_type='delta', options=['--stream']) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because instance was brutalized by pg_resetxlog" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd) - ) - except ProbackupException as e: - self.assertIn( - 'Insert error message', - e.message, - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(e.message), self.cmd)) - -# pgdata = self.pgdata_content(node.data_dir) -# -# node_restored = self.make_simple_node( -# base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) -# node_restored.cleanup() -# -# self.restore_node( -# backup_dir, 'node', node_restored) -# -# pgdata_restored = self.pgdata_content(node_restored.data_dir) -# self.compare_pgdata(pgdata, pgdata_restored) diff --git a/tests/exclude_test.py b/tests/exclude_test.py deleted file mode 100644 index cb3530cd5..000000000 --- a/tests/exclude_test.py +++ /dev/null @@ -1,338 +0,0 @@ -import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException - - -class ExcludeTest(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - def test_exclude_temp_files(self): - """ - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'logging_collector': 'on', - 'log_filename': 'postgresql.log'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - oid = node.safe_psql( - 'postgres', - "select oid from pg_database where datname = 'postgres'").rstrip() - - pgsql_tmp_dir = os.path.join(node.data_dir, 'base', 'pgsql_tmp') - - os.mkdir(pgsql_tmp_dir) - - file = os.path.join(pgsql_tmp_dir, 'pgsql_tmp7351.16') - with open(file, 'w') as f: - f.write("HELLO") - f.flush() - f.close - - full_id = self.backup_node( - backup_dir, 'node', node, backup_type='full', options=['--stream']) - - file = os.path.join( - backup_dir, 'backups', 'node', full_id, - 'database', 'base', 'pgsql_tmp', 'pgsql_tmp7351.16') - - self.assertFalse( - os.path.exists(file), - "File must be excluded: {0}".format(file)) - - # TODO check temporary tablespaces - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_exclude_temp_tables(self): - """ - make node without archiving, create temp table, take full backup, - check that temp table not present in backup catalogue - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - with node.connect("postgres") as conn: - - conn.execute( - "create temp table test as " - "select generate_series(0,50050000)::text") - conn.commit() - - temp_schema_name = conn.execute( - "SELECT nspname FROM pg_namespace " - "WHERE oid = pg_my_temp_schema()")[0][0] - conn.commit() - - temp_toast_schema_name = "pg_toast_" + temp_schema_name.replace( - "pg_", "") - conn.commit() - - conn.execute("create index test_idx on 
test (generate_series)") - conn.commit() - - heap_path = conn.execute( - "select pg_relation_filepath('test')")[0][0] - conn.commit() - - index_path = conn.execute( - "select pg_relation_filepath('test_idx')")[0][0] - conn.commit() - - heap_oid = conn.execute("select 'test'::regclass::oid")[0][0] - conn.commit() - - toast_path = conn.execute( - "select pg_relation_filepath('{0}.{1}')".format( - temp_toast_schema_name, "pg_toast_" + str(heap_oid)))[0][0] - conn.commit() - - toast_idx_path = conn.execute( - "select pg_relation_filepath('{0}.{1}')".format( - temp_toast_schema_name, - "pg_toast_" + str(heap_oid) + "_index"))[0][0] - conn.commit() - - temp_table_filename = os.path.basename(heap_path) - temp_idx_filename = os.path.basename(index_path) - temp_toast_filename = os.path.basename(toast_path) - temp_idx_toast_filename = os.path.basename(toast_idx_path) - - self.backup_node( - backup_dir, 'node', node, backup_type='full', options=['--stream']) - - for root, dirs, files in os.walk(backup_dir): - for file in files: - if file in [ - temp_table_filename, temp_table_filename + ".1", - temp_idx_filename, - temp_idx_filename + ".1", - temp_toast_filename, - temp_toast_filename + ".1", - temp_idx_toast_filename, - temp_idx_toast_filename + ".1" - ]: - self.assertEqual( - 1, 0, - "Found temp table file in backup catalogue.\n " - "Filepath: {0}".format(file)) - - # @unittest.skip("skip") - def test_exclude_unlogged_tables_1(self): - """ - make node without archiving, create unlogged table, take full backup, - alter table to unlogged, take delta backup, restore delta backup, - check that PGDATA`s are physically the same - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - "shared_buffers": "10MB"}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - conn = node.connect() - with node.connect("postgres") as conn: - - conn.execute( - "create unlogged table test as " - "select generate_series(0,5005000)::text") - conn.commit() - - conn.execute("create index test_idx on test (generate_series)") - conn.commit() - - self.backup_node( - backup_dir, 'node', node, - backup_type='full', options=['--stream']) - - node.safe_psql('postgres', "alter table test set logged") - - self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--stream']) - - pgdata = self.pgdata_content(node.data_dir) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) - - # Physical comparison - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_exclude_unlogged_tables_2(self): - """ - 1. make node, create unlogged, take FULL, DELTA, PAGE, - check that unlogged table files was not backed up - 2. 
restore FULL, DELTA, PAGE to empty db, - ensure unlogged table exist and is epmty - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - "shared_buffers": "10MB"}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - backup_ids = [] - - for backup_type in ['full', 'delta', 'page']: - - if backup_type == 'full': - node.safe_psql( - 'postgres', - 'create unlogged table test as select generate_series(0,20050000)::text') - else: - node.safe_psql( - 'postgres', - 'insert into test select generate_series(0,20050000)::text') - - rel_path = node.execute( - 'postgres', - "select pg_relation_filepath('test')")[0][0] - - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type=backup_type, options=['--stream']) - - backup_ids.append(backup_id) - - filelist = self.get_backup_filelist( - backup_dir, 'node', backup_id) - - self.assertNotIn( - rel_path, filelist, - "Unlogged table was not excluded") - - self.assertNotIn( - rel_path + '.1', filelist, - "Unlogged table was not excluded") - - self.assertNotIn( - rel_path + '.2', filelist, - "Unlogged table was not excluded") - - self.assertNotIn( - rel_path + '.3', filelist, - "Unlogged table was not excluded") - - # ensure restoring retrieves back only empty unlogged table - for backup_id in backup_ids: - node.stop() - node.cleanup() - - self.restore_node(backup_dir, 'node', node, backup_id=backup_id) - - node.slow_start() - - self.assertEqual( - node.execute( - 'postgres', - 'select count(*) from test')[0][0], - 0) - - # @unittest.skip("skip") - def test_exclude_log_dir(self): - """ - check that by default 'log' and 'pg_log' directories are not backed up - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'logging_collector': 'on', - 'log_filename': 'postgresql.log'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.backup_node( - backup_dir, 'node', node, - backup_type='full', options=['--stream']) - - log_dir = node.safe_psql( - 'postgres', - 'show log_directory').decode('utf-8').rstrip() - - node.cleanup() - - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]) - - # check that PGDATA/log or PGDATA/pg_log do not exists - path = os.path.join(node.data_dir, log_dir) - log_file = os.path.join(path, 'postgresql.log') - self.assertTrue(os.path.exists(path)) - self.assertFalse(os.path.exists(log_file)) - - # @unittest.skip("skip") - def test_exclude_log_dir_1(self): - """ - check that "--backup-pg-log" works correctly - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'logging_collector': 'on', - 'log_filename': 'postgresql.log'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - log_dir = node.safe_psql( - 'postgres', - 'show log_directory').decode('utf-8').rstrip() - - self.backup_node( - backup_dir, 'node', node, 
- backup_type='full', options=['--stream', '--backup-pg-log']) - - node.cleanup() - - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]) - - # check that PGDATA/log or PGDATA/pg_log do not exists - path = os.path.join(node.data_dir, log_dir) - log_file = os.path.join(path, 'postgresql.log') - self.assertTrue(os.path.exists(path)) - self.assertTrue(os.path.exists(log_file)) diff --git a/tests/external_test.py b/tests/external_test.py deleted file mode 100644 index 53f3c5449..000000000 --- a/tests/external_test.py +++ /dev/null @@ -1,2405 +0,0 @@ -import unittest -import os -from time import sleep -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from .helpers.cfs_helpers import find_by_name -import shutil - - -# TODO: add some ptrack tests -class ExternalTest(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_basic_external(self): - """ - make node, create external directory, take backup - with external directory, restore backup, check that - external directory was successfully copied - """ - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - set_replication=True) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - external_dir = self.get_tblspace_path(node, 'somedirectory') - - # create directory in external_directory - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # take FULL backup with external directory pointing to a file - file_path = os.path.join(core_dir, 'file') - with open(file_path, "w+") as f: - pass - - try: - self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=[ - '--external-dirs={0}'.format(file_path)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because external dir point to a file" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: --external-dirs option' in e.message and - 'directory or symbolic link expected' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - # FULL backup - self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=["-j", "4", "--stream"]) - - # Fill external directories - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir, options=["-j", "4"]) - - # Full backup with external dir - self.backup_node( - backup_dir, 'node', node, - options=[ - '--external-dirs={0}'.format(external_dir)]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - node.cleanup() - shutil.rmtree(node.base_dir, ignore_errors=True) - - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]) - - pgdata_restored = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_external_none(self): - """ - make node, create external directory, take backup - with external directory, take delta backup with --external-dirs=none, - restore delta backup, check that - external directory was not copied - """ - node = self.make_simple_node( - 
base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - set_replication=True) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - external_dir = self.get_tblspace_path(node, 'somedirectory') - - # create directory in external_directory - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=["-j", "4", "--stream"]) - - # Fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir, options=["-j", "4"]) - - # Full backup with external dir - self.backup_node( - backup_dir, 'node', node, - options=[ - '--stream', - '--external-dirs={0}'.format(external_dir)]) - - # Delta backup without external directory - self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=['--external-dirs=none', '--stream']) - - shutil.rmtree(external_dir, ignore_errors=True) - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - node.cleanup() - shutil.rmtree(node.base_dir, ignore_errors=True) - - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]) - - pgdata_restored = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_external_dirs_overlapping(self): - """ - make node, create directory, - take backup with two external directories pointing to - the same directory, backup should fail - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - set_replication=True) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - external_dir1 = self.get_tblspace_path(node, 'external_dir1') - external_dir2 = self.get_tblspace_path(node, 'external_dir2') - - # create directory in external_directory - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - os.mkdir(external_dir1) - os.mkdir(external_dir2) - - # Full backup with external dirs - try: - self.backup_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}{1}{0}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir1)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because tablespace mapping is incorrect" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: External directory path (-E option)' in e.message and - 'contain another external directory' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_external_dir_mapping(self): - """ - make node, take full backup, check that restore with - external-dir mapping will end with error, take page backup, - check that restore with external-dir mapping will end with - success - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - 
node.slow_start() - - self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=["-j", "4", "--stream"]) - - external_dir1 = self.get_tblspace_path(node, 'external_dir1') - external_dir2 = self.get_tblspace_path(node, 'external_dir2') - - # Fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - external_dir1_new = self.get_tblspace_path(node_restored, 'external_dir1') - external_dir2_new = self.get_tblspace_path(node_restored, 'external_dir2') - - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "--external-mapping={0}={1}".format( - external_dir1, external_dir1_new), - "--external-mapping={0}={1}".format( - external_dir2, external_dir2_new)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because tablespace mapping is incorrect" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: --external-mapping option' in e.message and - 'have an entry in list of external directories' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2)]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "--external-mapping={0}={1}".format( - external_dir1, external_dir1_new), - "--external-mapping={0}={1}".format( - external_dir2, external_dir2_new)]) - - pgdata_restored = self.pgdata_content( - node_restored.base_dir, exclude_dirs=['logs']) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_backup_multiple_external(self): - """check that cmdline has priority over config""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - external_dir1 = self.get_tblspace_path(node, 'external_dir1') - external_dir2 = self.get_tblspace_path(node, 'external_dir2') - - # FULL backup - self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=["-j", "4", "--stream"]) - - # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) - - self.set_config( - backup_dir, 'node', - options=['-E', external_dir1]) - - # cmdline option MUST override options in config - self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=[ - "-j", "4", "--stream", - "-E", external_dir2]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs', 'external_dir1']) - - node.cleanup() - shutil.rmtree(external_dir1, 
ignore_errors=True) - shutil.rmtree(external_dir2, ignore_errors=True) - - self.restore_node( - backup_dir, 'node', node, - options=["-j", "4"]) - - pgdata_restored = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_external_backward_compatibility(self): - """ - take backup with old binary without external dirs support - take delta backup with new binary and 2 external directories - restore delta backup, check that incremental chain - restored correctly - """ - if not self.probackup_old_path: - self.skipTest("You must specify PGPROBACKUPBIN_OLD" - " for run this test") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir, old_binary=True) - self.show_pb(backup_dir) - - self.add_instance(backup_dir, 'node', node, old_binary=True) - self.show_pb(backup_dir) - - node.slow_start() - - node.pgbench_init(scale=3) - - # FULL backup with old binary without external dirs support - self.backup_node( - backup_dir, 'node', node, - old_binary=True, options=["-j", "4", "--stream"]) - - external_dir1 = self.get_tblspace_path(node, 'external_dir1') - external_dir2 = self.get_tblspace_path(node, 'external_dir2') - - # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) - - pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, - old_binary=True, options=["-j", "4", "--stream"]) - - # fill external directories with changed data - shutil.rmtree(external_dir1, ignore_errors=True) - shutil.rmtree(external_dir2, ignore_errors=True) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) - - self.delete_pb(backup_dir, 'node', backup_id=backup_id) - - # delta backup with external directories using new binary - self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2)]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - # RESTORE chain with new binary - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - node_restored.cleanup() - - external_dir1_new = self.get_tblspace_path(node_restored, 'external_dir1') - external_dir2_new = self.get_tblspace_path(node_restored, 'external_dir2') - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "--external-mapping={0}={1}".format(external_dir1, external_dir1_new), - "--external-mapping={0}={1}".format(external_dir2, external_dir2_new)]) - - pgdata_restored = self.pgdata_content( - node_restored.base_dir, exclude_dirs=['logs']) - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_external_backward_compatibility_merge_1(self): - """ - take backup with old binary without external dirs support 
- take delta backup with new binary and 2 external directories - merge delta backup ajd restore it - """ - if not self.probackup_old_path: - self.skipTest("You must specify PGPROBACKUPBIN_OLD" - " for run this test") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir, old_binary=True) - self.show_pb(backup_dir) - - self.add_instance(backup_dir, 'node', node, old_binary=True) - self.show_pb(backup_dir) - - node.slow_start() - - node.pgbench_init(scale=3) - - # tmp FULL backup with old binary - tmp_id = self.backup_node( - backup_dir, 'node', node, - old_binary=True, options=["-j", "4", "--stream"]) - - external_dir1 = self.get_tblspace_path(node, 'external_dir1') - external_dir2 = self.get_tblspace_path(node, 'external_dir2') - - # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) - - self.delete_pb(backup_dir, 'node', backup_id=tmp_id) - - # FULL backup with old binary without external dirs support - self.backup_node( - backup_dir, 'node', node, - old_binary=True, options=["-j", "4", "--stream"]) - - pgbench = node.pgbench(options=['-T', '30', '-c', '1']) - pgbench.wait() - - # delta backup with external directories using new binary - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2)]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - # Merge chain chain with new binary - self.merge_backup(backup_dir, 'node', backup_id=backup_id) - - # Restore merged backup - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - node_restored.cleanup() - - external_dir1_new = self.get_tblspace_path(node_restored, 'external_dir1') - external_dir2_new = self.get_tblspace_path(node_restored, 'external_dir2') - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "--external-mapping={0}={1}".format(external_dir1, external_dir1_new), - "--external-mapping={0}={1}".format(external_dir2, external_dir2_new)]) - - pgdata_restored = self.pgdata_content( - node_restored.base_dir, exclude_dirs=['logs']) - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_external_backward_compatibility_merge_2(self): - """ - take backup with old binary without external dirs support - take delta backup with new binary and 2 external directories - merge delta backup and restore it - """ - if not self.probackup_old_path: - self.skipTest("You must specify PGPROBACKUPBIN_OLD" - " for run this test") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir, old_binary=True) - self.show_pb(backup_dir) - - self.add_instance(backup_dir, 'node', node, old_binary=True) - self.show_pb(backup_dir) - - node.slow_start() - - node.pgbench_init(scale=3) - - # tmp FULL backup with old binary - tmp_id = 
self.backup_node( - backup_dir, 'node', node, - old_binary=True, options=["-j", "4", "--stream"]) - - external_dir1 = self.get_tblspace_path(node, 'external_dir1') - external_dir2 = self.get_tblspace_path(node, 'external_dir2') - - # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) - - self.delete_pb(backup_dir, 'node', backup_id=tmp_id) - - # FULL backup with old binary without external dirs support - self.backup_node( - backup_dir, 'node', node, - old_binary=True, options=["-j", "4", "--stream"]) - - pgbench = node.pgbench(options=['-T', '30', '-c', '1']) - pgbench.wait() - - # delta backup with external directories using new binary - self.backup_node( - backup_dir, 'node', node, - backup_type="delta", - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2)]) - - pgbench = node.pgbench(options=['-T', '30', '-c', '1']) - pgbench.wait() - - # Fill external dirs with changed data - shutil.rmtree(external_dir1, ignore_errors=True) - shutil.rmtree(external_dir2, ignore_errors=True) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, - options=['-j', '4', '--skip-external-dirs']) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, - options=['-j', '4', '--skip-external-dirs']) - - # delta backup without external directories using old binary - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type="delta", - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2)]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - # Merge chain using new binary - self.merge_backup(backup_dir, 'node', backup_id=backup_id) - - # Restore merged backup - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - node_restored.cleanup() - - external_dir1_new = self.get_tblspace_path( - node_restored, 'external_dir1') - external_dir2_new = self.get_tblspace_path( - node_restored, 'external_dir2') - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "--external-mapping={0}={1}".format( - external_dir1, external_dir1_new), - "--external-mapping={0}={1}".format( - external_dir2, external_dir2_new)]) - - pgdata_restored = self.pgdata_content( - node_restored.base_dir, exclude_dirs=['logs']) - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_external_merge(self): - """""" - if not self.probackup_old_path: - self.skipTest("You must specify PGPROBACKUPBIN_OLD" - " for run this test") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node, old_binary=True) - node.slow_start() - - node.pgbench_init(scale=3) - - # take temp FULL backup - tmp_id = self.backup_node( - backup_dir, 'node', node, options=["-j", "4", "--stream"]) - - external_dir1 = self.get_tblspace_path(node, 'external_dir1') - external_dir2 = self.get_tblspace_path(node, 'external_dir2') - - # fill external directories with data 
- self.restore_node( - backup_dir, 'node', node, backup_id=tmp_id, - data_dir=external_dir1, options=["-j", "4"]) - - self.restore_node( - backup_dir, 'node', node, backup_id=tmp_id, - data_dir=external_dir2, options=["-j", "4"]) - - self.delete_pb(backup_dir, 'node', backup_id=tmp_id) - - # FULL backup with old binary without external dirs support - self.backup_node( - backup_dir, 'node', node, - old_binary=True, options=["-j", "4", "--stream"]) - - # change data a bit - pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # delta backup with external directories using new binary - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2)]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - print(self.show_pb(backup_dir, 'node', as_json=False, as_text=True)) - - # Merge - print(self.merge_backup(backup_dir, 'node', backup_id=backup_id, - options=['--log-level-file=VERBOSE'])) - - # RESTORE - node.cleanup() - shutil.rmtree(node.base_dir, ignore_errors=True) - - external_dir1_new = self.get_tblspace_path(node, 'external_dir1') - external_dir2_new = self.get_tblspace_path(node, 'external_dir2') - - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", - "--external-mapping={0}={1}".format( - external_dir1, external_dir1_new), - "--external-mapping={0}={1}".format( - external_dir2, external_dir2_new)]) - - pgdata_restored = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_external_merge_skip_external_dirs(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=3) - - # FULL backup with old data - tmp_id = self.backup_node( - backup_dir, 'node', node, options=["-j", "4", "--stream"]) - - external_dir1 = self.get_tblspace_path(node, 'external_dir1') - external_dir2 = self.get_tblspace_path(node, 'external_dir2') - - # fill external directories with old data - self.restore_node( - backup_dir, 'node', node, backup_id=tmp_id, - data_dir=external_dir1, options=["-j", "4"]) - - self.restore_node( - backup_dir, 'node', node, backup_id=tmp_id, - data_dir=external_dir2, options=["-j", "4"]) - - self.delete_pb(backup_dir, 'node', backup_id=tmp_id) - - # change data a bit - pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # FULL backup with external directories - self.backup_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2)]) - - # drop old external data - shutil.rmtree(external_dir1, ignore_errors=True) - shutil.rmtree(external_dir2, ignore_errors=True) - - # fill external directories with new data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, - options=["-j", "4", "--skip-external-dirs"]) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, - options=["-j", "4", "--skip-external-dirs"]) - - # DELTA backup 
with external directories - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2)]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - # merge backups without external directories - self.merge_backup( - backup_dir, 'node', - backup_id=backup_id, options=['--skip-external-dirs']) - - # RESTORE - node.cleanup() - shutil.rmtree(node.base_dir, ignore_errors=True) - - self.restore_node( - backup_dir, 'node', node, - options=["-j", "4"]) - - pgdata_restored = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_external_merge_1(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=3) - - # FULL backup - self.backup_node( - backup_dir, 'node', node, options=["-j", "4", "--stream"]) - - external_dir1 = self.get_tblspace_path(node, 'external_dir1') - external_dir2 = self.get_tblspace_path(node, 'external_dir2') - - pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # FULL backup with changed data - backup_id = self.backup_node( - backup_dir, 'node', node, - options=["-j", "4", "--stream"]) - - # fill external directories with changed data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) - - self.delete_pb(backup_dir, 'node', backup_id=backup_id) - - # delta backup with external directories using new binary - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2)]) - - self.merge_backup(backup_dir, 'node', backup_id=backup_id) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - # RESTORE - node.cleanup() - shutil.rmtree(node.base_dir, ignore_errors=True) - - external_dir1_new = self.get_tblspace_path(node, 'external_dir1') - external_dir2_new = self.get_tblspace_path(node, 'external_dir2') - - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", - "--external-mapping={0}={1}".format(external_dir1, external_dir1_new), - "--external-mapping={0}={1}".format(external_dir2, external_dir2_new)]) - - pgdata_restored = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_external_merge_3(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=3) - - # FULL backup - self.backup_node(backup_dir, 'node', node, 
options=["-j", "4"]) - - external_dir1 = self.get_tblspace_path(node, 'external_dir1') - external_dir2 = self.get_tblspace_path(node, 'external_dir2') - - pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node) - - # fill external directories with changed data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2) - - self.delete_pb(backup_dir, 'node', backup_id=backup_id) - - # page backup with external directories - self.backup_node( - backup_dir, 'node', node, backup_type="page", - options=[ - "-j", "4", - "-E", "{0}{1}{2}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2)]) - - # page backup with external directories - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="page", - options=[ - "-j", "4", - "-E", "{0}{1}{2}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2)]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - self.merge_backup( - backup_dir, 'node', backup_id=backup_id, - options=['--log-level-file=verbose']) - - # RESTORE - node.cleanup() - shutil.rmtree(node.base_dir, ignore_errors=True) - - external_dir1_new = self.get_tblspace_path(node, 'external_dir1') - external_dir2_new = self.get_tblspace_path(node, 'external_dir2') - - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", - "--external-mapping={0}={1}".format( - external_dir1, external_dir1_new), - "--external-mapping={0}={1}".format( - external_dir2, external_dir2_new)]) - - pgdata_restored = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_external_merge_2(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=3) - - # FULL backup - self.backup_node( - backup_dir, 'node', node, options=["-j", "4", "--stream"]) - - external_dir1 = self.get_tblspace_path(node, 'external_dir1') - external_dir2 = self.get_tblspace_path(node, 'external_dir2') - - pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, - options=["-j", "4", "--stream"]) - - # fill external directories with changed data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) - - self.delete_pb(backup_dir, 'node', backup_id=backup_id) - - # delta backup with external directories - self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2)]) - - # delta backup with external directories - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2)]) 
- - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - shutil.rmtree(external_dir1, ignore_errors=True) - shutil.rmtree(external_dir2, ignore_errors=True) - - # delta backup without external directories - self.merge_backup(backup_dir, 'node', backup_id=backup_id) - - # RESTORE - node.cleanup() - shutil.rmtree(node.base_dir, ignore_errors=True) - - external_dir1_new = self.get_tblspace_path(node, 'external_dir1') - external_dir2_new = self.get_tblspace_path(node, 'external_dir2') - - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", - "--external-mapping={0}={1}".format(external_dir1, external_dir1_new), - "--external-mapping={0}={1}".format(external_dir2, external_dir2_new)]) - - pgdata_restored = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_restore_external_changed_data(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=2) - - # set externals - external_dir1 = self.get_tblspace_path(node, 'external_dir1') - external_dir2 = self.get_tblspace_path(node, 'external_dir2') - - # FULL backup - tmp_id = self.backup_node( - backup_dir, 'node', - node, options=["-j", "4", "--stream"]) - - # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) - - self.delete_pb(backup_dir, 'node', backup_id=tmp_id) - - # change data a bit - pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2)]) - - # fill external directories with changed data - shutil.rmtree(external_dir1, ignore_errors=True) - shutil.rmtree(external_dir2, ignore_errors=True) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, backup_id=backup_id, - options=["-j", "4", "--skip-external-dirs"]) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, backup_id=backup_id, - options=["-j", "4", "--skip-external-dirs"]) - - # change data a bit more - pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # Delta backup with external directories - self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2)]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - # Restore - node.cleanup() - shutil.rmtree(node.base_dir, ignore_errors=True) - - self.restore_node( - backup_dir, 'node', node, - options=["-j", "4"]) - - pgdata_restored = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_restore_external_changed_data_1(self): - """""" - backup_dir = 
os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'max_wal_size': '32MB'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=1) - - # set externals - external_dir1 = self.get_tblspace_path(node, 'external_dir1') - external_dir2 = self.get_tblspace_path(node, 'external_dir2') - - # FULL backup - tmp_id = self.backup_node( - backup_dir, 'node', - node, options=["-j", "4", "--stream"]) - - # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) - - self.delete_pb(backup_dir, 'node', backup_id=tmp_id) - - # change data a bit - pgbench = node.pgbench(options=['-T', '5', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2)]) - - # fill external directories with changed data - shutil.rmtree(external_dir1, ignore_errors=True) - shutil.rmtree(external_dir2, ignore_errors=True) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, backup_id=backup_id, - options=["-j", "4", "--skip-external-dirs"]) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, backup_id=backup_id, - options=["-j", "4", "--skip-external-dirs"]) - - # change data a bit more - pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # Delta backup with only one external directory - self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=[ - "-j", "4", "--stream", - "-E", external_dir1]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs', 'external_dir2']) - - # Restore - node.cleanup() - shutil.rmtree(node._base_dir) - - # create empty file in external_dir2 - os.mkdir(node._base_dir) - os.mkdir(external_dir2) - with open(os.path.join(external_dir2, 'file'), 'w+') as f: - f.close() - - output = self.restore_node( - backup_dir, 'node', node, - options=["-j", "4"]) - - self.assertNotIn( - 'externaldir2', - output) - - pgdata_restored = self.pgdata_content( - node.base_dir, exclude_dirs=['logs', 'external_dir2']) - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_merge_external_changed_data(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'max_wal_size': '32MB'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=2) - - # set externals - external_dir1 = self.get_tblspace_path(node, 'external_dir1') - external_dir2 = self.get_tblspace_path(node, 'external_dir2') - - # FULL backup - tmp_id = self.backup_node( - backup_dir, 'node', - node, options=["-j", "4", "--stream"]) - - # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) - - 
self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) - - self.delete_pb(backup_dir, 'node', backup_id=tmp_id) - - # change data a bit - pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2)]) - - # fill external directories with changed data - shutil.rmtree(external_dir1, ignore_errors=True) - shutil.rmtree(external_dir2, ignore_errors=True) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, backup_id=backup_id, - options=["-j", "4", "--skip-external-dirs"]) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, backup_id=backup_id, - options=["-j", "4", "--skip-external-dirs"]) - - # change data a bit more - pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # Delta backup with external directories - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2)]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - # Merge - self.merge_backup(backup_dir, 'node', backup_id) - - # Restore - node.cleanup() - shutil.rmtree(node.base_dir, ignore_errors=True) - - self.restore_node( - backup_dir, 'node', node, - options=["-j", "4"]) - - pgdata_restored = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_restore_skip_external(self): - """ - Check that --skip-external-dirs works correctly - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - external_dir1 = self.get_tblspace_path(node, 'external_dir1') - external_dir2 = self.get_tblspace_path(node, 'external_dir2') - - # temp FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, options=["-j", "4", "--stream"]) - - # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) - - self.delete_pb(backup_dir, 'node', backup_id=backup_id) - - # FULL backup with external directories - self.backup_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2)]) - - # delete first externals, so pgdata_compare - # will be capable of detecting redundant - # external files after restore - shutil.rmtree(external_dir1, ignore_errors=True) - shutil.rmtree(external_dir2, ignore_errors=True) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - # RESTORE - node.cleanup() - shutil.rmtree(node.base_dir, ignore_errors=True) - - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--skip-external-dirs"]) - - pgdata_restored = self.pgdata_content( - node.base_dir, 
exclude_dirs=['logs']) - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_external_dir_is_symlink(self): - """ - Check that backup works correctly if external dir is symlink, - symlink pointing to external dir should be followed, - but restored as directory - """ - if os.name == 'nt': - self.skipTest('Skipped for Windows') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - external_dir = self.get_tblspace_path(node, 'external_dir') - - # temp FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, options=["-j", "4", "--stream"]) - - # fill some directory with data - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - symlinked_dir = os.path.join(core_dir, 'symlinked') - - self.restore_node( - backup_dir, 'node', node, - data_dir=symlinked_dir, options=["-j", "4"]) - - # drop temp FULL backup - self.delete_pb(backup_dir, 'node', backup_id=backup_id) - - # create symlink to directory in external directory - os.symlink(symlinked_dir, external_dir) - - # FULL backup with external directories - backup_id = self.backup_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--stream", - "-E", external_dir]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - # RESTORE - node_restored.cleanup() - - external_dir_new = self.get_tblspace_path( - node_restored, 'external_dir') - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", "--external-mapping={0}={1}".format( - external_dir, external_dir_new)]) - - pgdata_restored = self.pgdata_content( - node_restored.base_dir, exclude_dirs=['logs']) - - self.compare_pgdata(pgdata, pgdata_restored) - - self.assertEqual( - external_dir, - self.show_pb( - backup_dir, 'node', - backup_id=backup_id)['external-dirs']) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_external_dir_contain_symlink_on_dir(self): - """ - Check that backup works correctly if external dir is symlink, - symlink pointing to external dir should be followed, - but restored as directory - """ - if os.name == 'nt': - self.skipTest('Skipped for Windows') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - external_dir = self.get_tblspace_path(node, 'external_dir') - dir_in_external_dir = os.path.join(external_dir, 'dir') - - # temp FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, options=["-j", "4", "--stream"]) - - # fill some directory with data - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - symlinked_dir = os.path.join(core_dir, 'symlinked') - - 
self.restore_node( - backup_dir, 'node', node, - data_dir=symlinked_dir, options=["-j", "4"]) - - # drop temp FULL backup - self.delete_pb(backup_dir, 'node', backup_id=backup_id) - - # create symlink to directory in external directory - os.mkdir(external_dir) - os.symlink(symlinked_dir, dir_in_external_dir) - - # FULL backup with external directories - backup_id = self.backup_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--stream", - "-E", external_dir]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - # RESTORE - node_restored.cleanup() - - external_dir_new = self.get_tblspace_path( - node_restored, 'external_dir') - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", "--external-mapping={0}={1}".format( - external_dir, external_dir_new)]) - - pgdata_restored = self.pgdata_content( - node_restored.base_dir, exclude_dirs=['logs']) - - self.compare_pgdata(pgdata, pgdata_restored) - - self.assertEqual( - external_dir, - self.show_pb( - backup_dir, 'node', - backup_id=backup_id)['external-dirs']) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_external_dir_contain_symlink_on_file(self): - """ - Check that backup works correctly if external dir is symlink, - symlink pointing to external dir should be followed, - but restored as directory - """ - if os.name == 'nt': - self.skipTest('Skipped for Windows') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - external_dir = self.get_tblspace_path(node, 'external_dir') - file_in_external_dir = os.path.join(external_dir, 'file') - - # temp FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, options=["-j", "4", "--stream"]) - - # fill some directory with data - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - symlinked_dir = os.path.join(core_dir, 'symlinked') - - self.restore_node( - backup_dir, 'node', node, - data_dir=symlinked_dir, options=["-j", "4"]) - - # drop temp FULL backup - self.delete_pb(backup_dir, 'node', backup_id=backup_id) - - # create symlink to directory in external directory - src_file = os.path.join(symlinked_dir, 'postgresql.conf') - os.mkdir(external_dir) - os.chmod(external_dir, 0o0700) - os.symlink(src_file, file_in_external_dir) - - # FULL backup with external directories - backup_id = self.backup_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--stream", - "-E", external_dir]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - # RESTORE - node_restored.cleanup() - - external_dir_new = self.get_tblspace_path( - node_restored, 'external_dir') - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", "--external-mapping={0}={1}".format( - external_dir, external_dir_new)]) - - pgdata_restored = self.pgdata_content( - node_restored.base_dir, exclude_dirs=['logs']) - - self.compare_pgdata(pgdata, 
pgdata_restored) - - self.assertEqual( - external_dir, - self.show_pb( - backup_dir, 'node', - backup_id=backup_id)['external-dirs']) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_external_dir_is_tablespace(self): - """ - Check that backup fails with error - if external directory points to tablespace - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - external_dir = self.get_tblspace_path(node, 'external_dir') - - self.create_tblspace_in_node( - node, 'tblspace1', tblspc_path=external_dir) - - node.pgbench_init(scale=1, tablespace='tblspace1') - - # FULL backup with external directories - try: - backup_id = self.backup_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--stream", - "-E", external_dir]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because external dir points to the tablespace" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'External directory path (-E option)', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - def test_restore_external_dir_not_empty(self): - """ - Check that backup fails with error - if external directory point to not empty tablespace and - if remapped directory also isn`t empty - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - external_dir = self.get_tblspace_path(node, 'external_dir') - - # create empty file in external directory - # open(os.path.join(external_dir, 'file'), 'a').close() - os.mkdir(external_dir) - with open(os.path.join(external_dir, 'file'), 'w+') as f: - f.close() - - # FULL backup with external directory - self.backup_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--stream", - "-E", external_dir]) - - node.cleanup() - - try: - self.restore_node(backup_dir, 'node', node) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because external dir is not empty" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'External directory is not empty', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - external_dir_new = self.get_tblspace_path(node, 'external_dir_new') - - # create empty file in directory, which will be a target of - # remapping - os.mkdir(external_dir_new) - with open(os.path.join(external_dir_new, 'file1'), 'w+') as f: - f.close() - - try: - self.restore_node( - backup_dir, 'node', node, - options=['--external-mapping={0}={1}'.format( - external_dir, external_dir_new)]) - # we should die here 
because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because remapped external dir is not empty" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'External directory is not empty', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - def test_restore_external_dir_is_missing(self): - """ - take FULL backup with not empty external directory - delete external directory - take DELTA backup with external directory, which - should fail - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - external_dir = self.get_tblspace_path(node, 'external_dir') - - # create empty file in external directory - # open(os.path.join(external_dir, 'file'), 'a').close() - os.mkdir(external_dir) - with open(os.path.join(external_dir, 'file'), 'w+') as f: - f.close() - - # FULL backup with external directory - self.backup_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--stream", - "-E", external_dir]) - - # drop external directory - shutil.rmtree(external_dir, ignore_errors=True) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=[ - "-j", "4", "--stream", - "-E", external_dir]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because external dir is missing" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: External directory is not found:', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - # take DELTA without external directories - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=["-j", "4", "--stream"]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - # Restore Delta backup - node.cleanup() - shutil.rmtree(node.base_dir, ignore_errors=True) - - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - self.compare_pgdata(pgdata, pgdata_restored) - - def test_merge_external_dir_is_missing(self): - """ - take FULL backup with not empty external directory - delete external directory - take DELTA backup with external directory, which - should fail, - take DELTA backup without external directory, - merge it into FULL, restore and check - data correctness - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - external_dir = self.get_tblspace_path(node, 'external_dir') - - # create empty file in external directory - # open(os.path.join(external_dir, 'file'), 'a').close() 
- os.mkdir(external_dir) - with open(os.path.join(external_dir, 'file'), 'w+') as f: - f.close() - - # FULL backup with external directory - self.backup_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--stream", - "-E", external_dir]) - - # drop external directory - shutil.rmtree(external_dir, ignore_errors=True) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=[ - "-j", "4", "--stream", - "-E", external_dir]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because external dir is missing" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: External directory is not found:', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - # take DELTA without external directories - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=["-j", "4", "--stream"]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - # Merge - self.merge_backup(backup_dir, 'node', backup_id=backup_id) - - # Restore - node.cleanup() - shutil.rmtree(node.base_dir, ignore_errors=True) - - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - self.compare_pgdata(pgdata, pgdata_restored) - - def test_restore_external_dir_is_empty(self): - """ - take FULL backup with not empty external directory - drop external directory content - take DELTA backup with the same external directory - restore DELRA backup, check that restored - external directory is empty - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - external_dir = self.get_tblspace_path(node, 'external_dir') - - # create empty file in external directory - # open(os.path.join(external_dir, 'file'), 'a').close() - os.mkdir(external_dir) - os.chmod(external_dir, 0o0700) - with open(os.path.join(external_dir, 'file'), 'w+') as f: - f.close() - - # FULL backup with external directory - self.backup_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--stream", - "-E", external_dir]) - - # make external directory empty - os.remove(os.path.join(external_dir, 'file')) - - # take DELTA backup with empty external directory - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=[ - "-j", "4", "--stream", - "-E", external_dir]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - # Restore Delta backup - node.cleanup() - shutil.rmtree(node.base_dir, ignore_errors=True) - - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - self.compare_pgdata(pgdata, pgdata_restored) - - def test_merge_external_dir_is_empty(self): - """ - take FULL backup with not empty external directory - drop external directory content - take DELTA backup with the same external directory - merge backups and restore FULL, check that restored - external directory is empty - """ - backup_dir 
= os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - external_dir = self.get_tblspace_path(node, 'external_dir') - - # create empty file in external directory - # open(os.path.join(external_dir, 'file'), 'a').close() - os.mkdir(external_dir) - os.chmod(external_dir, 0o0700) - with open(os.path.join(external_dir, 'file'), 'w+') as f: - f.close() - - # FULL backup with external directory - self.backup_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--stream", - "-E", external_dir]) - - # make external directory empty - os.remove(os.path.join(external_dir, 'file')) - - # take DELTA backup with empty external directory - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=[ - "-j", "4", "--stream", - "-E", external_dir]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - # Merge - self.merge_backup(backup_dir, 'node', backup_id=backup_id) - - # Restore - node.cleanup() - shutil.rmtree(node.base_dir, ignore_errors=True) - - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - self.compare_pgdata(pgdata, pgdata_restored) - - def test_restore_external_dir_string_order(self): - """ - take FULL backup with not empty external directory - drop external directory content - take DELTA backup with the same external directory - restore DELRA backup, check that restored - external directory is empty - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - external_dir_1 = self.get_tblspace_path(node, 'external_dir_1') - external_dir_2 = self.get_tblspace_path(node, 'external_dir_2') - - # create empty file in external directory - os.mkdir(external_dir_1) - os.chmod(external_dir_1, 0o0700) - with open(os.path.join(external_dir_1, 'fileA'), 'w+') as f: - f.close() - - os.mkdir(external_dir_2) - os.chmod(external_dir_2, 0o0700) - with open(os.path.join(external_dir_2, 'fileZ'), 'w+') as f: - f.close() - - # FULL backup with external directory - self.backup_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir_1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir_2)]) - - with open(os.path.join(external_dir_1, 'fileB'), 'w+') as f: - f.close() - - with open(os.path.join(external_dir_2, 'fileY'), 'w+') as f: - f.close() - - # take DELTA backup and swap external_dir_2 and external_dir_1 - # in external_dir_str - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir_2, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir_1)]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - # Restore Delta backup - 
node.cleanup() - shutil.rmtree(node.base_dir, ignore_errors=True) - - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - self.compare_pgdata(pgdata, pgdata_restored) - - def test_merge_external_dir_string_order(self): - """ - take FULL backup with not empty external directory - drop external directory content - take DELTA backup with the same external directory - restore DELRA backup, check that restored - external directory is empty - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - external_dir_1 = self.get_tblspace_path(node, 'external_dir_1') - external_dir_2 = self.get_tblspace_path(node, 'external_dir_2') - - # create empty file in external directory - os.mkdir(external_dir_1) - os.chmod(external_dir_1, 0o0700) - with open(os.path.join(external_dir_1, 'fileA'), 'w+') as f: - f.close() - - os.mkdir(external_dir_2) - os.chmod(external_dir_2, 0o0700) - with open(os.path.join(external_dir_2, 'fileZ'), 'w+') as f: - f.close() - - # FULL backup with external directory - self.backup_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir_1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir_2)]) - - with open(os.path.join(external_dir_1, 'fileB'), 'w+') as f: - f.close() - - with open(os.path.join(external_dir_2, 'fileY'), 'w+') as f: - f.close() - - # take DELTA backup and swap external_dir_2 and external_dir_1 - # in external_dir_str - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}".format( - external_dir_2, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir_1)]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - # Merge backups - self.merge_backup(backup_dir, 'node', backup_id=backup_id) - - # Restore - node.cleanup() - shutil.rmtree(node.base_dir, ignore_errors=True) - - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_smart_restore_externals(self): - """ - make node, create database, take full backup with externals, - take incremental backup without externals and restore it, - make sure that files from externals are not copied during restore - https://github.com/postgrespro/pg_probackup/issues/63 - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # fill external directories with data - tmp_id = self.backup_node(backup_dir, 'node', node) - - external_dir_1 = self.get_tblspace_path(node, 'external_dir_1') - external_dir_2 = self.get_tblspace_path(node, 'external_dir_2') - - self.restore_node( - backup_dir, 
'node', node, backup_id=tmp_id, - data_dir=external_dir_1, options=["-j", "4"]) - - self.restore_node( - backup_dir, 'node', node, backup_id=tmp_id, - data_dir=external_dir_2, options=["-j", "4"]) - - self.delete_pb(backup_dir, 'node', backup_id=tmp_id) - - # create database - node.safe_psql( - "postgres", - "CREATE DATABASE testdb") - - # take FULL backup - full_id = self.backup_node(backup_dir, 'node', node) - - # drop database - node.safe_psql( - "postgres", - "DROP DATABASE testdb") - - # take PAGE backup - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # restore PAGE backup - node.cleanup() - self.restore_node( - backup_dir, 'node', node, backup_id=page_id, - options=['--no-validate', '--log-level-file=VERBOSE']) - - logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(logfile, 'r') as f: - logfile_content = f.read() - - # get delta between FULL and PAGE filelists - filelist_full = self.get_backup_filelist( - backup_dir, 'node', full_id) - - filelist_page = self.get_backup_filelist( - backup_dir, 'node', page_id) - - filelist_diff = self.get_backup_filelist_diff( - filelist_full, filelist_page) - - for file in filelist_diff: - self.assertNotIn(file, logfile_content) - - # @unittest.skip("skip") - def test_external_validation(self): - """ - make node, create database, - take full backup with external directory, - corrupt external file in backup, - run validate which should fail - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # take temp FULL backup - tmp_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - external_dir = self.get_tblspace_path(node, 'external_dir') - - # fill external directories with data - self.restore_node( - backup_dir, 'node', node, backup_id=tmp_id, - data_dir=external_dir, options=["-j", "4"]) - - self.delete_pb(backup_dir, 'node', backup_id=tmp_id) - - # take FULL backup - full_id = self.backup_node( - backup_dir, 'node', node, - options=[ - '--stream', '-E', "{0}".format(external_dir)]) - - # Corrupt file - file = os.path.join( - backup_dir, 'backups', 'node', full_id, - 'external_directories', 'externaldir1', 'postgresql.auto.conf') - - with open(file, "r+b", 0) as f: - f.seek(42) - f.write(b"blah") - f.flush() - f.close - - try: - self.validate_pb(backup_dir) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because file in external dir is corrupted" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Invalid CRC of backup file', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'CORRUPT', - self.show_pb(backup_dir, 'node', full_id)['status'], - 'Backup STATUS should be "CORRUPT"') diff --git a/tests/false_positive_test.py b/tests/false_positive_test.py deleted file mode 100644 index fbb785c60..000000000 --- a/tests/false_positive_test.py +++ /dev/null @@ -1,337 +0,0 @@ -import unittest -import os -from time import sleep - -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from datetime import datetime, timedelta -import subprocess - - -class 
FalsePositive(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - @unittest.expectedFailure - def test_validate_wal_lost_segment(self): - """ - Loose segment located between backups. ExpectedFailure. This is BUG - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - - # make some wals - node.pgbench_init(scale=5) - - # delete last wal segment - wals_dir = os.path.join(backup_dir, "wal", 'node') - wals = [f for f in os.listdir(wals_dir) if os.path.isfile( - os.path.join(wals_dir, f)) and not f.endswith('.backup')] - wals = map(int, wals) - os.remove(os.path.join(wals_dir, '0000000' + str(max(wals)))) - - # We just lost a wal segment and know nothing about it - self.backup_node(backup_dir, 'node', node) - self.assertTrue( - 'validation completed successfully' in self.validate_pb( - backup_dir, 'node')) - ######## - - @unittest.expectedFailure - # Need to force validation of ancestor-chain - def test_incremental_backup_corrupt_full_1(self): - """page-level backup with corrupted full backup""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - backup_id = self.backup_node(backup_dir, 'node', node) - file = os.path.join( - backup_dir, "backups", "node", - backup_id.decode("utf-8"), "database", "postgresql.conf") - os.remove(file) - - try: - self.backup_node(backup_dir, 'node', node, backup_type="page") - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be " - "possible without valid full backup.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertEqual( - e.message, - 'ERROR: Valid full backup on current timeline is not found. ' - 'Create new FULL backup before an incremental one.\n', - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertFalse( - True, - "Expecting Error because page backup should not be " - "possible without valid full backup.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertEqual( - e.message, - 'ERROR: Valid full backup on current timeline is not found. 
' - 'Create new FULL backup before an incremental one.\n', - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['Status'], "ERROR") - - # @unittest.skip("skip") - @unittest.expectedFailure - def test_pg_10_waldir(self): - """ - test group access for PG >= 11 - """ - if self.pg_config_version < self.version_to_num('10.0'): - self.skipTest('You need PostgreSQL >= 10 for this test') - - wal_dir = os.path.join( - os.path.join(self.tmp_path, self.module_name, self.fname), 'wal_dir') - import shutil - shutil.rmtree(wal_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=[ - '--data-checksums', - '--waldir={0}'.format(wal_dir)]) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # take FULL backup - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - pgdata = self.pgdata_content(node.data_dir) - - # restore backup - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored) - - # compare pgdata permissions - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - self.assertTrue( - os.path.islink(os.path.join(node_restored.data_dir, 'pg_wal')), - 'pg_wal should be symlink') - - @unittest.expectedFailure - # @unittest.skip("skip") - def test_recovery_target_time_backup_victim(self): - """ - Check that for validation to recovery target - probackup chooses valid backup - https://github.com/postgrespro/pg_probackup/issues/104 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "create table t_heap as select 1 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - - target_time = node.safe_psql( - "postgres", - "select now()").rstrip() - - node.safe_psql( - "postgres", - "create table t_heap1 as select 1 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,100) i") - - gdb = self.backup_node(backup_dir, 'node', node, gdb=True) - - # Attention! 
This breakpoint is set to a probackup internal fuction, not a postgres core one - gdb.set_breakpoint('pg_stop_backup') - gdb.run_until_break() - gdb.remove_all_breakpoints() - gdb._execute('signal SIGINT') - gdb.continue_execution_until_error() - - backup_id = self.show_pb(backup_dir, 'node')[1]['id'] - - self.assertEqual( - 'ERROR', - self.show_pb(backup_dir, 'node', backup_id)['status'], - 'Backup STATUS should be "ERROR"') - - self.validate_pb( - backup_dir, 'node', - options=['--recovery-target-time={0}'.format(target_time)]) - - @unittest.expectedFailure - # @unittest.skip("skip") - def test_recovery_target_lsn_backup_victim(self): - """ - Check that for validation to recovery target - probackup chooses valid backup - https://github.com/postgrespro/pg_probackup/issues/104 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "create table t_heap as select 1 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - - node.safe_psql( - "postgres", - "create table t_heap1 as select 1 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,100) i") - - gdb = self.backup_node( - backup_dir, 'node', node, - options=['--log-level-console=LOG'], gdb=True) - - # Attention! This breakpoint is set to a probackup internal fuction, not a postgres core one - gdb.set_breakpoint('pg_stop_backup') - gdb.run_until_break() - gdb.remove_all_breakpoints() - gdb._execute('signal SIGINT') - gdb.continue_execution_until_error() - - backup_id = self.show_pb(backup_dir, 'node')[1]['id'] - - self.assertEqual( - 'ERROR', - self.show_pb(backup_dir, 'node', backup_id)['status'], - 'Backup STATUS should be "ERROR"') - - self.switch_wal_segment(node) - - target_lsn = self.show_pb(backup_dir, 'node', backup_id)['start-lsn'] - - self.validate_pb( - backup_dir, 'node', - options=['--recovery-target-lsn={0}'.format(target_lsn)]) - - # @unittest.skip("skip") - @unittest.expectedFailure - def test_streaming_timeout(self): - """ - Illustrate the problem of loosing exact error - message because our WAL streaming engine is "borrowed" - from pg_receivexlog - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '1h', - 'wal_sender_timeout': '5s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True, - options=['--stream', '--log-level-file=LOG']) - - # Attention! 
This breakpoint is set to a probackup internal fuction, not a postgres core one - gdb.set_breakpoint('pg_stop_backup') - gdb.run_until_break() - - sleep(10) - gdb.continue_execution_until_error() - gdb._execute('detach') - sleep(2) - - log_file_path = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(log_file_path) as f: - log_content = f.read() - - self.assertIn( - 'could not receive data from WAL stream', - log_content) - - self.assertIn( - 'ERROR: Problem in receivexlog', - log_content) - - # @unittest.skip("skip") - @unittest.expectedFailure - def test_validate_all_empty_catalog(self): - """ - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - - try: - self.validate_pb(backup_dir) - self.assertEqual( - 1, 0, - "Expecting Error because backup_dir is empty.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: This backup catalog contains no backup instances', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) diff --git a/tests/incr_restore_test.py b/tests/incr_restore_test.py deleted file mode 100644 index 613e4dd36..000000000 --- a/tests/incr_restore_test.py +++ /dev/null @@ -1,2300 +0,0 @@ -import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -import subprocess -from datetime import datetime -import sys -from time import sleep -from datetime import datetime, timedelta -import hashlib -import shutil -import json -from testgres import QueryException - - -class IncrRestoreTest(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - def test_basic_incr_restore(self): - """incremental restore in CHECKSUM mode""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=50) - - self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - pgbench.stdout.close() - - self.backup_node(backup_dir, 'node', node, backup_type='page') - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '10', '-c', '1']) - pgbench.wait() - pgbench.stdout.close() - - self.backup_node(backup_dir, 'node', node, backup_type='page') - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - pgbench.stdout.close() - - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') - - pgdata = self.pgdata_content(node.data_dir) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - pgbench.stdout.close() - - node.stop() - - self.restore_node( - backup_dir, 'node', node, - options=["-j", "4", "--incremental-mode=checksum"]) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # 
@unittest.skip("skip") - def test_basic_incr_restore_into_missing_directory(self): - """""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=10) - - self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - pgbench.stdout.close() - - self.backup_node(backup_dir, 'node', node, backup_type='page') - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '10', '-c', '1']) - pgbench.wait() - pgbench.stdout.close() - - self.backup_node(backup_dir, 'node', node, backup_type='page') - - pgdata = self.pgdata_content(node.data_dir) - - node.cleanup() - - self.restore_node( - backup_dir, 'node', node, - options=["-j", "4", "--incremental-mode=checksum"]) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_checksum_corruption_detection(self): - """ - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=10) - - self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - pgbench.stdout.close() - - self.backup_node(backup_dir, 'node', node, backup_type='page') - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '10', '-c', '1']) - pgbench.wait() - pgbench.stdout.close() - - self.backup_node(backup_dir, 'node', node, backup_type='page') - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - pgbench.stdout.close() - - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') - - pgdata = self.pgdata_content(node.data_dir) - - node.stop() - - self.restore_node( - backup_dir, 'node', node, - options=["-j", "4", "--incremental-mode=lsn"]) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_incr_restore_with_tablespace(self): - """ - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - tblspace = self.get_tblspace_path(node, 'tblspace') - some_directory = self.get_tblspace_path(node, 'some_directory') - - # stuff new destination with garbage - self.restore_node(backup_dir, 'node', node, data_dir=some_directory) - - self.create_tblspace_in_node(node, 'tblspace') - 
node.pgbench_init(scale=10, tablespace='tblspace') - - self.backup_node(backup_dir, 'node', node, options=['--stream']) - pgdata = self.pgdata_content(node.data_dir) - - node.stop() - - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--incremental-mode=checksum", "--force", - "-T{0}={1}".format(tblspace, some_directory)]) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_incr_restore_with_tablespace_1(self): - """recovery to target timeline""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - set_replication=True) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - tblspace = self.get_tblspace_path(node, 'tblspace') - some_directory = self.get_tblspace_path(node, 'some_directory') - - self.restore_node(backup_dir, 'node', node, data_dir=some_directory) - - self.create_tblspace_in_node(node, 'tblspace') - node.pgbench_init(scale=10, tablespace='tblspace') - - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - pgbench.stdout.close() - - self.backup_node( - backup_dir, 'node', node, backup_type='delta', options=['--stream']) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - pgbench.stdout.close() - - self.backup_node( - backup_dir, 'node', node, backup_type='delta', options=['--stream']) - - pgdata = self.pgdata_content(node.data_dir) - - node.stop() - - self.restore_node( - backup_dir, 'node', node, - options=["-j", "4", "--incremental-mode=checksum"]) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_incr_restore_with_tablespace_2(self): - """ - If "--tablespace-mapping" option is used with incremental restore, - then new directory must be empty. 
- """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - set_replication=True) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - node_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_1')) - - # fill node1 with data - out = self.restore_node( - backup_dir, 'node', node, - data_dir=node_1.data_dir, - options=['--incremental-mode=checksum', '--force']) - - self.assertIn("WARNING: Backup catalog was initialized for system id", out) - - tblspace = self.get_tblspace_path(node, 'tblspace') - self.create_tblspace_in_node(node, 'tblspace') - node.pgbench_init(scale=5, tablespace='tblspace') - - node.safe_psql( - 'postgres', - 'vacuum') - - self.backup_node(backup_dir, 'node', node, backup_type='delta', options=['--stream']) - - pgdata = self.pgdata_content(node.data_dir) - - try: - self.restore_node( - backup_dir, 'node', node, - data_dir=node_1.data_dir, - options=['--incremental-mode=checksum', '-T{0}={1}'.format(tblspace, tblspace)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because remapped directory is not empty.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Remapped tablespace destination is not empty', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - out = self.restore_node( - backup_dir, 'node', node, - data_dir=node_1.data_dir, - options=[ - '--force', '--incremental-mode=checksum', - '-T{0}={1}'.format(tblspace, tblspace)]) - - pgdata_restored = self.pgdata_content(node_1.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_incr_restore_with_tablespace_3(self): - """ - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.create_tblspace_in_node(node, 'tblspace1') - node.pgbench_init(scale=10, tablespace='tblspace1') - - # take backup with tblspace1 - self.backup_node(backup_dir, 'node', node, options=['--stream']) - pgdata = self.pgdata_content(node.data_dir) - - self.drop_tblspace(node, 'tblspace1') - - self.create_tblspace_in_node(node, 'tblspace2') - node.pgbench_init(scale=10, tablespace='tblspace2') - - node.stop() - - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", - "--incremental-mode=checksum"]) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_incr_restore_with_tablespace_4(self): - """ - Check that system ID mismatch is detected, - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.create_tblspace_in_node(node, 
'tblspace1') - node.pgbench_init(scale=10, tablespace='tblspace1') - - # take backup of node1 with tblspace1 - self.backup_node(backup_dir, 'node', node, options=['--stream']) - pgdata = self.pgdata_content(node.data_dir) - - self.drop_tblspace(node, 'tblspace1') - node.cleanup() - - # recreate node - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - node.slow_start() - - self.create_tblspace_in_node(node, 'tblspace1') - node.pgbench_init(scale=10, tablespace='tblspace1') - node.stop() - - try: - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", - "--incremental-mode=checksum"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because destination directory has wrong system id.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup catalog was initialized for system id', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'ERROR: Incremental restore is not allowed', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - out = self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--force", - "--incremental-mode=checksum"]) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_incr_restore_with_tablespace_5(self): - """ - More complicated case, we restore backup - with tablespace, which we remap into directory - with some old content, that belongs to an instance - with different system id. 
- """ - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node1) - node1.slow_start() - - self.create_tblspace_in_node(node1, 'tblspace') - node1.pgbench_init(scale=10, tablespace='tblspace') - - # take backup of node1 with tblspace - self.backup_node(backup_dir, 'node', node1, options=['--stream']) - pgdata = self.pgdata_content(node1.data_dir) - - node1.stop() - - # recreate node - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2'), - set_replication=True, - initdb_params=['--data-checksums']) - node2.slow_start() - - self.create_tblspace_in_node(node2, 'tblspace') - node2.pgbench_init(scale=10, tablespace='tblspace') - node2.stop() - - tblspc1_path = self.get_tblspace_path(node1, 'tblspace') - tblspc2_path = self.get_tblspace_path(node2, 'tblspace') - - out = self.restore_node( - backup_dir, 'node', node1, - options=[ - "-j", "4", "--force", - "--incremental-mode=checksum", - "-T{0}={1}".format(tblspc1_path, tblspc2_path)]) - - # check that tblspc1_path is empty - self.assertFalse( - os.listdir(tblspc1_path), - "Dir is not empty: '{0}'".format(tblspc1_path)) - - pgdata_restored = self.pgdata_content(node1.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_incr_restore_with_tablespace_6(self): - """ - Empty pgdata, not empty tablespace - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.create_tblspace_in_node(node, 'tblspace') - node.pgbench_init(scale=10, tablespace='tblspace') - - # take backup of node with tblspace - self.backup_node(backup_dir, 'node', node, options=['--stream']) - pgdata = self.pgdata_content(node.data_dir) - - node.cleanup() - - try: - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", - "--incremental-mode=checksum"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because there is running postmaster " - "process in destination directory.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: PGDATA is empty, but tablespace destination is not', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - out = self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--force", - "--incremental-mode=checksum"]) - - self.assertIn( - "INFO: Destination directory and tablespace directories are empty, " - "disable incremental restore", out) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_incr_restore_with_tablespace_7(self): - """ - Restore backup without tablespace into - PGDATA with tablespace. 
- """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # take backup of node with tblspace - self.backup_node(backup_dir, 'node', node, options=['--stream']) - pgdata = self.pgdata_content(node.data_dir) - - self.create_tblspace_in_node(node, 'tblspace') - node.pgbench_init(scale=5, tablespace='tblspace') - node.stop() - -# try: -# self.restore_node( -# backup_dir, 'node', node, -# options=[ -# "-j", "4", -# "--incremental-mode=checksum"]) -# # we should die here because exception is what we expect to happen -# self.assertEqual( -# 1, 0, -# "Expecting Error because there is running postmaster " -# "process in destination directory.\n " -# "Output: {0} \n CMD: {1}".format( -# repr(self.output), self.cmd)) -# except ProbackupException as e: -# self.assertIn( -# 'ERROR: PGDATA is empty, but tablespace destination is not', -# e.message, -# '\n Unexpected Error Message: {0}\n CMD: {1}'.format( -# repr(e.message), self.cmd)) - - out = self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--incremental-mode=checksum"]) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_basic_incr_restore_sanity(self): - """recovery to target timeline""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - set_replication=True) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - try: - self.restore_node( - backup_dir, 'node', node, - options=["-j", "4", "--incremental-mode=checksum"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because there is running postmaster " - "process in destination directory.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Postmaster with pid', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'ERROR: Incremental restore is not allowed', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - node_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_1')) - - try: - self.restore_node( - backup_dir, 'node', node_1, data_dir=node_1.data_dir, - options=["-j", "4", "--incremental-mode=checksum"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because destination directory has wrong system id.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup catalog was initialized for system id', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'ERROR: Incremental restore is not allowed', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # 
@unittest.skip("skip") - def test_incr_checksum_restore(self): - """ - /----C-----D - ------A----B---*--------X - - X - is instance, we want to return it to C state. - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'wal_log_hints': 'on'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=50) - self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - self.backup_node(backup_dir, 'node', node, backup_type='page') - - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - xid = node.safe_psql( - 'postgres', - 'select txid_current()').decode('utf-8').rstrip() - - # --A-----B--------X - pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) - pgbench.wait() - node.stop(['-m', 'immediate', '-D', node.data_dir]) - - node_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_1')) - node_1.cleanup() - - self.restore_node( - backup_dir, 'node', node_1, data_dir=node_1.data_dir, - options=[ - '--recovery-target-action=promote', - '--recovery-target-xid={0}'.format(xid)]) - - self.set_auto_conf(node_1, {'port': node_1.port}) - node_1.slow_start() - - # /-- - # --A-----B----*----X - pgbench = node_1.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - # /--C - # --A-----B----*----X - self.backup_node(backup_dir, 'node', node_1, - data_dir=node_1.data_dir, backup_type='page') - - # /--C------ - # --A-----B----*----X - pgbench = node_1.pgbench(options=['-T', '50', '-c', '1']) - pgbench.wait() - - # /--C------D - # --A-----B----*----X - self.backup_node(backup_dir, 'node', node_1, - data_dir=node_1.data_dir, backup_type='page') - - pgdata = self.pgdata_content(node_1.data_dir) - - self.restore_node( - backup_dir, 'node', node, - options=["-j", "4", "--incremental-mode=checksum"]) - - pgdata_restored = self.pgdata_content(node.data_dir) - - self.set_auto_conf(node, {'port': node.port}) - node.slow_start() - - self.compare_pgdata(pgdata, pgdata_restored) - - - # @unittest.skip("skip") - def test_incr_lsn_restore(self): - """ - /----C-----D - ------A----B---*--------X - - X - is instance, we want to return it to C state. 
- """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'wal_log_hints': 'on'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=50) - self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - self.backup_node(backup_dir, 'node', node, backup_type='page') - - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - xid = node.safe_psql( - 'postgres', - 'select txid_current()').decode('utf-8').rstrip() - - # --A-----B--------X - pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) - pgbench.wait() - node.stop(['-m', 'immediate', '-D', node.data_dir]) - - node_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_1')) - node_1.cleanup() - - self.restore_node( - backup_dir, 'node', node_1, data_dir=node_1.data_dir, - options=[ - '--recovery-target-action=promote', - '--recovery-target-xid={0}'.format(xid)]) - - self.set_auto_conf(node_1, {'port': node_1.port}) - node_1.slow_start() - - # /-- - # --A-----B----*----X - pgbench = node_1.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - # /--C - # --A-----B----*----X - self.backup_node(backup_dir, 'node', node_1, - data_dir=node_1.data_dir, backup_type='page') - - # /--C------ - # --A-----B----*----X - pgbench = node_1.pgbench(options=['-T', '50', '-c', '1']) - pgbench.wait() - - # /--C------D - # --A-----B----*----X - self.backup_node(backup_dir, 'node', node_1, - data_dir=node_1.data_dir, backup_type='page') - - pgdata = self.pgdata_content(node_1.data_dir) - - self.restore_node( - backup_dir, 'node', node, options=["-j", "4", "--incremental-mode=lsn"]) - - pgdata_restored = self.pgdata_content(node.data_dir) - - self.set_auto_conf(node, {'port': node.port}) - node.slow_start() - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_incr_lsn_sanity(self): - """ - /----A-----B - F------*--------X - - X - is instance, we want to return it to state B. - fail is expected behaviour in case of lsn restore. 
- """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'wal_log_hints': 'on'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - node.pgbench_init(scale=10) - - node_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_1')) - node_1.cleanup() - - self.restore_node( - backup_dir, 'node', node_1, data_dir=node_1.data_dir) - - self.set_auto_conf(node_1, {'port': node_1.port}) - node_1.slow_start() - - pgbench = node_1.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - self.backup_node(backup_dir, 'node', node_1, - data_dir=node_1.data_dir, backup_type='full') - - pgbench = node_1.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - page_id = self.backup_node(backup_dir, 'node', node_1, - data_dir=node_1.data_dir, backup_type='page') - - node.stop() - - try: - self.restore_node( - backup_dir, 'node', node, data_dir=node.data_dir, - options=["-j", "4", "--incremental-mode=lsn"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because incremental restore in lsn mode is impossible\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Cannot perform incremental restore of " - "backup chain {0} in 'lsn' mode".format(page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_incr_checksum_sanity(self): - """ - /----A-----B - F------*--------X - - X - is instance, we want to return it to state B. 
- """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - node.pgbench_init(scale=20) - - node_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_1')) - node_1.cleanup() - - self.restore_node( - backup_dir, 'node', node_1, data_dir=node_1.data_dir) - - self.set_auto_conf(node_1, {'port': node_1.port}) - node_1.slow_start() - - pgbench = node_1.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - self.backup_node(backup_dir, 'node', node_1, - data_dir=node_1.data_dir, backup_type='full') - - pgbench = node_1.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - page_id = self.backup_node(backup_dir, 'node', node_1, - data_dir=node_1.data_dir, backup_type='page') - pgdata = self.pgdata_content(node_1.data_dir) - - node.stop() - - self.restore_node( - backup_dir, 'node', node, data_dir=node.data_dir, - options=["-j", "4", "--incremental-mode=checksum"]) - - pgdata_restored = self.pgdata_content(node.data_dir) - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_incr_checksum_corruption_detection(self): - """ - check that corrupted page got detected and replaced - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), -# initdb_params=['--data-checksums'], - pg_options={'wal_log_hints': 'on'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - node.pgbench_init(scale=20) - - pgbench = node.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - self.backup_node(backup_dir, 'node', node, - data_dir=node.data_dir, backup_type='full') - - heap_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('pgbench_accounts')").decode('utf-8').rstrip() - - pgbench = node.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - page_id = self.backup_node(backup_dir, 'node', node, - data_dir=node.data_dir, backup_type='page') - - pgdata = self.pgdata_content(node.data_dir) - - node.stop() - - path = os.path.join(node.data_dir, heap_path) - with open(path, "rb+", 0) as f: - f.seek(22000) - f.write(b"bla") - f.flush() - f.close - - self.restore_node( - backup_dir, 'node', node, data_dir=node.data_dir, - options=["-j", "4", "--incremental-mode=checksum"]) - - pgdata_restored = self.pgdata_content(node.data_dir) - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_incr_lsn_corruption_detection(self): - """ - check that corrupted page got detected and replaced - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'wal_log_hints': 'on'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - node.pgbench_init(scale=20) - - pgbench = 
node.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - self.backup_node(backup_dir, 'node', node, - data_dir=node.data_dir, backup_type='full') - - heap_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('pgbench_accounts')").decode('utf-8').rstrip() - - pgbench = node.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - page_id = self.backup_node(backup_dir, 'node', node, - data_dir=node.data_dir, backup_type='page') - - pgdata = self.pgdata_content(node.data_dir) - - node.stop() - - path = os.path.join(node.data_dir, heap_path) - with open(path, "rb+", 0) as f: - f.seek(22000) - f.write(b"bla") - f.flush() - f.close - - self.restore_node( - backup_dir, 'node', node, data_dir=node.data_dir, - options=["-j", "4", "--incremental-mode=lsn"]) - - pgdata_restored = self.pgdata_content(node.data_dir) - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_incr_restore_multiple_external(self): - """check that cmdline has priority over config""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - external_dir1 = self.get_tblspace_path(node, 'external_dir1') - external_dir2 = self.get_tblspace_path(node, 'external_dir2') - - # FULL backup - node.pgbench_init(scale=20) - self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4"]) - - # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) - - self.set_config( - backup_dir, 'node', - options=['-E{0}{1}{2}'.format( - external_dir1, self.EXTERNAL_DIRECTORY_DELIMITER, external_dir2)]) - - # cmdline option MUST override options in config - self.backup_node( - backup_dir, 'node', node, - backup_type='full', options=["-j", "4"]) - - pgbench = node.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - # cmdline option MUST override options in config - self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=["-j", "4"]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - pgbench = node.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - node.stop() - - self.restore_node( - backup_dir, 'node', node, - options=["-j", "4", '--incremental-mode=checksum']) - - pgdata_restored = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_incr_lsn_restore_multiple_external(self): - """check that cmdline has priority over config""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - external_dir1 = self.get_tblspace_path(node, 'external_dir1') - external_dir2 = self.get_tblspace_path(node, 'external_dir2') - - # FULL backup - 
node.pgbench_init(scale=20) - self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4"]) - - # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) - - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) - - self.set_config( - backup_dir, 'node', - options=['-E{0}{1}{2}'.format( - external_dir1, self.EXTERNAL_DIRECTORY_DELIMITER, external_dir2)]) - - # cmdline option MUST override options in config - self.backup_node( - backup_dir, 'node', node, - backup_type='full', options=["-j", "4"]) - - pgbench = node.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - # cmdline option MUST override options in config - self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=["-j", "4"]) - - pgdata = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - - pgbench = node.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - node.stop() - - self.restore_node( - backup_dir, 'node', node, - options=["-j", "4", '--incremental-mode=lsn']) - - pgdata_restored = self.pgdata_content( - node.base_dir, exclude_dirs=['logs']) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_incr_lsn_restore_backward(self): - """ - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'wal_log_hints': 'on', 'hot_standby': 'on'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - node.pgbench_init(scale=2) - full_id = self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4"]) - - full_pgdata = self.pgdata_content(node.data_dir) - - pgbench = node.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - page_id = self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=["-j", "4"]) - - page_pgdata = self.pgdata_content(node.data_dir) - - pgbench = node.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - delta_id = self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=["-j", "4"]) - - delta_pgdata = self.pgdata_content(node.data_dir) - - pgbench = node.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - node.stop() - - self.restore_node( - backup_dir, 'node', node, backup_id=full_id, - options=[ - "-j", "4", - '--incremental-mode=lsn', - '--recovery-target=immediate', - '--recovery-target-action=pause']) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(full_pgdata, pgdata_restored) - - node.slow_start(replica=True) - node.stop() - - try: - self.restore_node( - backup_dir, 'node', node, backup_id=page_id, - options=[ - "-j", "4", '--incremental-mode=lsn', - '--recovery-target=immediate', '--recovery-target-action=pause']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because incremental restore in lsn mode is impossible\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "Cannot perform incremental restore of backup chain", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - 
repr(e.message), self.cmd)) - - self.restore_node( - backup_dir, 'node', node, backup_id=page_id, - options=[ - "-j", "4", '--incremental-mode=checksum', - '--recovery-target=immediate', '--recovery-target-action=pause']) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(page_pgdata, pgdata_restored) - - node.slow_start(replica=True) - node.stop() - - self.restore_node( - backup_dir, 'node', node, backup_id=delta_id, - options=[ - "-j", "4", - '--incremental-mode=lsn', - '--recovery-target=immediate', - '--recovery-target-action=pause']) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(delta_pgdata, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_incr_checksum_restore_backward(self): - """ - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'hot_standby': 'on'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - node.pgbench_init(scale=20) - full_id = self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4"]) - - full_pgdata = self.pgdata_content(node.data_dir) - - pgbench = node.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - page_id = self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=["-j", "4"]) - - page_pgdata = self.pgdata_content(node.data_dir) - - pgbench = node.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - delta_id = self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=["-j", "4"]) - - delta_pgdata = self.pgdata_content(node.data_dir) - - pgbench = node.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - node.stop() - - self.restore_node( - backup_dir, 'node', node, backup_id=full_id, - options=[ - "-j", "4", - '--incremental-mode=checksum', - '--recovery-target=immediate', - '--recovery-target-action=pause']) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(full_pgdata, pgdata_restored) - - node.slow_start(replica=True) - node.stop() - - self.restore_node( - backup_dir, 'node', node, backup_id=page_id, - options=[ - "-j", "4", - '--incremental-mode=checksum', - '--recovery-target=immediate', - '--recovery-target-action=pause']) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(page_pgdata, pgdata_restored) - - node.slow_start(replica=True) - node.stop() - - self.restore_node( - backup_dir, 'node', node, backup_id=delta_id, - options=[ - "-j", "4", - '--incremental-mode=checksum', - '--recovery-target=immediate', - '--recovery-target-action=pause']) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(delta_pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_make_replica_via_incr_checksum_restore(self): - """ - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums']) - - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - 
self.add_instance(backup_dir, 'node', master) - self.set_archiving(backup_dir, 'node', master, replica=True) - master.slow_start() - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - master.pgbench_init(scale=20) - - self.backup_node(backup_dir, 'node', master) - - self.restore_node( - backup_dir, 'node', replica, options=['-R']) - - # Settings for Replica - self.set_replica(master, replica, synchronous=False) - - replica.slow_start(replica=True) - - pgbench = master.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - # PROMOTIONS - replica.promote() - new_master = replica - - # old master is going a bit further - old_master = master - pgbench = old_master.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - old_master.stop() - - pgbench = new_master.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - # take backup from new master - self.backup_node( - backup_dir, 'node', new_master, - data_dir=new_master.data_dir, backup_type='page') - - # restore old master as replica - self.restore_node( - backup_dir, 'node', old_master, data_dir=old_master.data_dir, - options=['-R', '--incremental-mode=checksum']) - - self.set_replica(new_master, old_master, synchronous=True) - - old_master.slow_start(replica=True) - - pgbench = new_master.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - # @unittest.skip("skip") - def test_make_replica_via_incr_lsn_restore(self): - """ - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums']) - - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', master) - self.set_archiving(backup_dir, 'node', master, replica=True) - master.slow_start() - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - master.pgbench_init(scale=20) - - self.backup_node(backup_dir, 'node', master) - - self.restore_node( - backup_dir, 'node', replica, options=['-R']) - - # Settings for Replica - self.set_replica(master, replica, synchronous=False) - - replica.slow_start(replica=True) - - pgbench = master.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - # PROMOTIONS - replica.promote() - new_master = replica - - # old master is going a bit further - old_master = master - pgbench = old_master.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - old_master.stop() - - pgbench = new_master.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - # take backup from new master - self.backup_node( - backup_dir, 'node', new_master, - data_dir=new_master.data_dir, backup_type='page') - - # restore old master as replica - self.restore_node( - backup_dir, 'node', old_master, data_dir=old_master.data_dir, - options=['-R', '--incremental-mode=lsn']) - - self.set_replica(new_master, old_master, synchronous=True) - - old_master.slow_start(replica=True) - - pgbench = new_master.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_incr_checksum_long_xact(self): - """ - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - 
set_replication=True) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - 'postgres', - 'create extension pageinspect') - - # FULL backup - con = node.connect("postgres") - con.execute("CREATE TABLE t1 (a int)") - con.commit() - - - con.execute("INSERT INTO t1 values (1)") - con.commit() - - # leave uncommitted - con2 = node.connect("postgres") - con.execute("INSERT INTO t1 values (2)") - con2.execute("INSERT INTO t1 values (3)") - - full_id = self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4", "--stream"]) - - self.backup_node( - backup_dir, 'node', node, - backup_type="delta", options=["-j", "4", "--stream"]) - - con.commit() - - node.safe_psql( - 'postgres', - 'select * from t1') - - con2.commit() - node.safe_psql( - 'postgres', - 'select * from t1') - - node.stop() - - self.restore_node( - backup_dir, 'node', node, backup_id=full_id, - options=["-j", "4", '--incremental-mode=checksum']) - - node.slow_start() - - self.assertEqual( - node.safe_psql( - 'postgres', - 'select count(*) from t1').decode('utf-8').rstrip(), - '1') - - # @unittest.skip("skip") - # @unittest.expectedFailure - # This test will pass with Enterprise - # because it has checksums enabled by default - @unittest.skipIf(ProbackupTest.enterprise, 'skip') - def test_incr_lsn_long_xact_1(self): - """ - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - 'postgres', - 'create extension pageinspect') - - # FULL backup - con = node.connect("postgres") - con.execute("CREATE TABLE t1 (a int)") - con.commit() - - - con.execute("INSERT INTO t1 values (1)") - con.commit() - - # leave uncommitted - con2 = node.connect("postgres") - con.execute("INSERT INTO t1 values (2)") - con2.execute("INSERT INTO t1 values (3)") - - full_id = self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4", "--stream"]) - - self.backup_node( - backup_dir, 'node', node, - backup_type="delta", options=["-j", "4", "--stream"]) - - con.commit() - - # when does the LSN get stamped when the checksum gets updated?
- node.safe_psql( - 'postgres', - 'select * from t1') - - con2.commit() - node.safe_psql( - 'postgres', - 'select * from t1') - - node.stop() - - try: - self.restore_node( - backup_dir, 'node', node, backup_id=full_id, - options=["-j", "4", '--incremental-mode=lsn']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because incremental restore in lsn mode is impossible\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Incremental restore in 'lsn' mode require data_checksums to be " - "enabled in destination data directory", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_incr_lsn_long_xact_2(self): - """ - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'full_page_writes': 'off', - 'wal_log_hints': 'off'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - 'postgres', - 'create extension pageinspect') - - # FULL backup - con = node.connect("postgres") - con.execute("CREATE TABLE t1 (a int)") - con.commit() - - - con.execute("INSERT INTO t1 values (1)") - con.commit() - - # leave uncommited - con2 = node.connect("postgres") - con.execute("INSERT INTO t1 values (2)") - con2.execute("INSERT INTO t1 values (3)") - - full_id = self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4", "--stream"]) - - self.backup_node( - backup_dir, 'node', node, - backup_type="delta", options=["-j", "4", "--stream"]) - -# print(node.safe_psql( -# 'postgres', -# "select * from page_header(get_raw_page('t1', 0))")) - - con.commit() - - # when does LSN gets stamped when checksum gets updated ? 
- node.safe_psql( - 'postgres', - 'select * from t1') - -# print(node.safe_psql( -# 'postgres', -# "select * from page_header(get_raw_page('t1', 0))")) - - con2.commit() - node.safe_psql( - 'postgres', - 'select * from t1') - -# print(node.safe_psql( -# 'postgres', -# "select * from page_header(get_raw_page('t1', 0))")) - - node.stop() - - self.restore_node( - backup_dir, 'node', node, backup_id=full_id, - options=["-j", "4", '--incremental-mode=lsn']) - - node.slow_start() - - self.assertEqual( - node.safe_psql( - 'postgres', - 'select count(*) from t1').decode('utf-8').rstrip(), - '1') - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_incr_restore_zero_size_file_checksum(self): - """ - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - fullpath = os.path.join(node.data_dir, 'simple_file') - with open(fullpath, "w+b", 0) as f: - f.flush() - f.close - - # FULL backup - id1 = self.backup_node( - backup_dir, 'node', node, - options=["-j", "4", "--stream"]) - - pgdata1 = self.pgdata_content(node.data_dir) - - with open(fullpath, "rb+", 0) as f: - f.seek(9000) - f.write(b"bla") - f.flush() - f.close - - id2 = self.backup_node( - backup_dir, 'node', node, - backup_type="delta", options=["-j", "4", "--stream"]) - pgdata2 = self.pgdata_content(node.data_dir) - - with open(fullpath, "w") as f: - f.close() - - id3 = self.backup_node( - backup_dir, 'node', node, - backup_type="delta", options=["-j", "4", "--stream"]) - pgdata3 = self.pgdata_content(node.data_dir) - - node.stop() - - self.restore_node( - backup_dir, 'node', node, backup_id=id1, - options=["-j", "4", '-I', 'checksum']) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata1, pgdata_restored) - - self.restore_node( - backup_dir, 'node', node, backup_id=id2, - options=["-j", "4", '-I', 'checksum']) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata2, pgdata_restored) - - self.restore_node( - backup_dir, 'node', node, backup_id=id3, - options=["-j", "4", '-I', 'checksum']) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata3, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_incr_restore_zero_size_file_lsn(self): - """ - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - fullpath = os.path.join(node.data_dir, 'simple_file') - with open(fullpath, "w+b", 0) as f: - f.flush() - f.close - - # FULL backup - id1 = self.backup_node( - backup_dir, 'node', node, - options=["-j", "4", "--stream"]) - - pgdata1 = self.pgdata_content(node.data_dir) - - with open(fullpath, "rb+", 0) as f: - f.seek(9000) - f.write(b"bla") - f.flush() - f.close - - id2 = self.backup_node( - backup_dir, 'node', node, - backup_type="delta", options=["-j", "4", "--stream"]) - pgdata2 = self.pgdata_content(node.data_dir) - - with open(fullpath, "w") as f: - f.close() - - id3 = self.backup_node( - backup_dir, 'node', node, - backup_type="delta", 
options=["-j", "4", "--stream"]) - pgdata3 = self.pgdata_content(node.data_dir) - - node.stop() - - self.restore_node( - backup_dir, 'node', node, backup_id=id1, - options=["-j", "4", '-I', 'checksum']) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata1, pgdata_restored) - - node.slow_start() - node.stop() - - self.restore_node( - backup_dir, 'node', node, backup_id=id2, - options=["-j", "4", '-I', 'checksum']) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata2, pgdata_restored) - - node.slow_start() - node.stop() - - self.restore_node( - backup_dir, 'node', node, backup_id=id3, - options=["-j", "4", '-I', 'checksum']) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata3, pgdata_restored) - - def test_incremental_partial_restore_exclude_checksum(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - for i in range(1, 10, 1): - node.safe_psql( - 'postgres', - 'CREATE database db{0}'.format(i)) - - db_list_raw = node.safe_psql( - 'postgres', - 'SELECT to_json(a) ' - 'FROM (SELECT oid, datname FROM pg_database) a').rstrip() - - db_list_splitted = db_list_raw.splitlines() - - db_list = {} - for line in db_list_splitted: - line = json.loads(line) - db_list[line['datname']] = line['oid'] - - node.pgbench_init(scale=20) - - # FULL backup - self.backup_node(backup_dir, 'node', node) - pgdata = self.pgdata_content(node.data_dir) - - pgbench = node.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - # PAGE backup - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') - - # restore FULL backup into second node2 - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1')) - node1.cleanup() - - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2')) - node2.cleanup() - - # restore some data into node2 - self.restore_node(backup_dir, 'node', node2) - - # partial restore backup into node1 - self.restore_node( - backup_dir, 'node', - node1, options=[ - "--db-exclude=db1", - "--db-exclude=db5"]) - - pgdata1 = self.pgdata_content(node1.data_dir) - - # partial incremental restore backup into node2 - self.restore_node( - backup_dir, 'node', - node2, options=[ - "--db-exclude=db1", - "--db-exclude=db5", - "-I", "checksum"]) - - pgdata2 = self.pgdata_content(node2.data_dir) - - self.compare_pgdata(pgdata1, pgdata2) - - self.set_auto_conf(node2, {'port': node2.port}) - - node2.slow_start() - - node2.safe_psql( - 'postgres', - 'select 1') - - try: - node2.safe_psql( - 'db1', - 'select 1') - except QueryException as e: - self.assertIn('FATAL', e.message) - - try: - node2.safe_psql( - 'db5', - 'select 1') - except QueryException as e: - self.assertIn('FATAL', e.message) - - with open(node2.pg_log_file, 'r') as f: - output = f.read() - - self.assertNotIn('PANIC', output) - - def test_incremental_partial_restore_exclude_lsn(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - 
self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - for i in range(1, 10, 1): - node.safe_psql( - 'postgres', - 'CREATE database db{0}'.format(i)) - - db_list_raw = node.safe_psql( - 'postgres', - 'SELECT to_json(a) ' - 'FROM (SELECT oid, datname FROM pg_database) a').rstrip() - - db_list_splitted = db_list_raw.splitlines() - - db_list = {} - for line in db_list_splitted: - line = json.loads(line) - db_list[line['datname']] = line['oid'] - - node.pgbench_init(scale=20) - - # FULL backup - self.backup_node(backup_dir, 'node', node) - pgdata = self.pgdata_content(node.data_dir) - - pgbench = node.pgbench(options=['-T', '10', '-c', '1']) - pgbench.wait() - - # PAGE backup - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') - - node.stop() - - # restore FULL backup into second node2 - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1')) - node1.cleanup() - - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2')) - node2.cleanup() - - # restore some data into node2 - self.restore_node(backup_dir, 'node', node2) - - # partial restore backup into node1 - self.restore_node( - backup_dir, 'node', - node1, options=[ - "--db-exclude=db1", - "--db-exclude=db5"]) - - pgdata1 = self.pgdata_content(node1.data_dir) - - # partial incremental restore backup into node2 - node2.port = node.port - node2.slow_start() - node2.stop() - self.restore_node( - backup_dir, 'node', - node2, options=[ - "--db-exclude=db1", - "--db-exclude=db5", - "-I", "lsn"]) - - pgdata2 = self.pgdata_content(node2.data_dir) - - self.compare_pgdata(pgdata1, pgdata2) - - self.set_auto_conf(node2, {'port': node2.port}) - - node2.slow_start() - - node2.safe_psql( - 'postgres', - 'select 1') - - try: - node2.safe_psql( - 'db1', - 'select 1') - except QueryException as e: - self.assertIn('FATAL', e.message) - - try: - node2.safe_psql( - 'db5', - 'select 1') - except QueryException as e: - self.assertIn('FATAL', e.message) - - with open(node2.pg_log_file, 'r') as f: - output = f.read() - - self.assertNotIn('PANIC', output) - - def test_incremental_partial_restore_exclude_tablespace_checksum(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # cat_version = node.get_control_data()["Catalog version number"] - # version_specific_dir = 'PG_' + node.major_version_str + '_' + cat_version - - # PG_10_201707211 - # pg_tblspc/33172/PG_9.5_201510051/16386/ - - self.create_tblspace_in_node(node, 'somedata') - - node_tablespace = self.get_tblspace_path(node, 'somedata') - - tbl_oid = node.safe_psql( - 'postgres', - "SELECT oid " - "FROM pg_tablespace " - "WHERE spcname = 'somedata'").rstrip() - - for i in range(1, 10, 1): - node.safe_psql( - 'postgres', - 'CREATE database db{0} tablespace somedata'.format(i)) - - db_list_raw = node.safe_psql( - 'postgres', - 'SELECT to_json(a) ' - 'FROM (SELECT oid, datname FROM pg_database) a').rstrip() - - db_list_splitted = db_list_raw.splitlines() - - db_list = {} - for line in db_list_splitted: - line = json.loads(line) - db_list[line['datname']] = line['oid'] - - # FULL backup - backup_id = self.backup_node(backup_dir, 'node', 
node) - - # node1 - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1')) - node1.cleanup() - node1_tablespace = self.get_tblspace_path(node1, 'somedata') - - # node2 - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2')) - node2.cleanup() - node2_tablespace = self.get_tblspace_path(node2, 'somedata') - - # in node2 restore full backup - self.restore_node( - backup_dir, 'node', - node2, options=[ - "-T", "{0}={1}".format( - node_tablespace, node2_tablespace)]) - - # partial restore into node1 - self.restore_node( - backup_dir, 'node', - node1, options=[ - "--db-exclude=db1", - "--db-exclude=db5", - "-T", "{0}={1}".format( - node_tablespace, node1_tablespace)]) - - pgdata1 = self.pgdata_content(node1.data_dir) - - # partial incremental restore into node2 - try: - self.restore_node( - backup_dir, 'node', - node2, options=[ - "-I", "checksum", - "--db-exclude=db1", - "--db-exclude=db5", - "-T", "{0}={1}".format( - node_tablespace, node2_tablespace)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because remapped tablespace contain old data .\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Remapped tablespace destination is not empty:', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.restore_node( - backup_dir, 'node', - node2, options=[ - "-I", "checksum", "--force", - "--db-exclude=db1", - "--db-exclude=db5", - "-T", "{0}={1}".format( - node_tablespace, node2_tablespace)]) - - pgdata2 = self.pgdata_content(node2.data_dir) - - self.compare_pgdata(pgdata1, pgdata2) - - self.set_auto_conf(node2, {'port': node2.port}) - node2.slow_start() - - node2.safe_psql( - 'postgres', - 'select 1') - - try: - node2.safe_psql( - 'db1', - 'select 1') - except QueryException as e: - self.assertIn('FATAL', e.message) - - try: - node2.safe_psql( - 'db5', - 'select 1') - except QueryException as e: - self.assertIn('FATAL', e.message) - - with open(node2.pg_log_file, 'r') as f: - output = f.read() - - self.assertNotIn('PANIC', output) - - def test_incremental_pg_filenode_map(self): - """ - https://github.com/postgrespro/pg_probackup/issues/320 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), - initdb_params=['--data-checksums']) - node1.cleanup() - - node.pgbench_init(scale=5) - - # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) - - # in node1 restore full backup - self.restore_node(backup_dir, 'node', node1) - self.set_auto_conf(node1, {'port': node1.port}) - node1.slow_start() - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '10', '-c', '1']) - - pgbench = node1.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '10', '-c', '1']) - - node.safe_psql( - 'postgres', - 'reindex index pg_type_oid_index') - - # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) - - node1.stop() - - # 
incremental restore into node1 - self.restore_node(backup_dir, 'node', node1, options=["-I", "checksum"]) - - self.set_auto_conf(node1, {'port': node1.port}) - node1.slow_start() - - node1.safe_psql( - 'postgres', - 'select 1') - -# check that MinRecPoint and BackupStartLsn are correctly used in case of --incrementa-lsn diff --git a/tests/init_test.py b/tests/init_test.py deleted file mode 100644 index 94b076fef..000000000 --- a/tests/init_test.py +++ /dev/null @@ -1,138 +0,0 @@ -import os -import unittest -from .helpers.ptrack_helpers import dir_files, ProbackupTest, ProbackupException -import shutil - - -class InitTest(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_success(self): - """Success normal init""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node')) - self.init_pb(backup_dir) - self.assertEqual( - dir_files(backup_dir), - ['backups', 'wal'] - ) - self.add_instance(backup_dir, 'node', node) - self.assertIn( - "INFO: Instance 'node' successfully deleted", - self.del_instance(backup_dir, 'node'), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd)) - - # Show non-existing instance - try: - self.show_pb(backup_dir, 'node') - self.assertEqual(1, 0, 'Expecting Error due to show of non-existing instance. Output: {0} \n CMD: {1}'.format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Instance 'node' does not exist in this backup catalog", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(e.message, self.cmd)) - - # Delete non-existing instance - try: - self.del_instance(backup_dir, 'node1') - self.assertEqual(1, 0, 'Expecting Error due to delete of non-existing instance. Output: {0} \n CMD: {1}'.format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Instance 'node1' does not exist in this backup catalog", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(e.message, self.cmd)) - - # Add instance without pgdata - try: - self.run_pb([ - "add-instance", - "--instance=node1", - "-B", backup_dir - ]) - self.assertEqual(1, 0, 'Expecting Error due to adding instance without pgdata. Output: {0} \n CMD: {1}'.format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Required parameter not specified: PGDATA (-D, --pgdata)", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(e.message, self.cmd)) - - # @unittest.skip("skip") - def test_already_exist(self): - """Failure with backup catalog already existed""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node')) - self.init_pb(backup_dir) - try: - self.show_pb(backup_dir, 'node') - self.assertEqual(1, 0, 'Expecting Error due to initialization in non-empty directory. 
Output: {0} \n CMD: {1}'.format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Instance 'node' does not exist in this backup catalog", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_abs_path(self): - """failure with backup catalog should be given as absolute path""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node')) - try: - self.run_pb(["init", "-B", os.path.relpath("%s/backup" % node.base_dir, self.dir_path)]) - self.assertEqual(1, 0, 'Expecting Error due to initialization with non-absolute path in --backup-path. Output: {0} \n CMD: {1}'.format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: -B, --backup-path must be an absolute path", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_add_instance_idempotence(self): - """ - https://github.com/postgrespro/pg_probackup/issues/219 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node')) - self.init_pb(backup_dir) - - self.add_instance(backup_dir, 'node', node) - shutil.rmtree(os.path.join(backup_dir, 'backups', 'node')) - - dir_backups = os.path.join(backup_dir, 'backups', 'node') - dir_wal = os.path.join(backup_dir, 'wal', 'node') - - try: - self.add_instance(backup_dir, 'node', node) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be possible " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Instance 'node' WAL archive directory already exists: ", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - try: - self.add_instance(backup_dir, 'node', node) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be possible " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Instance 'node' WAL archive directory already exists: ", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) diff --git a/tests/locking_test.py b/tests/locking_test.py deleted file mode 100644 index 5367c2610..000000000 --- a/tests/locking_test.py +++ /dev/null @@ -1,629 +0,0 @@ -import unittest -import os -from time import sleep -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException - - -class LockingTest(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_locking_running_validate_1(self): - """ - make node, take full backup, stop it in the middle - run validate, expect it to successfully executed, - concurrent RUNNING backup with pid file and active process is legal - """ - self._check_gdb_flag_or_skip_test() - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, 
self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True) - - gdb.set_breakpoint('backup_non_data_file') - gdb.run_until_break() - - gdb.continue_execution_until_break(20) - - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) - - self.assertEqual( - 'RUNNING', self.show_pb(backup_dir, 'node')[1]['status']) - - validate_output = self.validate_pb( - backup_dir, options=['--log-level-console=LOG']) - - backup_id = self.show_pb(backup_dir, 'node')[1]['id'] - - self.assertIn( - "is using backup {0}, and is still running".format(backup_id), - validate_output, - '\n Unexpected Validate Output: {0}\n'.format(repr(validate_output))) - - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) - - self.assertEqual( - 'RUNNING', self.show_pb(backup_dir, 'node')[1]['status']) - - # Clean after yourself - gdb.kill() - - def test_locking_running_validate_2(self): - """ - make node, take full backup, stop it in the middle, - kill process so no cleanup is done - pid file is in place, - run validate, expect it to not successfully executed, - RUNNING backup with pid file AND without active pid is legal, - but his status must be changed to ERROR and pid file is deleted - """ - self._check_gdb_flag_or_skip_test() - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True) - - gdb.set_breakpoint('backup_non_data_file') - gdb.run_until_break() - - gdb.continue_execution_until_break(20) - - gdb._execute('signal SIGKILL') - gdb.continue_execution_until_error() - - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) - - self.assertEqual( - 'RUNNING', self.show_pb(backup_dir, 'node')[1]['status']) - - backup_id = self.show_pb(backup_dir, 'node')[1]['id'] - - try: - self.validate_pb(backup_dir) - self.assertEqual( - 1, 0, - "Expecting Error because RUNNING backup is no longer active.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "which used backup {0} no longer exists".format( - backup_id) in e.message and - "Backup {0} has status RUNNING, change it " - "to ERROR and skip validation".format( - backup_id) in e.message and - "WARNING: Some backups are not valid" in - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) - - self.assertEqual( - 'ERROR', self.show_pb(backup_dir, 'node')[1]['status']) - - # Clean after yourself - gdb.kill() - - def test_locking_running_validate_2_specific_id(self): - """ - make node, take full backup, stop it in the middle, - kill process so no cleanup is done - pid file is in place, - run validate on this specific backup, - expect it to not successfully executed, - RUNNING backup with pid file AND without active pid is legal, - but his status must be changed to ERROR and pid file is deleted - """ - 
self._check_gdb_flag_or_skip_test() - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True) - - gdb.set_breakpoint('backup_non_data_file') - gdb.run_until_break() - - gdb.continue_execution_until_break(20) - - gdb._execute('signal SIGKILL') - gdb.continue_execution_until_error() - - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) - - self.assertEqual( - 'RUNNING', self.show_pb(backup_dir, 'node')[1]['status']) - - backup_id = self.show_pb(backup_dir, 'node')[1]['id'] - - try: - self.validate_pb(backup_dir, 'node', backup_id) - self.assertEqual( - 1, 0, - "Expecting Error because RUNNING backup is no longer active.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "which used backup {0} no longer exists".format( - backup_id) in e.message and - "Backup {0} has status RUNNING, change it " - "to ERROR and skip validation".format( - backup_id) in e.message and - "ERROR: Backup {0} has status: ERROR".format(backup_id) in - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) - - self.assertEqual( - 'ERROR', self.show_pb(backup_dir, 'node')[1]['status']) - - try: - self.validate_pb(backup_dir, 'node', backup_id) - self.assertEqual( - 1, 0, - "Expecting Error because backup has status ERROR.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup {0} has status: ERROR".format(backup_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.validate_pb(backup_dir) - self.assertEqual( - 1, 0, - "Expecting Error because backup has status ERROR.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "WARNING: Backup {0} has status ERROR. 
Skip validation".format( - backup_id) in e.message and - "WARNING: Some backups are not valid" in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # Clean after yourself - gdb.kill() - - def test_locking_running_3(self): - """ - make node, take full backup, stop it in the middle, - terminate process, delete pid file, - run validate, expect it to not successfully executed, - RUNNING backup without pid file AND without active pid is legal, - his status must be changed to ERROR - """ - self._check_gdb_flag_or_skip_test() - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True) - - gdb.set_breakpoint('backup_non_data_file') - gdb.run_until_break() - - gdb.continue_execution_until_break(20) - - gdb._execute('signal SIGKILL') - gdb.continue_execution_until_error() - - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) - - self.assertEqual( - 'RUNNING', self.show_pb(backup_dir, 'node')[1]['status']) - - backup_id = self.show_pb(backup_dir, 'node')[1]['id'] - - os.remove( - os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.pid')) - - try: - self.validate_pb(backup_dir) - self.assertEqual( - 1, 0, - "Expecting Error because RUNNING backup is no longer active.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "Backup {0} has status RUNNING, change it " - "to ERROR and skip validation".format( - backup_id) in e.message and - "WARNING: Some backups are not valid" in - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) - - self.assertEqual( - 'ERROR', self.show_pb(backup_dir, 'node')[1]['status']) - - # Clean after yourself - gdb.kill() - - def test_locking_restore_locked(self): - """ - make node, take full backup, take two page backups, - launch validate on PAGE1 and stop it in the middle, - launch restore of PAGE2. 
-        Expect restore to succeed because read-only locks
-        do not conflict
-        """
-        self._check_gdb_flag_or_skip_test()
-
-        node = self.make_simple_node(
-            base_dir=os.path.join(self.module_name, self.fname, 'node'),
-            initdb_params=['--data-checksums'])
-
-        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        self.set_archiving(backup_dir, 'node', node)
-        node.slow_start()
-
-        # FULL
-        full_id = self.backup_node(backup_dir, 'node', node)
-
-        # PAGE1
-        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
-
-        # PAGE2
-        self.backup_node(backup_dir, 'node', node, backup_type='page')
-
-        gdb = self.validate_pb(
-            backup_dir, 'node', backup_id=backup_id, gdb=True)
-
-        gdb.set_breakpoint('pgBackupValidate')
-        gdb.run_until_break()
-
-        node.cleanup()
-
-        self.restore_node(backup_dir, 'node', node)
-
-        # Clean after yourself
-        gdb.kill()
-
-    def test_concurrent_delete_and_restore(self):
-        """
-        make node, take full backup, take page backup,
-        launch validate on FULL and stop it in the middle,
-        launch restore of PAGE.
-        Expect restore to fail because validation of
-        intermediate backup is impossible
-        """
-        self._check_gdb_flag_or_skip_test()
-
-        node = self.make_simple_node(
-            base_dir=os.path.join(self.module_name, self.fname, 'node'),
-            initdb_params=['--data-checksums'])
-
-        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        self.set_archiving(backup_dir, 'node', node)
-        node.slow_start()
-
-        # FULL
-        backup_id = self.backup_node(backup_dir, 'node', node)
-
-        # PAGE1
-        restore_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
-
-        gdb = self.delete_pb(
-            backup_dir, 'node', backup_id=backup_id, gdb=True)
-
-        # gdb.set_breakpoint('pgFileDelete')
-        gdb.set_breakpoint('delete_backup_files')
-        gdb.run_until_break()
-
-        node.cleanup()
-
-        try:
-            self.restore_node(
-                backup_dir, 'node', node, options=['--no-validate'])
-            self.assertEqual(
-                1, 0,
-                "Expecting Error because restore without whole chain validation "
-                "is prohibited unless --no-validate provided.\n "
-                "Output: {0} \n CMD: {1}".format(
-                    repr(self.output), self.cmd))
-        except ProbackupException as e:
-            self.assertTrue(
-                "Backup {0} is used without validation".format(
-                    restore_id) in e.message and
-                'is using backup {0}, and is still running'.format(
-                    backup_id) in e.message and
-                'ERROR: Cannot lock backup' in e.message,
-                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
-                    repr(e.message), self.cmd))
-
-        # Clean after yourself
-        gdb.kill()
-
-    def test_locking_concurrent_validate_and_backup(self):
-        """
-        make node, take full backup, launch validate
-        and stop it in the middle, take page backup.
-        Expect PAGE backup to be successfully executed
-        """
-        self._check_gdb_flag_or_skip_test()
-
-        node = self.make_simple_node(
-            base_dir=os.path.join(self.module_name, self.fname, 'node'),
-            initdb_params=['--data-checksums'])
-
-        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        self.set_archiving(backup_dir, 'node', node)
-        node.slow_start()
-
-        # FULL
-        self.backup_node(backup_dir, 'node', node)
-
-        # PAGE2
-        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
-
-        gdb = self.validate_pb(
-            backup_dir, 'node', backup_id=backup_id, gdb=True)
-
-        gdb.set_breakpoint('pgBackupValidate')
-        gdb.run_until_break()
-
-        # This PAGE backup is expected to be successful
-        self.backup_node(backup_dir, 'node', node, backup_type='page')
-
-        # Clean after yourself
-        gdb.kill()
-
-    def test_locking_concurren_restore_and_delete(self):
-        """
-        make node, take full backup, launch restore
-        and stop it in the middle, delete full backup.
-        Expect it to fail.
-        """
-        self._check_gdb_flag_or_skip_test()
-
-        node = self.make_simple_node(
-            base_dir=os.path.join(self.module_name, self.fname, 'node'),
-            initdb_params=['--data-checksums'])
-
-        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        self.set_archiving(backup_dir, 'node', node)
-        node.slow_start()
-
-        # FULL
-        full_id = self.backup_node(backup_dir, 'node', node)
-
-        node.cleanup()
-        gdb = self.restore_node(backup_dir, 'node', node, gdb=True)
-
-        gdb.set_breakpoint('create_data_directories')
-        gdb.run_until_break()
-
-        try:
-            self.delete_pb(backup_dir, 'node', full_id)
-            self.assertEqual(
-                1, 0,
-                "Expecting Error because backup is locked\n "
-                "Output: {0} \n CMD: {1}".format(
-                    repr(self.output), self.cmd))
-        except ProbackupException as e:
-            self.assertIn(
-                "ERROR: Cannot lock backup {0} directory".format(full_id),
-                e.message,
-                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
-                    repr(e.message), self.cmd))
-
-        # Clean after yourself
-        gdb.kill()
-
-    def test_backup_directory_name(self):
-        """
-        """
-        node = self.make_simple_node(
-            base_dir=os.path.join(self.module_name, self.fname, 'node'),
-            initdb_params=['--data-checksums'])
-
-        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        self.set_archiving(backup_dir, 'node', node)
-        node.slow_start()
-
-        # FULL
-        full_id_1 = self.backup_node(backup_dir, 'node', node)
-        page_id_1 = self.backup_node(backup_dir, 'node', node, backup_type='page')
-
-        full_id_2 = self.backup_node(backup_dir, 'node', node)
-        page_id_2 = self.backup_node(backup_dir, 'node', node, backup_type='page')
-
-        node.cleanup()
-
-        old_path = os.path.join(backup_dir, 'backups', 'node', full_id_1)
-        new_path = os.path.join(backup_dir, 'backups', 'node', 'hello_kitty')
-
-        os.rename(old_path, new_path)
-
-        # This show command is expected to be successful
-        self.show_pb(backup_dir, 'node', full_id_1)
-
-        self.validate_pb(backup_dir)
-        self.validate_pb(backup_dir, 'node')
-        self.validate_pb(backup_dir, 'node', full_id_1)
-
-        self.restore_node(backup_dir, 'node', node, backup_id=full_id_1)
-
-        self.delete_pb(backup_dir, 'node', full_id_1)
-
-        old_path = os.path.join(backup_dir, 'backups', 'node', full_id_2)
-        new_path = os.path.join(backup_dir, 'backups', 'node', 'hello_kitty')
-
-        self.set_backup(
-
backup_dir, 'node', full_id_2, options=['--note=hello']) - - self.merge_backup(backup_dir, 'node', page_id_2, options=["-j", "4"]) - - self.assertNotIn( - 'note', - self.show_pb(backup_dir, 'node', page_id_2)) - - # Clean after yourself - - def test_empty_lock_file(self): - """ - https://github.com/postgrespro/pg_probackup/issues/308 - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Fill with data - node.pgbench_init(scale=100) - - # FULL - backup_id = self.backup_node(backup_dir, 'node', node) - - lockfile = os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.pid') - with open(lockfile, "w+") as f: - f.truncate() - - out = self.validate_pb(backup_dir, 'node', backup_id) - - self.assertIn( - "Waiting 30 seconds on empty exclusive lock for backup", out) - -# lockfile = os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.pid') -# with open(lockfile, "w+") as f: -# f.truncate() -# -# p1 = self.validate_pb(backup_dir, 'node', backup_id, asynchronous=True, -# options=['--log-level-file=LOG', '--log-filename=validate.log']) -# sleep(3) -# p2 = self.delete_pb(backup_dir, 'node', backup_id, asynchronous=True, -# options=['--log-level-file=LOG', '--log-filename=delete.log']) -# -# p1.wait() -# p2.wait() - - def test_shared_lock(self): - """ - Make sure that shared lock leaves no files with pids - """ - self._check_gdb_flag_or_skip_test() - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Fill with data - node.pgbench_init(scale=1) - - # FULL - backup_id = self.backup_node(backup_dir, 'node', node) - - lockfile_excl = os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.pid') - lockfile_shr = os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup_ro.pid') - - self.validate_pb(backup_dir, 'node', backup_id) - - self.assertFalse( - os.path.exists(lockfile_excl), - "File should not exist: {0}".format(lockfile_excl)) - - self.assertFalse( - os.path.exists(lockfile_shr), - "File should not exist: {0}".format(lockfile_shr)) - - gdb = self.validate_pb(backup_dir, 'node', backup_id, gdb=True) - - gdb.set_breakpoint('validate_one_page') - gdb.run_until_break() - gdb.kill() - - self.assertTrue( - os.path.exists(lockfile_shr), - "File should exist: {0}".format(lockfile_shr)) - - self.validate_pb(backup_dir, 'node', backup_id) - - self.assertFalse( - os.path.exists(lockfile_excl), - "File should not exist: {0}".format(lockfile_excl)) - - self.assertFalse( - os.path.exists(lockfile_shr), - "File should not exist: {0}".format(lockfile_shr)) - diff --git a/tests/logging_test.py b/tests/logging_test.py deleted file mode 100644 index c5cdfa344..000000000 --- a/tests/logging_test.py +++ /dev/null @@ -1,345 +0,0 @@ -import unittest -import os -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -import datetime - -class LogTest(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - # 
@unittest.expectedFailure - # PGPRO-2154 - def test_log_rotation(self): - """ - """ - self._check_gdb_flag_or_skip_test() - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.set_config( - backup_dir, 'node', - options=['--log-rotation-age=1s', '--log-rotation-size=1MB']) - - self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--log-level-file=verbose']) - - gdb = self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--log-level-file=verbose'], gdb=True) - - gdb.set_breakpoint('open_logfile') - gdb.run_until_break() - gdb.continue_execution_until_exit() - - def test_log_filename_strftime(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.set_config( - backup_dir, 'node', - options=['--log-rotation-age=1d']) - - self.backup_node( - backup_dir, 'node', node, - options=[ - '--stream', - '--log-level-file=VERBOSE', - '--log-filename=pg_probackup-%a.log']) - - day_of_week = datetime.datetime.today().strftime("%a") - - path = os.path.join( - backup_dir, 'log', 'pg_probackup-{0}.log'.format(day_of_week)) - - self.assertTrue(os.path.isfile(path)) - - def test_truncate_rotation_file(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.set_config( - backup_dir, 'node', - options=['--log-rotation-age=1d']) - - self.backup_node( - backup_dir, 'node', node, - options=[ - '--stream', - '--log-level-file=VERBOSE']) - - rotation_file_path = os.path.join( - backup_dir, 'log', 'pg_probackup.log.rotation') - - log_file_path = os.path.join( - backup_dir, 'log', 'pg_probackup.log') - - log_file_size = os.stat(log_file_path).st_size - - self.assertTrue(os.path.isfile(rotation_file_path)) - - # truncate .rotation file - with open(rotation_file_path, "rb+", 0) as f: - f.truncate() - f.flush() - f.close - - output = self.backup_node( - backup_dir, 'node', node, - options=[ - '--stream', - '--log-level-file=LOG'], - return_id=False) - - # check that log file wasn`t rotated - self.assertGreater( - os.stat(log_file_path).st_size, - log_file_size) - - self.assertIn( - 'WARNING: cannot read creation timestamp from rotation file', - output) - - output = self.backup_node( - backup_dir, 'node', node, - options=[ - '--stream', - '--log-level-file=LOG'], - return_id=False) - - # check that log file wasn`t rotated - self.assertGreater( - os.stat(log_file_path).st_size, - log_file_size) - - self.assertNotIn( - 'WARNING: cannot read creation timestamp from rotation file', - output) - - self.assertTrue(os.path.isfile(rotation_file_path)) - - def test_unlink_rotation_file(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - 
initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.set_config( - backup_dir, 'node', - options=['--log-rotation-age=1d']) - - self.backup_node( - backup_dir, 'node', node, - options=[ - '--stream', - '--log-level-file=VERBOSE']) - - rotation_file_path = os.path.join( - backup_dir, 'log', 'pg_probackup.log.rotation') - - log_file_path = os.path.join( - backup_dir, 'log', 'pg_probackup.log') - - log_file_size = os.stat(log_file_path).st_size - - self.assertTrue(os.path.isfile(rotation_file_path)) - - # unlink .rotation file - os.unlink(rotation_file_path) - - output = self.backup_node( - backup_dir, 'node', node, - options=[ - '--stream', - '--log-level-file=LOG'], - return_id=False) - - # check that log file wasn`t rotated - self.assertGreater( - os.stat(log_file_path).st_size, - log_file_size) - - self.assertIn( - 'WARNING: missing rotation file:', - output) - - self.assertTrue(os.path.isfile(rotation_file_path)) - - output = self.backup_node( - backup_dir, 'node', node, - options=[ - '--stream', - '--log-level-file=VERBOSE'], - return_id=False) - - self.assertNotIn( - 'WARNING: missing rotation file:', - output) - - # check that log file wasn`t rotated - self.assertGreater( - os.stat(log_file_path).st_size, - log_file_size) - - def test_garbage_in_rotation_file(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.set_config( - backup_dir, 'node', - options=['--log-rotation-age=1d']) - - self.backup_node( - backup_dir, 'node', node, - options=[ - '--stream', - '--log-level-file=VERBOSE']) - - rotation_file_path = os.path.join( - backup_dir, 'log', 'pg_probackup.log.rotation') - - log_file_path = os.path.join( - backup_dir, 'log', 'pg_probackup.log') - - log_file_size = os.stat(log_file_path).st_size - - self.assertTrue(os.path.isfile(rotation_file_path)) - - # mangle .rotation file - with open(rotation_file_path, "w+b", 0) as f: - f.write(b"blah") - output = self.backup_node( - backup_dir, 'node', node, - options=[ - '--stream', - '--log-level-file=LOG'], - return_id=False) - - # check that log file wasn`t rotated - self.assertGreater( - os.stat(log_file_path).st_size, - log_file_size) - - self.assertIn( - 'WARNING: rotation file', - output) - - self.assertIn( - 'has wrong creation timestamp', - output) - - self.assertTrue(os.path.isfile(rotation_file_path)) - - output = self.backup_node( - backup_dir, 'node', node, - options=[ - '--stream', - '--log-level-file=LOG'], - return_id=False) - - self.assertNotIn( - 'WARNING: rotation file', - output) - - # check that log file wasn`t rotated - self.assertGreater( - os.stat(log_file_path).st_size, - log_file_size) - - def test_issue_274(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - replica = self.make_simple_node( - 
base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.backup_node(backup_dir, 'node', node, options=['--stream']) - self.restore_node(backup_dir, 'node', replica) - - # Settings for Replica - self.set_replica(node, replica, synchronous=True) - self.set_archiving(backup_dir, 'node', replica, replica=True) - self.set_auto_conf(replica, {'port': replica.port}) - - replica.slow_start(replica=True) - - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,45600) i") - - log_dir = os.path.join(backup_dir, "somedir") - - try: - self.backup_node( - backup_dir, 'node', replica, backup_type='page', - options=[ - '--log-level-console=verbose', '--log-level-file=verbose', - '--log-directory={0}'.format(log_dir), '-j1', - '--log-filename=somelog.txt', '--archive-timeout=5s', - '--no-validate', '--log-rotation-size=100KB']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of archiving timeout" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: WAL segment', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - log_file_path = os.path.join( - log_dir, 'somelog.txt') - - self.assertTrue(os.path.isfile(log_file_path)) - - with open(log_file_path, "r+") as f: - log_content = f.read() - - self.assertIn('INFO: command:', log_content) diff --git a/tests/merge_test.py b/tests/merge_test.py deleted file mode 100644 index ffa73263c..000000000 --- a/tests/merge_test.py +++ /dev/null @@ -1,2759 +0,0 @@ -# coding: utf-8 - -import unittest -import os -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from testgres import QueryException -import shutil -from datetime import datetime, timedelta -import time -import subprocess - -class MergeTest(ProbackupTest, unittest.TestCase): - - def test_basic_merge_full_page(self): - """ - Test MERGE command, it merges FULL backup with target PAGE backups - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") - - # Initialize instance and backup directory - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=["--data-checksums"]) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, "node", node) - self.set_archiving(backup_dir, "node", node) - node.slow_start() - - # Do full backup - self.backup_node(backup_dir, "node", node, options=['--compress']) - show_backup = self.show_pb(backup_dir, "node")[0] - - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "FULL") - - # Fill with data - with node.connect() as conn: - conn.execute("create table test (id int)") - conn.execute( - "insert into test select i from generate_series(1,10) s(i)") - conn.commit() - - # Do first page backup - self.backup_node(backup_dir, "node", node, backup_type="page", options=['--compress']) - show_backup = self.show_pb(backup_dir, "node")[1] - - # sanity check - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "PAGE") - - # Fill with data - with node.connect() as conn: - conn.execute( - "insert into test select i from generate_series(1,10) s(i)") - count1 = conn.execute("select count(*) from test") - conn.commit() - - # Do second page backup - 
self.backup_node( - backup_dir, "node", node, - backup_type="page", options=['--compress']) - show_backup = self.show_pb(backup_dir, "node")[2] - page_id = show_backup["id"] - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # sanity check - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "PAGE") - - # Merge all backups - self.merge_backup(backup_dir, "node", page_id, - options=["-j", "4"]) - show_backups = self.show_pb(backup_dir, "node") - - # sanity check - self.assertEqual(len(show_backups), 1) - self.assertEqual(show_backups[0]["status"], "OK") - self.assertEqual(show_backups[0]["backup-mode"], "FULL") - - # Drop node and restore it - node.cleanup() - self.restore_node(backup_dir, 'node', node) - - # Check physical correctness - if self.paranoia: - pgdata_restored = self.pgdata_content( - node.data_dir, ignore_ptrack=False) - self.compare_pgdata(pgdata, pgdata_restored) - - node.slow_start() - - # Check restored node - count2 = node.execute("postgres", "select count(*) from test") - self.assertEqual(count1, count2) - - def test_merge_compressed_backups(self): - """ - Test MERGE command with compressed backups - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") - - # Initialize instance and backup directory - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=["--data-checksums"]) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, "node", node) - self.set_archiving(backup_dir, "node", node) - node.slow_start() - - # Do full compressed backup - self.backup_node(backup_dir, "node", node, options=['--compress']) - show_backup = self.show_pb(backup_dir, "node")[0] - - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "FULL") - - # Fill with data - with node.connect() as conn: - conn.execute("create table test (id int)") - conn.execute( - "insert into test select i from generate_series(1,10) s(i)") - count1 = conn.execute("select count(*) from test") - conn.commit() - - # Do compressed page backup - self.backup_node( - backup_dir, "node", node, backup_type="page", options=['--compress']) - show_backup = self.show_pb(backup_dir, "node")[1] - page_id = show_backup["id"] - - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "PAGE") - - # Merge all backups - self.merge_backup(backup_dir, "node", page_id, options=['-j2']) - show_backups = self.show_pb(backup_dir, "node") - - self.assertEqual(len(show_backups), 1) - self.assertEqual(show_backups[0]["status"], "OK") - self.assertEqual(show_backups[0]["backup-mode"], "FULL") - - # Drop node and restore it - node.cleanup() - self.restore_node(backup_dir, 'node', node) - node.slow_start() - - # Check restored node - count2 = node.execute("postgres", "select count(*) from test") - self.assertEqual(count1, count2) - - # Clean after yourself - node.cleanup() - - def test_merge_compressed_backups_1(self): - """ - Test MERGE command with compressed backups - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") - - # Initialize instance and backup directory - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=["--data-checksums"]) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, "node", node) - self.set_archiving(backup_dir, "node", node) - node.slow_start() - - # Fill with data - 
node.pgbench_init(scale=10) - - # Do compressed FULL backup - self.backup_node(backup_dir, "node", node, options=['--compress', '--stream']) - show_backup = self.show_pb(backup_dir, "node")[0] - - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "FULL") - - # Change data - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # Do compressed DELTA backup - self.backup_node( - backup_dir, "node", node, - backup_type="delta", options=['--compress', '--stream']) - - # Change data - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # Do compressed PAGE backup - self.backup_node( - backup_dir, "node", node, backup_type="page", options=['--compress']) - - pgdata = self.pgdata_content(node.data_dir) - - show_backup = self.show_pb(backup_dir, "node")[2] - page_id = show_backup["id"] - - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "PAGE") - - # Merge all backups - self.merge_backup(backup_dir, "node", page_id, options=['-j2']) - show_backups = self.show_pb(backup_dir, "node") - - self.assertEqual(len(show_backups), 1) - self.assertEqual(show_backups[0]["status"], "OK") - self.assertEqual(show_backups[0]["backup-mode"], "FULL") - - # Drop node and restore it - node.cleanup() - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # Clean after yourself - node.cleanup() - - def test_merge_compressed_and_uncompressed_backups(self): - """ - Test MERGE command with compressed and uncompressed backups - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") - - # Initialize instance and backup directory - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=["--data-checksums"], - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, "node", node) - self.set_archiving(backup_dir, "node", node) - node.slow_start() - - # Fill with data - node.pgbench_init(scale=10) - - # Do compressed FULL backup - self.backup_node(backup_dir, "node", node, options=[ - '--compress-algorithm=zlib', '--stream']) - show_backup = self.show_pb(backup_dir, "node")[0] - - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "FULL") - - # Change data - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # Do compressed DELTA backup - self.backup_node( - backup_dir, "node", node, backup_type="delta", - options=['--compress', '--stream']) - - # Change data - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # Do uncompressed PAGE backup - self.backup_node(backup_dir, "node", node, backup_type="page") - - pgdata = self.pgdata_content(node.data_dir) - - show_backup = self.show_pb(backup_dir, "node")[2] - page_id = show_backup["id"] - - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "PAGE") - - # Merge all backups - self.merge_backup(backup_dir, "node", page_id, options=['-j2']) - show_backups = self.show_pb(backup_dir, "node") - - self.assertEqual(len(show_backups), 1) - self.assertEqual(show_backups[0]["status"], "OK") - self.assertEqual(show_backups[0]["backup-mode"], "FULL") - - # Drop node and restore it - node.cleanup() - self.restore_node(backup_dir, 'node', node) - - 
pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # Clean after yourself - node.cleanup() - - def test_merge_compressed_and_uncompressed_backups_1(self): - """ - Test MERGE command with compressed and uncompressed backups - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") - - # Initialize instance and backup directory - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=["--data-checksums"], - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, "node", node) - self.set_archiving(backup_dir, "node", node) - node.slow_start() - - # Fill with data - node.pgbench_init(scale=5) - - # Do compressed FULL backup - self.backup_node(backup_dir, "node", node, options=[ - '--compress-algorithm=zlib', '--stream']) - show_backup = self.show_pb(backup_dir, "node")[0] - - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "FULL") - - # Change data - pgbench = node.pgbench(options=['-T', '20', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # Do uncompressed DELTA backup - self.backup_node( - backup_dir, "node", node, backup_type="delta", - options=['--stream']) - - # Change data - pgbench = node.pgbench(options=['-T', '20', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # Do compressed PAGE backup - self.backup_node( - backup_dir, "node", node, backup_type="page", - options=['--compress-algorithm=zlib']) - - pgdata = self.pgdata_content(node.data_dir) - - show_backup = self.show_pb(backup_dir, "node")[2] - page_id = show_backup["id"] - - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "PAGE") - - # Merge all backups - self.merge_backup(backup_dir, "node", page_id) - show_backups = self.show_pb(backup_dir, "node") - - self.assertEqual(len(show_backups), 1) - self.assertEqual(show_backups[0]["status"], "OK") - self.assertEqual(show_backups[0]["backup-mode"], "FULL") - - # Drop node and restore it - node.cleanup() - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # Clean after yourself - node.cleanup() - - def test_merge_compressed_and_uncompressed_backups_2(self): - """ - Test MERGE command with compressed and uncompressed backups - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") - - # Initialize instance and backup directory - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=["--data-checksums"], - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, "node", node) - self.set_archiving(backup_dir, "node", node) - node.slow_start() - - # Fill with data - node.pgbench_init(scale=20) - - # Do uncompressed FULL backup - self.backup_node(backup_dir, "node", node) - show_backup = self.show_pb(backup_dir, "node")[0] - - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "FULL") - - # Change data - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # Do compressed DELTA backup - self.backup_node( - backup_dir, "node", node, backup_type="delta", - options=['--compress-algorithm=zlib', '--stream']) - - # Change data - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # Do uncompressed PAGE backup - 
self.backup_node( - backup_dir, "node", node, backup_type="page") - - pgdata = self.pgdata_content(node.data_dir) - - show_backup = self.show_pb(backup_dir, "node")[2] - page_id = show_backup["id"] - - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "PAGE") - - # Merge all backups - self.merge_backup(backup_dir, "node", page_id) - show_backups = self.show_pb(backup_dir, "node") - - self.assertEqual(len(show_backups), 1) - self.assertEqual(show_backups[0]["status"], "OK") - self.assertEqual(show_backups[0]["backup-mode"], "FULL") - - # Drop node and restore it - node.cleanup() - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_merge_tablespaces(self): - """ - Create tablespace with table, take FULL backup, - create another tablespace with another table and drop previous - tablespace, take page backup, merge it and restore - - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums'], - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.create_tblspace_in_node(node, 'somedata') - node.safe_psql( - "postgres", - "create table t_heap tablespace somedata as select i as id," - " md5(i::text) as text, md5(i::text)::tsvector as tsvector" - " from generate_series(0,100) i" - ) - # FULL backup - self.backup_node(backup_dir, 'node', node) - - # Create new tablespace - self.create_tblspace_in_node(node, 'somedata1') - - node.safe_psql( - "postgres", - "create table t_heap1 tablespace somedata1 as select i as id," - " md5(i::text) as text, md5(i::text)::tsvector as tsvector" - " from generate_series(0,100) i" - ) - - node.safe_psql( - "postgres", - "drop table t_heap" - ) - - # Drop old tablespace - node.safe_psql( - "postgres", - "drop tablespace somedata" - ) - - # PAGE backup - backup_id = self.backup_node(backup_dir, 'node', node, backup_type="page") - - pgdata = self.pgdata_content(node.data_dir) - - node.stop() - shutil.rmtree( - self.get_tblspace_path(node, 'somedata'), - ignore_errors=True) - shutil.rmtree( - self.get_tblspace_path(node, 'somedata1'), - ignore_errors=True) - node.cleanup() - - self.merge_backup(backup_dir, 'node', backup_id) - - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]) - - pgdata_restored = self.pgdata_content(node.data_dir) - - # this compare should fall because we lost some directories - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_merge_tablespaces_1(self): - """ - Create tablespace with table, take FULL backup, - create another tablespace with another table, take page backup, - drop first tablespace and take delta backup, - merge it and restore - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums'], - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.create_tblspace_in_node(node, 'somedata') - - # FULL backup - self.backup_node(backup_dir, 'node', node) - 
node.safe_psql( - "postgres", - "create table t_heap tablespace somedata as select i as id," - " md5(i::text) as text, md5(i::text)::tsvector as tsvector" - " from generate_series(0,100) i" - ) - - # CREATE NEW TABLESPACE - self.create_tblspace_in_node(node, 'somedata1') - - node.safe_psql( - "postgres", - "create table t_heap1 tablespace somedata1 as select i as id," - " md5(i::text) as text, md5(i::text)::tsvector as tsvector" - " from generate_series(0,100) i" - ) - - # PAGE backup - self.backup_node(backup_dir, 'node', node, backup_type="page") - - node.safe_psql( - "postgres", - "drop table t_heap" - ) - node.safe_psql( - "postgres", - "drop tablespace somedata" - ) - - # DELTA backup - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="delta") - - pgdata = self.pgdata_content(node.data_dir) - - node.stop() - shutil.rmtree( - self.get_tblspace_path(node, 'somedata'), - ignore_errors=True) - shutil.rmtree( - self.get_tblspace_path(node, 'somedata1'), - ignore_errors=True) - node.cleanup() - - self.merge_backup(backup_dir, 'node', backup_id) - - self.restore_node( - backup_dir, 'node', node, - options=["-j", "4"]) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - def test_merge_page_truncate(self): - """ - make node, create table, take full backup, - delete last 3 pages, vacuum relation, - take page backup, merge full and page, - restore last page backup and check data correctness - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '300s'}) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node_restored.cleanup() - node.slow_start() - self.create_tblspace_in_node(node, 'somedata') - - node.safe_psql( - "postgres", - "create sequence t_seq; " - "create table t_heap tablespace somedata as select i as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1024) i;") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "delete from t_heap where ctid >= '(11,0)'") - node.safe_psql( - "postgres", - "vacuum t_heap") - - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - page_id = self.show_pb(backup_dir, "node")[1]["id"] - self.merge_backup(backup_dir, "node", page_id) - - self.validate_pb(backup_dir) - - old_tablespace = self.get_tblspace_path(node, 'somedata') - new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format(old_tablespace, new_tablespace)]) - - # Physical comparison - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - # Logical comparison - result1 = node.safe_psql( - "postgres", - "select * from t_heap") - - result2 = node_restored.safe_psql( - "postgres", - "select * 
from t_heap") - - self.assertEqual(result1, result2) - - def test_merge_delta_truncate(self): - """ - make node, create table, take full backup, - delete last 3 pages, vacuum relation, - take page backup, merge full and page, - restore last page backup and check data correctness - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '300s'}) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node_restored.cleanup() - node.slow_start() - self.create_tblspace_in_node(node, 'somedata') - - node.safe_psql( - "postgres", - "create sequence t_seq; " - "create table t_heap tablespace somedata as select i as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1024) i;") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "delete from t_heap where ctid >= '(11,0)'") - node.safe_psql( - "postgres", - "vacuum t_heap") - - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - page_id = self.show_pb(backup_dir, "node")[1]["id"] - self.merge_backup(backup_dir, "node", page_id) - - self.validate_pb(backup_dir) - - old_tablespace = self.get_tblspace_path(node, 'somedata') - new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format(old_tablespace, new_tablespace)]) - - # Physical comparison - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - # Logical comparison - result1 = node.safe_psql( - "postgres", - "select * from t_heap") - - result2 = node_restored.safe_psql( - "postgres", - "select * from t_heap") - - self.assertEqual(result1, result2) - - def test_merge_ptrack_truncate(self): - """ - make node, create table, take full backup, - delete last 3 pages, vacuum relation, - take page backup, merge full and page, - restore last page backup and check data correctness - """ - if not self.ptrack: - self.skipTest('Skipped because ptrack support is disabled') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - ptrack_enable=True) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.create_tblspace_in_node(node, 'somedata') - - node.safe_psql( - "postgres", - "create sequence t_seq; " - "create table t_heap tablespace somedata as select i as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1024) i;") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - 
self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "delete from t_heap where ctid >= '(11,0)'") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='ptrack') - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - self.merge_backup(backup_dir, "node", page_id) - - self.validate_pb(backup_dir) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - old_tablespace = self.get_tblspace_path(node, 'somedata') - new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format(old_tablespace, new_tablespace)]) - - # Physical comparison - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - # Logical comparison - result1 = node.safe_psql( - "postgres", - "select * from t_heap") - - result2 = node_restored.safe_psql( - "postgres", - "select * from t_heap") - - self.assertEqual(result1, result2) - - # @unittest.skip("skip") - def test_merge_delta_delete(self): - """ - Make node, create tablespace with table, take full backup, - alter tablespace location, take delta backup, merge full and delta, - restore database. - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s', - } - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.create_tblspace_in_node(node, 'somedata') - - # FULL backup - self.backup_node(backup_dir, 'node', node, options=["--stream"]) - - node.safe_psql( - "postgres", - "create table t_heap tablespace somedata as select i as id," - " md5(i::text) as text, md5(i::text)::tsvector as tsvector" - " from generate_series(0,100) i" - ) - - node.safe_psql( - "postgres", - "delete from t_heap" - ) - - node.safe_psql( - "postgres", - "vacuum t_heap" - ) - - # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=["--stream"] - ) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - backup_id = self.show_pb(backup_dir, "node")[1]["id"] - self.merge_backup(backup_dir, "node", backup_id, options=["-j", "4"]) - - # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored') - ) - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format( - self.get_tblspace_path(node, 'somedata'), - self.get_tblspace_path(node_restored, 'somedata') - ) - ] - ) - - # GET RESTORED PGDATA AND COMPARE - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - # @unittest.skip("skip") - def test_continue_failed_merge(self): - """ - Check that failed MERGE can be continued - """ - self._check_gdb_flag_or_skip_test() - 
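# The failed-merge tests below share one crash-injection pattern, sketched here
# with the gdb helpers already used in this module; backup_id is assumed to be
# the last backup of an existing FULL->DELTA chain, and the breakpoint is
# whichever merge-phase function the test wants to die in.
gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True)
gdb.set_breakpoint('backup_non_data_file_internal')    # stop inside the merge
gdb.run_until_break()
gdb.continue_execution_until_break(5)                   # let a few files through
gdb._execute('signal SIGKILL')                          # simulate a hard crash

# The chain is now left in MERGING/MERGED state; re-running the same command is
# expected to pick the merge up where it stopped and finish it.
self.merge_backup(backup_dir, "node", backup_id)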
- backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join( - self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "create table t_heap as select i as id," - " md5(i::text) as text, md5(i::text)::tsvector as tsvector" - " from generate_series(0,1000) i" - ) - - # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta' - ) - - node.safe_psql( - "postgres", - "delete from t_heap" - ) - - node.safe_psql( - "postgres", - "vacuum t_heap" - ) - - # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta' - ) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - backup_id = self.show_pb(backup_dir, "node")[2]["id"] - - gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) - - gdb.set_breakpoint('backup_non_data_file_internal') - gdb.run_until_break() - - gdb.continue_execution_until_break(5) - - gdb._execute('signal SIGKILL') - gdb._execute('detach') - time.sleep(1) - - print(self.show_pb(backup_dir, as_text=True, as_json=False)) - - # Try to continue failed MERGE - self.merge_backup(backup_dir, "node", backup_id) - - # Drop node and restore it - node.cleanup() - self.restore_node(backup_dir, 'node', node) - - # @unittest.skip("skip") - def test_continue_failed_merge_with_corrupted_delta_backup(self): - """ - Fail merge via gdb, corrupt DELTA backup, try to continue merge - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "create table t_heap as select i as id," - " md5(i::text) as text, md5(i::text)::tsvector as tsvector" - " from generate_series(0,1000) i") - - old_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - - # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - node.safe_psql( - "postgres", - "update t_heap set id = 100500") - - node.safe_psql( - "postgres", - "vacuum full t_heap") - - new_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - - # DELTA BACKUP - backup_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - backup_id = self.show_pb(backup_dir, "node")[1]["id"] - - # Failed MERGE - gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) - gdb.set_breakpoint('backup_non_data_file_internal') - gdb.run_until_break() - - gdb.continue_execution_until_break(2) - - gdb._execute('signal SIGKILL') - - # CORRUPT incremental backup - # read block from future - # block_size + backup_header = 8200 - file = os.path.join( - backup_dir, 'backups', 'node', - backup_id_2, 'database', new_path) - with open(file, 'rb') as f: - f.seek(8200) - block_1 = f.read(8200) - f.close - - # write block from future - file = 
os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', old_path) - with open(file, 'r+b') as f: - f.seek(8200) - f.write(block_1) - f.close - - # Try to continue failed MERGE - try: - print(self.merge_backup(backup_dir, "node", backup_id)) - self.assertEqual( - 1, 0, - "Expecting Error because of incremental backup corruption.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "ERROR: Backup {0} has status CORRUPT, merge is aborted".format( - backup_id) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - def test_continue_failed_merge_2(self): - """ - Check that failed MERGE on delete can be continued - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "create table t_heap as select i as id," - " md5(i::text) as text, md5(i::text)::tsvector as tsvector" - " from generate_series(0,1000) i") - - # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - node.safe_psql( - "postgres", - "delete from t_heap") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - backup_id = self.show_pb(backup_dir, "node")[2]["id"] - - gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) - - gdb.set_breakpoint('pgFileDelete') - - gdb.run_until_break() - - gdb._execute('thread apply all bt') - - gdb.continue_execution_until_break(20) - - gdb._execute('thread apply all bt') - - gdb._execute('signal SIGKILL') - - print(self.show_pb(backup_dir, as_text=True, as_json=False)) - - backup_id_deleted = self.show_pb(backup_dir, "node")[1]["id"] - - # TODO check that full backup has meta info is equal to DELETTING - - # Try to continue failed MERGE - self.merge_backup(backup_dir, "node", backup_id) - - def test_continue_failed_merge_3(self): - """ - Check that failed MERGE cannot be continued if intermediate - backup is missing. 
- """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Create test data - node.safe_psql("postgres", "create sequence t_seq") - node.safe_psql( - "postgres", - "create table t_heap as select i as id, nextval('t_seq')" - " as t_seq, md5(i::text) as text, md5(i::text)::tsvector" - " as tsvector from generate_series(0,100000) i" - ) - - # FULL backup - self.backup_node(backup_dir, 'node', node) - - # CREATE FEW PAGE BACKUP - i = 0 - - while i < 2: - - node.safe_psql( - "postgres", - "delete from t_heap" - ) - - node.safe_psql( - "postgres", - "vacuum t_heap" - ) - node.safe_psql( - "postgres", - "insert into t_heap select i as id, nextval('t_seq') as t_seq," - " md5(i::text) as text, md5(i::text)::tsvector as tsvector" - " from generate_series(100,200000) i" - ) - - # PAGE BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='page' - ) - i = i + 1 - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - backup_id_merge = self.show_pb(backup_dir, "node")[2]["id"] - backup_id_delete = self.show_pb(backup_dir, "node")[1]["id"] - - print(self.show_pb(backup_dir, as_text=True, as_json=False)) - - gdb = self.merge_backup(backup_dir, "node", backup_id_merge, gdb=True) - - gdb.set_breakpoint('backup_non_data_file_internal') - gdb.run_until_break() - gdb.continue_execution_until_break(2) - - gdb._execute('signal SIGKILL') - - print(self.show_pb(backup_dir, as_text=True, as_json=False)) - # print(os.path.join(backup_dir, "backups", "node", backup_id_delete)) - - # DELETE PAGE1 - shutil.rmtree( - os.path.join(backup_dir, "backups", "node", backup_id_delete)) - - # Try to continue failed MERGE - try: - self.merge_backup(backup_dir, "node", backup_id_merge) - self.assertEqual( - 1, 0, - "Expecting Error because of backup corruption.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "ERROR: Incremental chain is broken, " - "merge is impossible to finish" in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - def test_merge_different_compression_algo(self): - """ - Check that backups with different compression algorithms can be merged - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - self.backup_node( - backup_dir, 'node', node, options=['--compress-algorithm=zlib']) - - node.safe_psql( - "postgres", - "create table t_heap as select i as id," - " md5(i::text) as text, md5(i::text)::tsvector as tsvector" - " from generate_series(0,1000) i") - - # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['--compress-algorithm=pglz']) - - node.safe_psql( - "postgres", - "delete from t_heap") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - # DELTA BACKUP - self.backup_node( - backup_dir, 'node', 
node, backup_type='delta') - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - backup_id = self.show_pb(backup_dir, "node")[2]["id"] - - self.merge_backup(backup_dir, "node", backup_id) - - def test_merge_different_wal_modes(self): - """ - Check that backups with different wal modes can be merged - correctly - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL stream backup - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - # DELTA archive backup - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - self.merge_backup(backup_dir, 'node', backup_id=backup_id) - - self.assertEqual( - 'ARCHIVE', self.show_pb(backup_dir, 'node', backup_id)['wal']) - - # DELTA stream backup - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['--stream']) - - self.merge_backup(backup_dir, 'node', backup_id=backup_id) - - self.assertEqual( - 'STREAM', self.show_pb(backup_dir, 'node', backup_id)['wal']) - - def test_crash_after_opening_backup_control_1(self): - """ - check that crashing after opening backup.control - for writing will not result in losing backup metadata - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL stream backup - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - # DELTA archive backup - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) - - gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) - gdb.set_breakpoint('write_backup_filelist') - gdb.run_until_break() - - gdb.set_breakpoint('write_backup') - gdb.continue_execution_until_break() - gdb.set_breakpoint('pgBackupWriteControl') - gdb.continue_execution_until_break() - - gdb._execute('signal SIGKILL') - - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) - - self.assertEqual( - 'MERGING', self.show_pb(backup_dir, 'node')[0]['status']) - - self.assertEqual( - 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) - - # @unittest.skip("skip") - def test_crash_after_opening_backup_control_2(self): - """ - check that crashing after opening backup_content.control - for writing will not result in losing metadata about backup files - TODO: rewrite - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Add data - node.pgbench_init(scale=3) - - # FULL backup - full_id = 
self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - # Change data - pgbench = node.pgbench(options=['-T', '20', '-c', '2']) - pgbench.wait() - - path = node.safe_psql( - 'postgres', - "select pg_relation_filepath('pgbench_accounts')").decode('utf-8').rstrip() - - fsm_path = path + '_fsm' - - node.safe_psql( - 'postgres', - 'vacuum pgbench_accounts') - - # DELTA backup - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - pgdata = self.pgdata_content(node.data_dir) - - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) - - gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) - gdb.set_breakpoint('write_backup_filelist') - gdb.run_until_break() - -# gdb.set_breakpoint('sprintf') -# gdb.continue_execution_until_break(1) - - gdb._execute('signal SIGKILL') - - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) - - self.assertEqual( - 'MERGING', self.show_pb(backup_dir, 'node')[0]['status']) - - self.assertEqual( - 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) - - # In to_backup drop file that comes from from_backup - # emulate crash during previous merge - file_to_remove = os.path.join( - backup_dir, 'backups', - 'node', full_id, 'database', fsm_path) - - # print(file_to_remove) - - os.remove(file_to_remove) - - # Continue failed merge - self.merge_backup(backup_dir, "node", backup_id) - - node.cleanup() - - # restore merge backup - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_losing_file_after_failed_merge(self): - """ - check that crashing after opening backup_content.control - for writing will not result in losing metadata about backup files - TODO: rewrite - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Add data - node.pgbench_init(scale=1) - - # FULL backup - full_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - # Change data - node.safe_psql( - 'postgres', - "update pgbench_accounts set aid = aid + 1005000") - - path = node.safe_psql( - 'postgres', - "select pg_relation_filepath('pgbench_accounts')").decode('utf-8').rstrip() - - node.safe_psql( - 'postgres', - "VACUUM pgbench_accounts") - - vm_path = path + '_vm' - - # DELTA backup - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - pgdata = self.pgdata_content(node.data_dir) - - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) - - gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) - gdb.set_breakpoint('write_backup_filelist') - gdb.run_until_break() - -# gdb.set_breakpoint('sprintf') -# gdb.continue_execution_until_break(20) - - gdb._execute('signal SIGKILL') - - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) - - self.assertEqual( - 'MERGING', self.show_pb(backup_dir, 'node')[0]['status']) - - self.assertEqual( - 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) - - # In to_backup drop file that comes from from_backup - # emulate crash during previous merge - file_to_remove 
= os.path.join( - backup_dir, 'backups', - 'node', full_id, 'database', vm_path) - - os.remove(file_to_remove) - - # Try to continue failed MERGE - self.merge_backup(backup_dir, "node", backup_id) - - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) - - node.cleanup() - - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - def test_failed_merge_after_delete(self): - """ - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # add database - node.safe_psql( - 'postgres', - 'CREATE DATABASE testdb') - - dboid = node.safe_psql( - "postgres", - "select oid from pg_database where datname = 'testdb'").decode('utf-8').rstrip() - - # take FULL backup - full_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - # drop database - node.safe_psql( - 'postgres', - 'DROP DATABASE testdb') - - # take PAGE backup - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - page_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - gdb = self.merge_backup( - backup_dir, 'node', page_id, - gdb=True, options=['--log-level-console=verbose']) - - gdb.set_breakpoint('delete_backup_files') - gdb.run_until_break() - - gdb.set_breakpoint('pgFileDelete') - gdb.continue_execution_until_break(20) - - gdb._execute('signal SIGKILL') - - # backup half-merged - self.assertEqual( - 'MERGED', self.show_pb(backup_dir, 'node')[0]['status']) - - self.assertEqual( - full_id, self.show_pb(backup_dir, 'node')[0]['id']) - - db_path = os.path.join( - backup_dir, 'backups', 'node', - full_id, 'database', 'base', dboid) - - try: - self.merge_backup( - backup_dir, 'node', page_id_2, - options=['--log-level-console=verbose']) - self.assertEqual( - 1, 0, - "Expecting Error because of missing parent.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "ERROR: Full backup {0} has unfinished merge with backup {1}".format( - full_id, page_id) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - def test_failed_merge_after_delete_1(self): - """ - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # take FULL backup - full_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - node.pgbench_init(scale=1) - - page_1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Change PAGE1 backup status to ERROR - self.change_backup_status(backup_dir, 'node', page_1, 'ERROR') - - pgdata = self.pgdata_content(node.data_dir) - - # add data - pgbench = node.pgbench(options=['-T', '10', '-c', '2', '--no-vacuum']) - pgbench.wait() - - # take PAGE2 backup - 
page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Change PAGE1 backup status to OK - self.change_backup_status(backup_dir, 'node', page_1, 'OK') - - gdb = self.merge_backup( - backup_dir, 'node', page_id, - gdb=True, options=['--log-level-console=verbose']) - - gdb.set_breakpoint('delete_backup_files') - gdb.run_until_break() - -# gdb.set_breakpoint('parray_bsearch') -# gdb.continue_execution_until_break() - - gdb.set_breakpoint('pgFileDelete') - gdb.continue_execution_until_break(30) - gdb._execute('signal SIGKILL') - - self.assertEqual( - full_id, self.show_pb(backup_dir, 'node')[0]['id']) - - # restore - node.cleanup() - try: - #self.restore_node(backup_dir, 'node', node, backup_id=page_1) - self.restore_node(backup_dir, 'node', node) - self.assertEqual( - 1, 0, - "Expecting Error because of orphan status.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup {0} is orphan".format(page_1), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - def test_failed_merge_after_delete_2(self): - """ - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # take FULL backup - full_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - node.pgbench_init(scale=1) - - page_1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # add data - pgbench = node.pgbench(options=['-T', '10', '-c', '2', '--no-vacuum']) - pgbench.wait() - - # take PAGE2 backup - page_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - gdb = self.merge_backup( - backup_dir, 'node', page_2, gdb=True, - options=['--log-level-console=VERBOSE']) - - gdb.set_breakpoint('pgFileDelete') - gdb.run_until_break() - gdb.continue_execution_until_break(2) - gdb._execute('signal SIGKILL') - - self.delete_pb(backup_dir, 'node', backup_id=page_2) - - # rerun merge - try: - #self.restore_node(backup_dir, 'node', node, backup_id=page_1) - self.merge_backup(backup_dir, 'node', page_1) - self.assertEqual( - 1, 0, - "Expecting Error because of backup is missing.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Full backup {0} has unfinished merge " - "with backup {1}".format(full_id, page_2), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - def test_failed_merge_after_delete_3(self): - """ - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # add database - node.safe_psql( - 'postgres', - 'CREATE DATABASE testdb') - - dboid = node.safe_psql( - "postgres", - "select oid from pg_database where datname = 'testdb'").rstrip() - - # take FULL backup - 
full_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - # drop database - node.safe_psql( - 'postgres', - 'DROP DATABASE testdb') - - # take PAGE backup - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # create database - node.safe_psql( - 'postgres', - 'create DATABASE testdb') - - page_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - gdb = self.merge_backup( - backup_dir, 'node', page_id, - gdb=True, options=['--log-level-console=verbose']) - - gdb.set_breakpoint('delete_backup_files') - gdb.run_until_break() - - gdb.set_breakpoint('pgFileDelete') - gdb.continue_execution_until_break(20) - - gdb._execute('signal SIGKILL') - - # backup half-merged - self.assertEqual( - 'MERGED', self.show_pb(backup_dir, 'node')[0]['status']) - - self.assertEqual( - full_id, self.show_pb(backup_dir, 'node')[0]['id']) - - db_path = os.path.join( - backup_dir, 'backups', 'node', full_id) - - # FULL backup is missing now - shutil.rmtree(db_path) - - try: - self.merge_backup( - backup_dir, 'node', page_id_2, - options=['--log-level-console=verbose']) - self.assertEqual( - 1, 0, - "Expecting Error because of missing parent.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "ERROR: Failed to find parent full backup for {0}".format( - page_id_2) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # Skipped, because backups from the future are invalid. - # This cause a "ERROR: Can't assign backup_id, there is already a backup in future" - # now (PBCKP-259). We can conduct such a test again when we - # untie 'backup_id' from 'start_time' - @unittest.skip("skip") - def test_merge_backup_from_future(self): - """ - take FULL backup, table PAGE backup from future, - try to merge page with FULL - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Take FULL - self.backup_node(backup_dir, 'node', node) - - node.pgbench_init(scale=5) - - # Take PAGE from future - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - with open( - os.path.join( - backup_dir, 'backups', 'node', - backup_id, "backup.control"), "a") as conf: - conf.write("start-time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() + timedelta(days=3))) - - # rename directory - new_id = self.show_pb(backup_dir, 'node')[1]['id'] - - os.rename( - os.path.join(backup_dir, 'backups', 'node', backup_id), - os.path.join(backup_dir, 'backups', 'node', new_id)) - - pgbench = node.pgbench(options=['-T', '5', '-c', '1', '--no-vacuum']) - pgbench.wait() - - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') - pgdata = self.pgdata_content(node.data_dir) - - result = node.safe_psql( - 'postgres', - 'SELECT * from pgbench_accounts') - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', - node_restored, backup_id=backup_id) - - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # check 
that merged backup has the same state as - node_restored.cleanup() - self.merge_backup(backup_dir, 'node', backup_id=backup_id) - self.restore_node( - backup_dir, 'node', - node_restored, backup_id=backup_id) - pgdata_restored = self.pgdata_content(node_restored.data_dir) - - self.set_auto_conf( - node_restored, - {'port': node_restored.port}) - node_restored.slow_start() - - result_new = node_restored.safe_psql( - 'postgres', - 'SELECT * from pgbench_accounts') - - self.assertTrue(result, result_new) - - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_merge_multiple_descendants(self): - """ - PAGEb3 - | PAGEa3 - PAGEb2 / - | PAGEa2 / - PAGEb1 \ / - | PAGEa1 - FULLb | - FULLa - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Take FULL BACKUPs - backup_id_a = self.backup_node(backup_dir, 'node', node) - - backup_id_b = self.backup_node(backup_dir, 'node', node) - - # Change FULLb backup status to ERROR - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Change FULLb backup status to OK - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - # Change PAGEa1 backup status to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR') - - # PAGEa1 ERROR - # FULLb OK - # FULLa OK - - page_id_b1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEb1 OK - # PAGEa1 ERROR - # FULLb OK - # FULLa OK - - # Change PAGEa1 to OK - self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK') - - # Change PAGEb1 and FULLb to ERROR - self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - page_id_a2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEa2 OK - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - # Change PAGEb1 and FULLb to OK - self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - # Change PAGEa2 and FULL to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') - - # PAGEa2 ERROR - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa ERROR - - page_id_b2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEb2 OK - # PAGEa2 ERROR - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa ERROR - - # Change PAGEb2, PAGEb1 and FULLb to ERROR - self.change_backup_status(backup_dir, 'node', page_id_b2, 'ERROR') - self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - # Change FULLa to OK - self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') - - # PAGEb2 ERROR - # PAGEa2 ERROR - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - page_id_a3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEa3 OK - # PAGEb2 ERROR - # PAGEa2 ERROR - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - 
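# The ERROR/OK juggling in this test only exists to steer parent selection: a new
# PAGE backup attaches to the latest usable (OK) backup, so flipping statuses with
# change_backup_status() builds two interleaved chains on top of FULLa and FULLb.
# A chain can then be walked upwards through the show_pb() metadata used elsewhere
# in this module ('id' and 'parent-backup-id'); sketch, starting from page_id_a3:
chain = [page_id_a3]
while True:
    meta = self.show_pb(backup_dir, 'node', backup_id=chain[-1])
    if 'parent-backup-id' not in meta:
        break                          # reached the FULL backup
    chain.append(meta['parent-backup-id'])
# chain now lists page_id_a3, its PAGE ancestors and, last, the FULL backup.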
- # Change PAGEa3 and FULLa to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a3, 'ERROR') - - # Change PAGEb2, PAGEb1 and FULLb to OK - self.change_backup_status(backup_dir, 'node', page_id_b2, 'OK') - self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') - - page_id_b3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEb3 OK - # PAGEa3 ERROR - # PAGEb2 OK - # PAGEa2 ERROR - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa ERROR - - # Change PAGEa3, PAGEa2 and FULLa status to OK - self.change_backup_status(backup_dir, 'node', page_id_a3, 'OK') - self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') - - # PAGEb3 OK - # PAGEa3 OK - # PAGEb2 OK - # PAGEa2 OK - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa OK - - # Check that page_id_a3 and page_id_a2 are both direct descendants of page_id_a1 - self.assertEqual( - self.show_pb(backup_dir, 'node', backup_id=page_id_a3)['parent-backup-id'], - page_id_a1) - - self.assertEqual( - self.show_pb(backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'], - page_id_a1) - - self.merge_backup( - backup_dir, 'node', page_id_a2, - options=['--merge-expired', '--log-level-console=log']) - - try: - self.merge_backup( - backup_dir, 'node', page_id_a3, - options=['--merge-expired', '--log-level-console=log']) - self.assertEqual( - 1, 0, - "Expecting Error because of parent FULL backup is missing.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "ERROR: Failed to find parent full backup for {0}".format( - page_id_a3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_smart_merge(self): - """ - make node, create database, take full backup, drop database, - take PAGE backup and merge it into FULL, - make sure that files from dropped database are not - copied during restore - https://github.com/postgrespro/pg_probackup/issues/63 - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # create database - node.safe_psql( - "postgres", - "CREATE DATABASE testdb") - - # take FULL backup - full_id = self.backup_node(backup_dir, 'node', node) - - # drop database - node.safe_psql( - "postgres", - "DROP DATABASE testdb") - - # take PAGE backup - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # get delta between FULL and PAGE filelists - filelist_full = self.get_backup_filelist( - backup_dir, 'node', full_id) - - filelist_page = self.get_backup_filelist( - backup_dir, 'node', page_id) - - filelist_diff = self.get_backup_filelist_diff( - filelist_full, filelist_page) - - # merge PAGE backup - self.merge_backup( - backup_dir, 'node', page_id, - options=['--log-level-file=VERBOSE']) - - logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(logfile, 'r') as f: - logfile_content = f.read() - - def test_idempotent_merge(self): - """ - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = 
os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # add database - node.safe_psql( - 'postgres', - 'CREATE DATABASE testdb') - - # take FULL backup - full_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - # create database - node.safe_psql( - 'postgres', - 'create DATABASE testdb1') - - # take PAGE backup - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # create database - node.safe_psql( - 'postgres', - 'create DATABASE testdb2') - - page_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - gdb = self.merge_backup( - backup_dir, 'node', page_id_2, - gdb=True, options=['--log-level-console=verbose']) - - gdb.set_breakpoint('delete_backup_files') - gdb.run_until_break() - gdb.remove_all_breakpoints() - - gdb.set_breakpoint('rename') - gdb.continue_execution_until_break() - gdb.continue_execution_until_break(2) - - gdb._execute('signal SIGKILL') - - show_backups = self.show_pb(backup_dir, "node") - self.assertEqual(len(show_backups), 1) - - self.assertEqual( - 'MERGED', self.show_pb(backup_dir, 'node')[0]['status']) - - self.assertEqual( - full_id, self.show_pb(backup_dir, 'node')[0]['id']) - - self.merge_backup(backup_dir, 'node', page_id_2) - - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) - - self.assertEqual( - page_id_2, self.show_pb(backup_dir, 'node')[0]['id']) - - def test_merge_correct_inheritance(self): - """ - Make sure that backup metainformation fields - 'note' and 'expire-time' are correctly inherited - during merge - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # add database - node.safe_psql( - 'postgres', - 'CREATE DATABASE testdb') - - # take FULL backup - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - # create database - node.safe_psql( - 'postgres', - 'create DATABASE testdb1') - - # take PAGE backup - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - self.set_backup( - backup_dir, 'node', page_id, options=['--note=hello', '--ttl=20d']) - - page_meta = self.show_pb(backup_dir, 'node', page_id) - - self.merge_backup(backup_dir, 'node', page_id) - - print(self.show_pb(backup_dir, 'node', page_id)) - - self.assertEqual( - page_meta['note'], - self.show_pb(backup_dir, 'node', page_id)['note']) - - self.assertEqual( - page_meta['expire-time'], - self.show_pb(backup_dir, 'node', page_id)['expire-time']) - - def test_merge_correct_inheritance_1(self): - """ - Make sure that backup metainformation fields - 'note' and 'expire-time' are correctly inherited - during merge - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - 
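# Both inheritance tests probe the same rule from opposite sides: merge keeps the
# metadata of the incremental backup being merged, not of the FULL it is folded
# into, so 'note' and 'expire-time' set on the PAGE backup survive the merge
# (previous test) while the same fields set on the FULL do not (this test).
# Sketch of the metadata round-trip, assuming a FULL->PAGE chain where page_id is
# the PAGE backup, as in the previous test:
self.set_backup(backup_dir, 'node', page_id,
                options=['--note=hello', '--ttl=20d'])

page_meta = self.show_pb(backup_dir, 'node', page_id)
self.assertIn('note', page_meta)          # free-form text attached to the backup
self.assertIn('expire-time', page_meta)   # absolute timestamp derived from --ttl

self.merge_backup(backup_dir, 'node', page_id)
self.assertEqual(page_meta['note'],
                 self.show_pb(backup_dir, 'node', page_id)['note'])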
self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # add database - node.safe_psql( - 'postgres', - 'CREATE DATABASE testdb') - - # take FULL backup - self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--note=hello', '--ttl=20d']) - - # create database - node.safe_psql( - 'postgres', - 'create DATABASE testdb1') - - # take PAGE backup - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - self.merge_backup(backup_dir, 'node', page_id) - - self.assertNotIn( - 'note', - self.show_pb(backup_dir, 'node', page_id)) - - self.assertNotIn( - 'expire-time', - self.show_pb(backup_dir, 'node', page_id)) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_multi_timeline_merge(self): - """ - Check that backup in PAGE mode choose - parent backup correctly: - t12 /---P--> - ... - t3 /----> - t2 /----> - t1 -F-----D-> - - P must have F as parent - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql("postgres", "create extension pageinspect") - - try: - node.safe_psql( - "postgres", - "create extension amcheck") - except QueryException as e: - node.safe_psql( - "postgres", - "create extension amcheck_next") - - node.pgbench_init(scale=20) - full_id = self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - self.backup_node(backup_dir, 'node', node, backup_type='delta') - - node.cleanup() - self.restore_node( - backup_dir, 'node', node, backup_id=full_id, - options=[ - '--recovery-target=immediate', - '--recovery-target-action=promote']) - - node.slow_start() - - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # create timelines - for i in range(2, 7): - node.cleanup() - self.restore_node( - backup_dir, 'node', node, - options=[ - '--recovery-target=latest', - '--recovery-target-action=promote', - '--recovery-target-timeline={0}'.format(i)]) - node.slow_start() - - # at this point there is i+1 timeline - pgbench = node.pgbench(options=['-T', '20', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # create backup at 2, 4 and 6 timeline - if i % 2 == 0: - self.backup_node(backup_dir, 'node', node, backup_type='page') - - page_id = self.backup_node(backup_dir, 'node', node, backup_type='page') - pgdata = self.pgdata_content(node.data_dir) - - self.merge_backup(backup_dir, 'node', page_id) - - result = node.safe_psql( - "postgres", "select * from pgbench_accounts") - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node(backup_dir, 'node', node_restored) - pgdata_restored = self.pgdata_content(node_restored.data_dir) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - result_new = node_restored.safe_psql( - "postgres", "select * from pgbench_accounts") - - self.assertEqual(result, result_new) - - self.compare_pgdata(pgdata, pgdata_restored) - - self.checkdb_node( - backup_dir, - 'node', - options=[ - '--amcheck', - '-d', 'postgres', '-p', str(node.port)]) - - 
self.checkdb_node( - backup_dir, - 'node', - options=[ - '--amcheck', - '-d', 'postgres', '-p', str(node_restored.port)]) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_merge_page_header_map_retry(self): - """ - page header map cannot be trusted when - running retry - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=20) - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - delta_id = self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['--stream']) - - pgdata = self.pgdata_content(node.data_dir) - - gdb = self.merge_backup(backup_dir, 'node', delta_id, gdb=True) - - # our goal here is to get full backup with merged data files, - # but with old page header map - gdb.set_breakpoint('cleanup_header_map') - gdb.run_until_break() - gdb._execute('signal SIGKILL') - - self.merge_backup(backup_dir, 'node', delta_id) - - node.cleanup() - - self.restore_node(backup_dir, 'node', node) - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_missing_data_file(self): - """ - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Add data - node.pgbench_init(scale=1) - - # FULL backup - self.backup_node(backup_dir, 'node', node) - - # Change data - pgbench = node.pgbench(options=['-T', '5', '-c', '1']) - pgbench.wait() - - # DELTA backup - delta_id = self.backup_node(backup_dir, 'node', node, backup_type='delta') - - path = node.safe_psql( - 'postgres', - "select pg_relation_filepath('pgbench_accounts')").decode('utf-8').rstrip() - - gdb = self.merge_backup( - backup_dir, "node", delta_id, - options=['--log-level-file=VERBOSE'], gdb=True) - gdb.set_breakpoint('merge_files') - gdb.run_until_break() - - # remove data file in incremental backup - file_to_remove = os.path.join( - backup_dir, 'backups', - 'node', delta_id, 'database', path) - - os.remove(file_to_remove) - - gdb.continue_execution_until_error() - - logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(logfile, 'r') as f: - logfile_content = f.read() - - self.assertIn( - 'ERROR: Cannot open backup file "{0}": No such file or directory'.format(file_to_remove), - logfile_content) - - # @unittest.skip("skip") - def test_missing_non_data_file(self): - """ - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - 
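# Both missing-file tests build the path of a file inside the backup catalog the
# same way: pg_relation_filepath() returns the path relative to PGDATA, and each
# backup stores its copy under <backup_dir>/backups/<instance>/<backup_id>/database/.
# Sketch, assuming delta_id is the incremental backup taken in the test:
rel_path = node.safe_psql(
    'postgres',
    "select pg_relation_filepath('pgbench_accounts')").decode('utf-8').rstrip()

file_in_backup = os.path.join(
    backup_dir, 'backups', 'node', delta_id, 'database', rel_path)

os.remove(file_in_backup)   # simulate a data file lost from the DELTA backup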
node.slow_start() - - # FULL backup - self.backup_node(backup_dir, 'node', node) - - # DELTA backup - delta_id = self.backup_node(backup_dir, 'node', node, backup_type='delta') - - gdb = self.merge_backup( - backup_dir, "node", delta_id, - options=['--log-level-file=VERBOSE'], gdb=True) - gdb.set_breakpoint('merge_files') - gdb.run_until_break() - - # remove data file in incremental backup - file_to_remove = os.path.join( - backup_dir, 'backups', - 'node', delta_id, 'database', 'backup_label') - - os.remove(file_to_remove) - - gdb.continue_execution_until_error() - - logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(logfile, 'r') as f: - logfile_content = f.read() - - self.assertIn( - 'ERROR: File "{0}" is not found'.format(file_to_remove), - logfile_content) - - self.assertIn( - 'ERROR: Backup files merging failed', - logfile_content) - - self.assertEqual( - 'MERGING', self.show_pb(backup_dir, 'node')[0]['status']) - - self.assertEqual( - 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) - - # @unittest.skip("skip") - def test_merge_remote_mode(self): - """ - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - full_id = self.backup_node(backup_dir, 'node', node) - - # DELTA backup - delta_id = self.backup_node(backup_dir, 'node', node, backup_type='delta') - - self.set_config(backup_dir, 'node', options=['--retention-window=1']) - - backups = os.path.join(backup_dir, 'backups', 'node') - with open( - os.path.join( - backups, full_id, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=5))) - - gdb = self.backup_node( - backup_dir, "node", node, - options=['--log-level-file=VERBOSE', '--merge-expired'], gdb=True) - gdb.set_breakpoint('merge_files') - gdb.run_until_break() - - logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log') - - with open(logfile, "w+") as f: - f.truncate() - - gdb.continue_execution_until_exit() - - logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(logfile, 'r') as f: - logfile_content = f.read() - - self.assertNotIn( - 'SSH', logfile_content) - - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) - - def test_merge_pg_filenode_map(self): - """ - https://github.com/postgrespro/pg_probackup/issues/320 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), - initdb_params=['--data-checksums']) - node1.cleanup() - - node.pgbench_init(scale=5) - - # FULL backup - self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '10', '-c', '1']) - - self.backup_node(backup_dir, 'node', node, backup_type='delta') - - node.safe_psql( 
- 'postgres', - 'reindex index pg_type_oid_index') - - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - self.merge_backup(backup_dir, 'node', backup_id) - - node.cleanup() - - self.restore_node(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - 'postgres', - 'select 1') - -# 1. Need new test with corrupted FULL backup -# 2. different compression levels diff --git a/tests/option_test.py b/tests/option_test.py deleted file mode 100644 index eec1bab44..000000000 --- a/tests/option_test.py +++ /dev/null @@ -1,231 +0,0 @@ -import unittest -import os -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -import locale - - -class OptionTest(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_help_1(self): - """help options""" - with open(os.path.join(self.dir_path, "expected/option_help.out"), "rb") as help_out: - self.assertEqual( - self.run_pb(["--help"]), - help_out.read().decode("utf-8") - ) - - # @unittest.skip("skip") - def test_version_2(self): - """help options""" - with open(os.path.join(self.dir_path, "expected/option_version.out"), "rb") as version_out: - self.assertIn( - version_out.read().decode("utf-8").strip(), - self.run_pb(["--version"]) - ) - - # @unittest.skip("skip") - def test_without_backup_path_3(self): - """backup command failure without backup mode option""" - try: - self.run_pb(["backup", "-b", "full"]) - self.assertEqual(1, 0, "Expecting Error because '-B' parameter is not specified.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: required parameter not specified: BACKUP_PATH (-B, --backup-path)', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_options_4(self): - """check options test""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node')) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - - # backup command failure without instance option - try: - self.run_pb(["backup", "-B", backup_dir, "-D", node.data_dir, "-b", "full"]) - self.assertEqual(1, 0, "Expecting Error because 'instance' parameter is not specified.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: required parameter not specified: --instance', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - # backup command failure without backup mode option - try: - self.run_pb(["backup", "-B", backup_dir, "--instance=node", "-D", node.data_dir]) - self.assertEqual(1, 0, "Expecting Error because '-b' parameter is not specified.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: required parameter not specified: BACKUP_MODE (-b, --backup-mode)', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - # backup command failure with invalid backup mode option - try: - self.run_pb(["backup", "-B", backup_dir, "--instance=node", "-b", "bad"]) - self.assertEqual(1, 0, "Expecting Error because backup-mode parameter is invalid.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: 
invalid backup-mode "bad"', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - # delete failure without delete options - try: - self.run_pb(["delete", "-B", backup_dir, "--instance=node"]) - # we should die here because exception is what we expect to happen - self.assertEqual(1, 0, "Expecting Error because delete options are omitted.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: You must specify at least one of the delete options: ' - '--delete-expired |--delete-wal |--merge-expired |--status |(-i, --backup-id)', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - - # delete failure without ID - try: - self.run_pb(["delete", "-B", backup_dir, "--instance=node", '-i']) - # we should die here because exception is what we expect to happen - self.assertEqual(1, 0, "Expecting Error because backup ID is omitted.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "option requires an argument -- 'i'", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_options_5(self): - """check options test""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node')) - - output = self.init_pb(backup_dir) - self.assertIn( - "INFO: Backup catalog", - output) - - self.assertIn( - "successfully inited", - output) - self.add_instance(backup_dir, 'node', node) - - node.slow_start() - - # syntax error in pg_probackup.conf - conf_file = os.path.join(backup_dir, "backups", "node", "pg_probackup.conf") - with open(conf_file, "a") as conf: - conf.write(" = INFINITE\n") - try: - self.backup_node(backup_dir, 'node', node) - # we should die here because exception is what we expect to happen - self.assertEqual(1, 0, "Expecting Error because of garbage in pg_probackup.conf.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Syntax error in " = INFINITE', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - self.clean_pb(backup_dir) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - - # invalid value in pg_probackup.conf - with open(conf_file, "a") as conf: - conf.write("BACKUP_MODE=\n") - - try: - self.backup_node(backup_dir, 'node', node, backup_type=None), - # we should die here because exception is what we expect to happen - self.assertEqual(1, 0, "Expecting Error because of invalid backup-mode in pg_probackup.conf.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Invalid option "BACKUP_MODE" in file', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - self.clean_pb(backup_dir) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - - # Command line parameters should override file values - with open(conf_file, "a") as conf: - conf.write("retention-redundancy=1\n") - - self.assertEqual(self.show_config(backup_dir, 'node')['retention-redundancy'], '1') - - # User cannot send --system-identifier parameter via command line - try: - self.backup_node(backup_dir, 'node', node, 
options=["--system-identifier", "123"]), - # we should die here because exception is what we expect to happen - self.assertEqual(1, 0, "Expecting Error because option system-identifier cannot be specified in command line.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Option system-identifier cannot be specified in command line', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - # invalid value in pg_probackup.conf - with open(conf_file, "a") as conf: - conf.write("SMOOTH_CHECKPOINT=FOO\n") - - try: - self.backup_node(backup_dir, 'node', node) - # we should die here because exception is what we expect to happen - self.assertEqual(1, 0, "Expecting Error because option -C should be boolean.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Invalid option "SMOOTH_CHECKPOINT" in file', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - self.clean_pb(backup_dir) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - - # invalid option in pg_probackup.conf - with open(conf_file, "a") as conf: - conf.write("TIMELINEID=1\n") - - try: - self.backup_node(backup_dir, 'node', node) - # we should die here because exception is what we expect to happen - self.assertEqual(1, 0, 'Expecting Error because of invalid option "TIMELINEID".\n Output: {0} \n CMD: {1}'.format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Invalid option "TIMELINEID" in file', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_help_6(self): - """help options""" - if ProbackupTest.enable_nls: - self.test_env['LC_ALL'] = 'ru_RU.utf-8' - with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out: - self.assertEqual( - self.run_pb(["--help"]), - help_out.read().decode("utf-8") - ) - else: - self.skipTest( - 'You need configure PostgreSQL with --enabled-nls option for this test') diff --git a/tests/page_test.py b/tests/page_test.py deleted file mode 100644 index e77e5c827..000000000 --- a/tests/page_test.py +++ /dev/null @@ -1,1424 +0,0 @@ -import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from testgres import QueryException -from datetime import datetime, timedelta -import subprocess -import gzip -import shutil - -class PageTest(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - def test_basic_page_vacuum_truncate(self): - """ - make node, create table, take full backup, - delete last 3 pages, vacuum relation, - take page backup, take second page backup, - restore last page backup and check data correctness - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '300s'}) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node_restored.cleanup() - node.slow_start() - self.create_tblspace_in_node(node, 'somedata') - - node.safe_psql( - 
"postgres", - "create sequence t_seq; " - "create table t_heap tablespace somedata as select i as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1024) i;") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - self.backup_node(backup_dir, 'node', node) - - # TODO: make it dynamic - node.safe_psql( - "postgres", - "delete from t_heap where ctid >= '(11,0)'") - node.safe_psql( - "postgres", - "vacuum t_heap") - - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - old_tablespace = self.get_tblspace_path(node, 'somedata') - new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format(old_tablespace, new_tablespace)]) - - # Physical comparison - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - # Logical comparison - result1 = node.safe_psql( - "postgres", - "select * from t_heap") - - result2 = node_restored.safe_psql( - "postgres", - "select * from t_heap") - - self.assertEqual(result1, result2) - - # @unittest.skip("skip") - def test_page_vacuum_truncate_1(self): - """ - make node, create table, take full backup, - delete all data, vacuum relation, - take page backup, insert some data, - take second page backup and check data correctness - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "create sequence t_seq; " - "create table t_heap as select i as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1024) i") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "delete from t_heap") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - node.safe_psql( - "postgres", - "insert into t_heap select i as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1) i") - - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - pgdata = self.pgdata_content(node.data_dir) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node(backup_dir, 'node', node_restored) - - # Physical comparison - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - # @unittest.skip("skip") - def test_page_stream(self): - """ - make archive node, take full and page stream backups, - restore them and check data correctness - """ - self.maxDiff = None - backup_dir = os.path.join(self.tmp_path, self.module_name, 
self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'} - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector " - "from generate_series(0,100) i") - - full_result = node.execute("postgres", "SELECT * FROM t_heap") - full_backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='full', options=['--stream']) - - # PAGE BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector " - "from generate_series(100,200) i") - page_result = node.execute("postgres", "SELECT * FROM t_heap") - page_backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=['--stream', '-j', '4']) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # Drop Node - node.cleanup() - - # Check full backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(full_backup_id), - self.restore_node( - backup_dir, 'node', node, - backup_id=full_backup_id, options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(self.output), self.cmd)) - - node.slow_start() - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(full_result, full_result_new) - node.cleanup() - - # Check page backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(page_backup_id), - self.restore_node( - backup_dir, 'node', node, - backup_id=page_backup_id, options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(self.output), self.cmd)) - - # GET RESTORED PGDATA AND COMPARE - if self.paranoia: - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - node.slow_start() - page_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(page_result, page_result_new) - node.cleanup() - - # @unittest.skip("skip") - def test_page_archive(self): - """ - make archive node, take full and page archive backups, - restore them and check data correctness - """ - self.maxDiff = None - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'} - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") - full_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='full') - - # PAGE BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, " - "md5(i::text) as text, md5(i::text)::tsvector as tsvector " - "from generate_series(100, 200) i") - page_result = node.execute("postgres", "SELECT * FROM t_heap") - page_backup_id = self.backup_node( - backup_dir, 
'node', node, - backup_type='page', options=["-j", "4"]) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # Drop Node - node.cleanup() - - # Restore and check full backup - self.assertIn("INFO: Restore of backup {0} completed.".format( - full_backup_id), - self.restore_node( - backup_dir, 'node', node, - backup_id=full_backup_id, - options=[ - "-j", "4", - "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - node.slow_start() - - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(full_result, full_result_new) - node.cleanup() - - # Restore and check page backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(page_backup_id), - self.restore_node( - backup_dir, 'node', node, - backup_id=page_backup_id, - options=[ - "-j", "4", - "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - # GET RESTORED PGDATA AND COMPARE - if self.paranoia: - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - node.slow_start() - - page_result_new = node.execute("postgres", "SELECT * FROM t_heap") - self.assertEqual(page_result, page_result_new) - node.cleanup() - - # @unittest.skip("skip") - def test_page_multiple_segments(self): - """ - Make node, create table with multiple segments, - write some data to it, check page and data correctness - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'fsync': 'off', - 'shared_buffers': '1GB', - 'maintenance_work_mem': '1GB', - 'full_page_writes': 'off'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.create_tblspace_in_node(node, 'somedata') - - # CREATE TABLE - node.pgbench_init(scale=100, options=['--tablespace=somedata']) - # FULL BACKUP - self.backup_node(backup_dir, 'node', node) - - # PGBENCH STUFF - pgbench = node.pgbench(options=['-T', '50', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # GET LOGICAL CONTENT FROM NODE - result = node.safe_psql("postgres", "select count(*) from pgbench_accounts") - # PAGE BACKUP - self.backup_node(backup_dir, 'node', node, backup_type='page') - - # GET PHYSICAL CONTENT FROM NODE - pgdata = self.pgdata_content(node.data_dir) - - # RESTORE NODE - restored_node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'restored_node')) - restored_node.cleanup() - tblspc_path = self.get_tblspace_path(node, 'somedata') - tblspc_path_new = self.get_tblspace_path( - restored_node, 'somedata_restored') - - self.restore_node( - backup_dir, 'node', restored_node, - options=[ - "-j", "4", - "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)]) - - # GET PHYSICAL CONTENT FROM NODE_RESTORED - pgdata_restored = self.pgdata_content(restored_node.data_dir) - - # START RESTORED NODE - self.set_auto_conf(restored_node, {'port': restored_node.port}) - restored_node.slow_start() - - result_new = restored_node.safe_psql( - "postgres", "select count(*) from pgbench_accounts") - - # COMPARE RESTORED FILES - self.assertEqual(result, result_new, 'data is lost') - - if self.paranoia: - self.compare_pgdata(pgdata, 
pgdata_restored) - - # @unittest.skip("skip") - def test_page_delete(self): - """ - Make node, create tablespace with table, take full backup, - delete everything from table, vacuum table, take page backup, - restore page backup, compare . - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s', - } - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.create_tblspace_in_node(node, 'somedata') - # FULL backup - self.backup_node(backup_dir, 'node', node) - node.safe_psql( - "postgres", - "create table t_heap tablespace somedata as select i as id," - " md5(i::text) as text, md5(i::text)::tsvector as tsvector" - " from generate_series(0,100) i") - - node.safe_psql( - "postgres", - "delete from t_heap") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - # PAGE BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='page') - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format( - self.get_tblspace_path(node, 'somedata'), - self.get_tblspace_path(node_restored, 'somedata')) - ] - ) - - # GET RESTORED PGDATA AND COMPARE - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - # @unittest.skip("skip") - def test_page_delete_1(self): - """ - Make node, create tablespace with table, take full backup, - delete everything from table, vacuum table, take page backup, - restore page backup, compare . 
- """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s', - } - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.create_tblspace_in_node(node, 'somedata') - - node.safe_psql( - "postgres", - "create table t_heap tablespace somedata as select i as id," - " md5(i::text) as text, md5(i::text)::tsvector as tsvector" - " from generate_series(0,100) i" - ) - # FULL backup - self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "delete from t_heap" - ) - - node.safe_psql( - "postgres", - "vacuum t_heap" - ) - - # PAGE BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='page') - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored') - ) - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format( - self.get_tblspace_path(node, 'somedata'), - self.get_tblspace_path(node_restored, 'somedata')) - ] - ) - - # GET RESTORED PGDATA AND COMPARE - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - def test_parallel_pagemap(self): - """ - Test for parallel WAL segments reading, during which pagemap is built - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - # Initialize instance and backup directory - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={ - "hot_standby": "on" - } - ) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node_restored.cleanup() - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Do full backup - self.backup_node(backup_dir, 'node', node) - show_backup = self.show_pb(backup_dir, 'node')[0] - - self.assertEqual(show_backup['status'], "OK") - self.assertEqual(show_backup['backup-mode'], "FULL") - - # Fill instance with data and make several WAL segments ... - with node.connect() as conn: - conn.execute("create table test (id int)") - for x in range(0, 8): - conn.execute( - "insert into test select i from generate_series(1,100) s(i)") - conn.commit() - self.switch_wal_segment(conn) - count1 = conn.execute("select count(*) from test") - - # ... 
and do page backup with parallel pagemap - self.backup_node( - backup_dir, 'node', node, backup_type="page", options=["-j", "4"]) - show_backup = self.show_pb(backup_dir, 'node')[1] - - self.assertEqual(show_backup['status'], "OK") - self.assertEqual(show_backup['backup-mode'], "PAGE") - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # Restore it - self.restore_node(backup_dir, 'node', node_restored) - - # Physical comparison - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - # Check restored node - count2 = node_restored.execute("postgres", "select count(*) from test") - - self.assertEqual(count1, count2) - - # Clean after yourself - node.cleanup() - node_restored.cleanup() - - def test_parallel_pagemap_1(self): - """ - Test for parallel WAL segments reading, during which pagemap is built - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - # Initialize instance and backup directory - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={} - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Do full backup - self.backup_node(backup_dir, 'node', node) - show_backup = self.show_pb(backup_dir, 'node')[0] - - self.assertEqual(show_backup['status'], "OK") - self.assertEqual(show_backup['backup-mode'], "FULL") - - # Fill instance with data and make several WAL segments ... - node.pgbench_init(scale=10) - - # do page backup in single thread - page_id = self.backup_node( - backup_dir, 'node', node, backup_type="page") - - self.delete_pb(backup_dir, 'node', page_id) - - # ... 
and do page backup with parallel pagemap - self.backup_node( - backup_dir, 'node', node, backup_type="page", options=["-j", "4"]) - show_backup = self.show_pb(backup_dir, 'node')[1] - - self.assertEqual(show_backup['status'], "OK") - self.assertEqual(show_backup['backup-mode'], "PAGE") - - # Drop node and restore it - node.cleanup() - self.restore_node(backup_dir, 'node', node) - node.slow_start() - - # Clean after yourself - node.cleanup() - - # @unittest.skip("skip") - def test_page_backup_with_lost_wal_segment(self): - """ - make node with archiving - make archive backup, then generate some wals with pgbench, - delete latest archived wal segment - run page backup, expecting error because of missing wal segment - make sure that backup status is 'ERROR' - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - - # make some wals - node.pgbench_init(scale=3) - - # delete last wal segment - wals_dir = os.path.join(backup_dir, 'wal', 'node') - wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join( - wals_dir, f)) and not f.endswith('.backup') and not f.endswith('.part')] - wals = map(str, wals) - file = os.path.join(wals_dir, max(wals)) - os.remove(file) - if self.archive_compress: - file = file[:-3] - - # Single-thread PAGE backup - try: - self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.assertEqual( - 1, 0, - "Expecting Error because of wal segment disappearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'Could not read WAL record at' in e.message and - 'is absent' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'ERROR', - self.show_pb(backup_dir, 'node')[1]['status'], - 'Backup {0} should have STATUS "ERROR"') - - # Multi-thread PAGE backup - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='page', - options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of wal segment disappearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'Could not read WAL record at' in e.message and - 'is absent' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'ERROR', - self.show_pb(backup_dir, 'node')[2]['status'], - 'Backup {0} should have STATUS "ERROR"') - - # @unittest.skip("skip") - def test_page_backup_with_corrupted_wal_segment(self): - """ - make node with archiving - make archive backup, then generate some wals with pgbench, - corrupt latest archived wal segment - run page backup, expecting error because of missing wal segment - make sure that backup status is 'ERROR' - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - - 
# make some wals - node.pgbench_init(scale=10) - - # delete last wal segment - wals_dir = os.path.join(backup_dir, 'wal', 'node') - wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join( - wals_dir, f)) and not f.endswith('.backup')] - wals = map(str, wals) - # file = os.path.join(wals_dir, max(wals)) - - if self.archive_compress: - original_file = os.path.join(wals_dir, '000000010000000000000004.gz') - tmp_file = os.path.join(backup_dir, '000000010000000000000004') - - with gzip.open(original_file, 'rb') as f_in, open(tmp_file, 'wb') as f_out: - shutil.copyfileobj(f_in, f_out) - - # drop healthy file - os.remove(original_file) - file = tmp_file - - else: - file = os.path.join(wals_dir, '000000010000000000000004') - - # corrupt file - print(file) - with open(file, "rb+", 0) as f: - f.seek(42) - f.write(b"blah") - f.flush() - f.close - - if self.archive_compress: - # compress corrupted file and replace with it old file - with open(file, 'rb') as f_in, gzip.open(original_file, 'wb', compresslevel=1) as f_out: - shutil.copyfileobj(f_in, f_out) - - file = os.path.join(wals_dir, '000000010000000000000004.gz') - - #if self.archive_compress: - # file = file[:-3] - - # Single-thread PAGE backup - try: - self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.assertEqual( - 1, 0, - "Expecting Error because of wal segment disappearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'Could not read WAL record at' in e.message and - 'Possible WAL corruption. Error has occured during reading WAL segment' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'ERROR', - self.show_pb(backup_dir, 'node')[1]['status'], - 'Backup {0} should have STATUS "ERROR"') - - # Multi-thread PAGE backup - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of wal segment disappearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'Could not read WAL record at' in e.message and - 'Possible WAL corruption. 
Error has occured during reading WAL segment "{0}"'.format( - file) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'ERROR', - self.show_pb(backup_dir, 'node')[2]['status'], - 'Backup {0} should have STATUS "ERROR"') - - # @unittest.skip("skip") - def test_page_backup_with_alien_wal_segment(self): - """ - make two nodes with archiving - take archive full backup from both nodes, - generate some wals with pgbench on both nodes, - move latest archived wal segment from second node to first node`s archive - run page backup on first node - expecting error because of alien wal segment - make sure that backup status is 'ERROR' - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - alien_node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'alien_node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.add_instance(backup_dir, 'alien_node', alien_node) - self.set_archiving(backup_dir, 'alien_node', alien_node) - alien_node.slow_start() - - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - self.backup_node( - backup_dir, 'alien_node', alien_node, options=['--stream']) - - # make some wals - node.safe_psql( - "postgres", - "create sequence t_seq; " - "create table t_heap as select i as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1000) i;") - - alien_node.safe_psql( - "postgres", - "create database alien") - - alien_node.safe_psql( - "alien", - "create sequence t_seq; " - "create table t_heap_alien as select i as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,100000) i;") - - # copy latest wal segment - wals_dir = os.path.join(backup_dir, 'wal', 'alien_node') - wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join( - wals_dir, f)) and not f.endswith('.backup')] - wals = map(str, wals) - filename = max(wals) - file = os.path.join(wals_dir, filename) - file_destination = os.path.join( - os.path.join(backup_dir, 'wal', 'node'), filename) -# file = os.path.join(wals_dir, '000000010000000000000004') - print(file) - print(file_destination) - os.remove(file_destination) - os.rename(file, file_destination) - - # Single-thread PAGE backup - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='page') - self.assertEqual( - 1, 0, - "Expecting Error because of alien wal segment.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'Could not read WAL record at' in e.message and - 'Possible WAL corruption. 
Error has occured during reading WAL segment' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'ERROR', - self.show_pb(backup_dir, 'node')[1]['status'], - 'Backup {0} should have STATUS "ERROR"') - - # Multi-thread PAGE backup - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of alien wal segment.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn('Could not read WAL record at', e.message) - self.assertIn('WAL file is from different database system: ' - 'WAL file database system identifier is', e.message) - self.assertIn('pg_control database system identifier is', e.message) - self.assertIn('Possible WAL corruption. Error has occured ' - 'during reading WAL segment', e.message) - - self.assertEqual( - 'ERROR', - self.show_pb(backup_dir, 'node')[2]['status'], - 'Backup {0} should have STATUS "ERROR"') - - # @unittest.skip("skip") - def test_multithread_page_backup_with_toast(self): - """ - make node, create toast, do multithread PAGE backup - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - - # make some wals - node.safe_psql( - "postgres", - "create table t3 as select i, " - "repeat(md5(i::text),5006056) as fat_attr " - "from generate_series(0,70) i") - - # Multi-thread PAGE backup - self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=["-j", "4"]) - - # @unittest.skip("skip") - def test_page_create_db(self): - """ - Make node, take full backup, create database db1, take page backup, - restore database and check it presense - """ - self.maxDiff = None - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'max_wal_size': '10GB', - 'checkpoint_timeout': '5min', - } - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - - self.backup_node( - backup_dir, 'node', node) - - # CREATE DATABASE DB1 - node.safe_psql("postgres", "create database db1") - node.safe_psql( - "db1", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,1000) i") - - # PAGE BACKUP - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, - backup_id=backup_id, options=["-j", "4"]) - - # COMPARE PHYSICAL CONTENT - if self.paranoia: - pgdata_restored = 
self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - node_restored.safe_psql('db1', 'select 1') - node_restored.cleanup() - - # DROP DATABASE DB1 - node.safe_psql( - "postgres", "drop database db1") - # SECOND PAGE BACKUP - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # RESTORE SECOND PAGE BACKUP - self.restore_node( - backup_dir, 'node', node_restored, - backup_id=backup_id, options=["-j", "4"] - ) - - # COMPARE PHYSICAL CONTENT - if self.paranoia: - pgdata_restored = self.pgdata_content( - node_restored.data_dir, ignore_ptrack=False) - self.compare_pgdata(pgdata, pgdata_restored) - - # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - try: - node_restored.safe_psql('db1', 'select 1') - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because we are connecting to deleted database" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd) - ) - except QueryException as e: - self.assertTrue( - 'FATAL: database "db1" does not exist' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd) - ) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_multi_timeline_page(self): - """ - Check that backup in PAGE mode choose - parent backup correctly: - t12 /---P--> - ... - t3 /----> - t2 /----> - t1 -F-----D-> - - P must have F as parent - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql("postgres", "create extension pageinspect") - - try: - node.safe_psql( - "postgres", - "create extension amcheck") - except QueryException as e: - node.safe_psql( - "postgres", - "create extension amcheck_next") - - node.pgbench_init(scale=20) - full_id = self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - self.backup_node(backup_dir, 'node', node, backup_type='delta') - - node.cleanup() - self.restore_node( - backup_dir, 'node', node, backup_id=full_id, - options=[ - '--recovery-target=immediate', - '--recovery-target-action=promote']) - - node.slow_start() - - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # create timelines - for i in range(2, 7): - node.cleanup() - self.restore_node( - backup_dir, 'node', node, - options=[ - '--recovery-target=latest', - '--recovery-target-action=promote', - '--recovery-target-timeline={0}'.format(i)]) - node.slow_start() - - # at this point there is i+1 timeline - pgbench = node.pgbench(options=['-T', '20', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # create backup at 2, 4 and 6 timeline - if i % 2 == 0: - self.backup_node(backup_dir, 'node', node, backup_type='page') - - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', - options=['--log-level-file=VERBOSE']) - - pgdata = 
self.pgdata_content(node.data_dir) - - result = node.safe_psql( - "postgres", "select * from pgbench_accounts") - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node(backup_dir, 'node', node_restored) - pgdata_restored = self.pgdata_content(node_restored.data_dir) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - result_new = node_restored.safe_psql( - "postgres", "select * from pgbench_accounts") - - self.assertEqual(result, result_new) - - self.compare_pgdata(pgdata, pgdata_restored) - - self.checkdb_node( - backup_dir, - 'node', - options=[ - '--amcheck', - '-d', 'postgres', '-p', str(node.port)]) - - self.checkdb_node( - backup_dir, - 'node', - options=[ - '--amcheck', - '-d', 'postgres', '-p', str(node_restored.port)]) - - backup_list = self.show_pb(backup_dir, 'node') - - self.assertEqual( - backup_list[2]['parent-backup-id'], - backup_list[0]['id']) - self.assertEqual(backup_list[2]['current-tli'], 3) - - self.assertEqual( - backup_list[3]['parent-backup-id'], - backup_list[2]['id']) - self.assertEqual(backup_list[3]['current-tli'], 5) - - self.assertEqual( - backup_list[4]['parent-backup-id'], - backup_list[3]['id']) - self.assertEqual(backup_list[4]['current-tli'], 7) - - self.assertEqual( - backup_list[5]['parent-backup-id'], - backup_list[4]['id']) - self.assertEqual(backup_list[5]['current-tli'], 7) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_multitimeline_page_1(self): - """ - Check that backup in PAGE mode choose - parent backup correctly: - t2 /----> - t1 -F--P---D-> - - P must have F as parent - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'wal_log_hints': 'on'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql("postgres", "create extension pageinspect") - - try: - node.safe_psql( - "postgres", - "create extension amcheck") - except QueryException as e: - node.safe_psql( - "postgres", - "create extension amcheck_next") - - node.pgbench_init(scale=20) - full_id = self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench(options=['-T', '20', '-c', '1']) - pgbench.wait() - - page1 = self.backup_node(backup_dir, 'node', node, backup_type='page') - - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - page1 = self.backup_node(backup_dir, 'node', node, backup_type='delta') - - node.cleanup() - self.restore_node( - backup_dir, 'node', node, backup_id=page1, - options=[ - '--recovery-target=immediate', - '--recovery-target-action=promote']) - - node.slow_start() - - pgbench = node.pgbench(options=['-T', '20', '-c', '1', '--no-vacuum']) - pgbench.wait() - - print(self.backup_node( - backup_dir, 'node', node, backup_type='page', - options=['--log-level-console=LOG'], return_id=False)) - - pgdata = self.pgdata_content(node.data_dir) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node(backup_dir, 'node', node_restored) - pgdata_restored = self.pgdata_content(node_restored.data_dir) - - self.set_auto_conf(node_restored, 
{'port': node_restored.port}) - node_restored.slow_start() - - self.compare_pgdata(pgdata, pgdata_restored) - - @unittest.skip("skip") - # @unittest.expectedFailure - def test_page_pg_resetxlog(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'shared_buffers': '512MB', - 'max_wal_size': '3GB'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Create table - node.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap " - "as select nextval('t_seq')::int as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " -# "from generate_series(0,25600) i") - "from generate_series(0,2560) i") - - self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - 'postgres', - "update t_heap set id = nextval('t_seq'), text = md5(text), " - "tsvector = md5(repeat(tsvector::text, 10))::tsvector") - - self.switch_wal_segment(node) - - # kill the bastard - if self.verbose: - print('Killing postmaster. Losing Ptrack changes') - node.stop(['-m', 'immediate', '-D', node.data_dir]) - - # now smack it with sledgehammer - if node.major_version >= 10: - pg_resetxlog_path = self.get_bin_path('pg_resetwal') - wal_dir = 'pg_wal' - else: - pg_resetxlog_path = self.get_bin_path('pg_resetxlog') - wal_dir = 'pg_xlog' - - self.run_binary( - [ - pg_resetxlog_path, - '-D', - node.data_dir, - '-o 42', - '-f' - ], - asynchronous=False) - - if not node.status(): - node.slow_start() - else: - print("Die! Die! Why won't you die?... 
Why won't you die?") - exit(1) - - # take ptrack backup -# self.backup_node( -# backup_dir, 'node', node, -# backup_type='page', options=['--stream']) - - try: - self.backup_node( - backup_dir, 'node', node, backup_type='page') - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because instance was brutalized by pg_resetxlog" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd) - ) - except ProbackupException as e: - self.assertIn( - 'Insert error message', - e.message, - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(e.message), self.cmd)) - -# pgdata = self.pgdata_content(node.data_dir) -# -# node_restored = self.make_simple_node( -# base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) -# node_restored.cleanup() -# -# self.restore_node( -# backup_dir, 'node', node_restored) -# -# pgdata_restored = self.pgdata_content(node_restored.data_dir) -# self.compare_pgdata(pgdata, pgdata_restored) diff --git a/tests/pgpro2068_test.py b/tests/pgpro2068_test.py deleted file mode 100644 index 434ce2800..000000000 --- a/tests/pgpro2068_test.py +++ /dev/null @@ -1,188 +0,0 @@ -import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack -from datetime import datetime, timedelta -import subprocess -from time import sleep -import shutil -import signal -from testgres import ProcessType - - -class BugTest(ProbackupTest, unittest.TestCase): - - def test_minrecpoint_on_replica(self): - """ - https://jira.postgrespro.ru/browse/PGPRO-2068 - """ - self._check_gdb_flag_or_skip_test() - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - # 'checkpoint_timeout': '60min', - 'checkpoint_completion_target': '0.9', - 'bgwriter_delay': '10ms', - 'bgwriter_lru_maxpages': '1000', - 'bgwriter_lru_multiplier': '4.0', - 'max_wal_size': '256MB'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # take full backup and restore it as replica - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - # start replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.restore_node(backup_dir, 'node', replica, options=['-R']) - self.set_replica(node, replica) - self.add_instance(backup_dir, 'replica', replica) - self.set_archiving(backup_dir, 'replica', replica, replica=True) - - self.set_auto_conf( - replica, - {'port': replica.port, 'restart_after_crash': 'off'}) - - # we need those later - node.safe_psql( - "postgres", - "CREATE EXTENSION plpython3u") - - node.safe_psql( - "postgres", - "CREATE EXTENSION pageinspect") - - replica.slow_start(replica=True) - - # generate some data - node.pgbench_init(scale=10) - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "4", "-T", "20"]) - pgbench.wait() - pgbench.stdout.close() - - # generate some more data and leave it in background - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "4", "-j 4", "-T", "100"]) - - # wait for shared buffer on replica to be filled with dirty data - sleep(20) - - # get pids of replica background 
workers - startup_pid = replica.auxiliary_pids[ProcessType.Startup][0] - checkpointer_pid = replica.auxiliary_pids[ProcessType.Checkpointer][0] - bgwriter_pid = replica.auxiliary_pids[ProcessType.BackgroundWriter][0] - - # break checkpointer on UpdateLastRemovedPtr - gdb_checkpointer = self.gdb_attach(checkpointer_pid) - gdb_checkpointer._execute('handle SIGINT noprint nostop pass') - gdb_checkpointer._execute('handle SIGUSR1 noprint nostop pass') - gdb_checkpointer.set_breakpoint('UpdateLastRemovedPtr') - gdb_checkpointer.continue_execution_until_break() - - # break recovery on UpdateControlFile - gdb_recovery = self.gdb_attach(startup_pid) - gdb_recovery._execute('handle SIGINT noprint nostop pass') - gdb_recovery._execute('handle SIGUSR1 noprint nostop pass') - gdb_recovery.set_breakpoint('UpdateMinRecoveryPoint') - gdb_recovery.continue_execution_until_break() - gdb_recovery.set_breakpoint('UpdateControlFile') - gdb_recovery.continue_execution_until_break() - - # stop data generation - pgbench.wait() - pgbench.stdout.close() - - # kill someone, we need a crash - os.kill(int(bgwriter_pid), 9) - gdb_recovery._execute('detach') - gdb_checkpointer._execute('detach') - - # just to be sure - try: - replica.stop(['-m', 'immediate', '-D', replica.data_dir]) - except: - pass - - # MinRecLSN = replica.get_control_data()['Minimum recovery ending location'] - - # Promote replica with 'immediate' target action - if self.get_version(replica) >= self.version_to_num('12.0'): - recovery_config = 'postgresql.auto.conf' - else: - recovery_config = 'recovery.conf' - - replica.append_conf( - recovery_config, "recovery_target = 'immediate'") - replica.append_conf( - recovery_config, "recovery_target_action = 'pause'") - replica.slow_start(replica=True) - - if self.get_version(node) < 100000: - script = ''' -DO -$$ -relations = plpy.execute("select class.oid from pg_class class WHERE class.relkind IN ('r', 'i', 't', 'm') and class.relpersistence = 'p'") -current_xlog_lsn = plpy.execute("SELECT min_recovery_end_location as lsn FROM pg_control_recovery()")[0]['lsn'] -plpy.notice('CURRENT LSN: {0}'.format(current_xlog_lsn)) -found_corruption = False -for relation in relations: - pages_from_future = plpy.execute("with number_of_blocks as (select blknum from generate_series(0, pg_relation_size({0}) / 8192 -1) as blknum) select blknum, lsn, checksum, flags, lower, upper, special, pagesize, version, prune_xid from number_of_blocks, page_header(get_raw_page('{0}'::oid::regclass::text, number_of_blocks.blknum::int)) where lsn > '{1}'::pg_lsn".format(relation['oid'], current_xlog_lsn)) - - if pages_from_future.nrows() == 0: - continue - - for page in pages_from_future: - plpy.notice('Found page from future. 
OID: {0}, BLKNUM: {1}, LSN: {2}'.format(relation['oid'], page['blknum'], page['lsn'])) - found_corruption = True -if found_corruption: - plpy.error('Found Corruption') -$$ LANGUAGE plpython3u; -''' - else: - script = ''' -DO -$$ -relations = plpy.execute("select class.oid from pg_class class WHERE class.relkind IN ('r', 'i', 't', 'm') and class.relpersistence = 'p'") -current_xlog_lsn = plpy.execute("select pg_last_wal_replay_lsn() as lsn")[0]['lsn'] -plpy.notice('CURRENT LSN: {0}'.format(current_xlog_lsn)) -found_corruption = False -for relation in relations: - pages_from_future = plpy.execute("with number_of_blocks as (select blknum from generate_series(0, pg_relation_size({0}) / 8192 -1) as blknum) select blknum, lsn, checksum, flags, lower, upper, special, pagesize, version, prune_xid from number_of_blocks, page_header(get_raw_page('{0}'::oid::regclass::text, number_of_blocks.blknum::int)) where lsn > '{1}'::pg_lsn".format(relation['oid'], current_xlog_lsn)) - - if pages_from_future.nrows() == 0: - continue - - for page in pages_from_future: - plpy.notice('Found page from future. OID: {0}, BLKNUM: {1}, LSN: {2}'.format(relation['oid'], page['blknum'], page['lsn'])) - found_corruption = True -if found_corruption: - plpy.error('Found Corruption') -$$ LANGUAGE plpython3u; -''' - - # Find blocks from future - replica.safe_psql( - 'postgres', - script) - - # error is expected if version < 10.6 - # gdb_backup.continue_execution_until_exit() - - # do basebackup - - # do pg_probackup, expect error diff --git a/tests/pgpro560_test.py b/tests/pgpro560_test.py deleted file mode 100644 index b665fd200..000000000 --- a/tests/pgpro560_test.py +++ /dev/null @@ -1,123 +0,0 @@ -import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from datetime import datetime, timedelta -import subprocess -from time import sleep - - -class CheckSystemID(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_pgpro560_control_file_loss(self): - """ - https://jira.postgrespro.ru/browse/PGPRO-560 - make node with stream support, delete control file - make backup - check that backup failed - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - file = os.path.join(node.base_dir, 'data', 'global', 'pg_control') - # Not delete this file permanently - os.rename(file, os.path.join(node.base_dir, 'data', 'global', 'pg_control_copy')) - - try: - self.backup_node(backup_dir, 'node', node, options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because pg_control was deleted.\n " - "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: Could not open file' in e.message and - 'pg_control' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # Return this file to avoid Postger fail - os.rename(os.path.join(node.base_dir, 'data', 'global', 'pg_control_copy'), file) - - def test_pgpro560_systemid_mismatch(self): - """ - https://jira.postgrespro.ru/browse/PGPRO-560 - make node1 and node2 - feed to backup PGDATA from 
node1 and PGPORT from node2 - check that backup failed - """ - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), - set_replication=True, - initdb_params=['--data-checksums']) - - node1.slow_start() - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2'), - set_replication=True, - initdb_params=['--data-checksums']) - - node2.slow_start() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node1', node1) - - try: - self.backup_node(backup_dir, 'node1', node2, options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of SYSTEM ID mismatch.\n " - "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) - except ProbackupException as e: - if self.get_version(node1) > 90600: - self.assertTrue( - 'ERROR: Backup data directory was ' - 'initialized for system id' in e.message and - 'but connected instance system id is' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - else: - self.assertIn( - 'ERROR: System identifier mismatch. ' - 'Connected PostgreSQL instance has system id', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - try: - self.backup_node( - backup_dir, 'node1', node2, - data_dir=node1.data_dir, options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of of SYSTEM ID mismatch.\n " - "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) - except ProbackupException as e: - if self.get_version(node1) > 90600: - self.assertTrue( - 'ERROR: Backup data directory was initialized ' - 'for system id' in e.message and - 'but connected instance system id is' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - else: - self.assertIn( - 'ERROR: System identifier mismatch. 
' - 'Connected PostgreSQL instance has system id', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) diff --git a/tests/pgpro589_test.py b/tests/pgpro589_test.py deleted file mode 100644 index 8ce8e1f56..000000000 --- a/tests/pgpro589_test.py +++ /dev/null @@ -1,72 +0,0 @@ -import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack -from datetime import datetime, timedelta -import subprocess - - -class ArchiveCheck(ProbackupTest, unittest.TestCase): - - def test_pgpro589(self): - """ - https://jira.postgrespro.ru/browse/PGPRO-589 - make node without archive support, make backup which should fail - check that backup status equal to ERROR - check that no files where copied to backup catalogue - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - - # make erroneous archive_command - self.set_auto_conf(node, {'archive_command': 'exit 0'}) - node.slow_start() - - node.pgbench_init(scale=5) - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "4", "-T", "10"] - ) - pgbench.wait() - pgbench.stdout.close() - path = node.safe_psql( - "postgres", - "select pg_relation_filepath('pgbench_accounts')").rstrip().decode( - "utf-8") - - try: - self.backup_node( - backup_dir, 'node', node, - options=['--archive-timeout=10']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of missing archive wal " - "segment with start_lsn.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'INFO: Wait for WAL segment' in e.message and - 'ERROR: WAL segment' in e.message and - 'could not be archived in 10 seconds' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - backup_id = self.show_pb(backup_dir, 'node')[0]['id'] - self.assertEqual( - 'ERROR', self.show_pb(backup_dir, 'node', backup_id)['status'], - 'Backup should have ERROR status') - file = os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', path) - self.assertFalse( - os.path.isfile(file), - "\n Start LSN was not found in archive but datafiles where " - "copied to backup catalogue.\n For example: {0}\n " - "It is not optimal".format(file)) diff --git a/tests/ptrack_test.py b/tests/ptrack_test.py deleted file mode 100644 index 6e5786f8c..000000000 --- a/tests/ptrack_test.py +++ /dev/null @@ -1,4407 +0,0 @@ -import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack -from datetime import datetime, timedelta -import subprocess -from testgres import QueryException, StartNodeException -import shutil -import sys -from time import sleep -from threading import Thread - - -class PtrackTest(ProbackupTest, unittest.TestCase): - def setUp(self): - if self.pg_config_version < self.version_to_num('11.0'): - self.skipTest('You need PostgreSQL >= 11 for this test') - self.fname = self.id().split('.')[3] - - # @unittest.skip("skip") - def test_drop_rel_during_backup_ptrack(self): - """ - drop relation during ptrack backup - """ - self._check_gdb_flag_or_skip_test() - - 
backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - node.safe_psql( - "postgres", - "create table t_heap as select i" - " as id from generate_series(0,100) i") - - relative_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - - absolute_path = os.path.join(node.data_dir, relative_path) - - # FULL backup - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - # PTRACK backup - gdb = self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', - gdb=True, options=['--log-level-file=LOG']) - - gdb.set_breakpoint('backup_files') - gdb.run_until_break() - - # REMOVE file - os.remove(absolute_path) - - # File removed, we can proceed with backup - gdb.continue_execution_until_exit() - - pgdata = self.pgdata_content(node.data_dir) - - with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: - log_content = f.read() - self.assertTrue( - 'LOG: File not found: "{0}"'.format(absolute_path) in log_content, - 'File "{0}" should be deleted but it`s not'.format(absolute_path)) - - node.cleanup() - self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) - - # Physical comparison - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_ptrack_without_full(self): - """ptrack backup without validated full backup""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - ptrack_enable=True) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - try: - self.backup_node(backup_dir, 'node', node, backup_type="ptrack") - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be possible " - "without valid full backup.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "WARNING: Valid full backup on current timeline 1 is not found" in e.message and - "ERROR: Create new full backup before an incremental one" in e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['status'], - "ERROR") - - # @unittest.skip("skip") - def test_ptrack_threads(self): - """ptrack multi thread backup mode""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - ptrack_enable=True) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.backup_node( - 
backup_dir, 'node', node, - backup_type="full", options=["-j", "4"]) - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") - - self.backup_node( - backup_dir, 'node', node, - backup_type="ptrack", options=["-j", "4"]) - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") - - # @unittest.skip("skip") - def test_ptrack_stop_pg(self): - """ - create node, take full backup, - restart node, check that ptrack backup - can be taken - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - node.pgbench_init(scale=1) - - # FULL backup - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - node.stop() - node.slow_start() - - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['--stream']) - - # @unittest.skip("skip") - def test_ptrack_multi_timeline_backup(self): - """ - t2 /------P2 - t1 ------F---*-----P1 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - node.pgbench_init(scale=5) - - # FULL backup - full_id = self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) - sleep(15) - - xid = node.safe_psql( - 'postgres', - 'SELECT txid_current()').decode('utf-8').rstrip() - pgbench.wait() - - self.backup_node(backup_dir, 'node', node, backup_type='ptrack') - - node.cleanup() - - # Restore from full backup to create Timeline 2 - print(self.restore_node( - backup_dir, 'node', node, - options=[ - '--recovery-target-xid={0}'.format(xid), - '--recovery-target-action=promote'])) - - node.slow_start() - - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - self.backup_node(backup_dir, 'node', node, backup_type='ptrack') - - pgdata = self.pgdata_content(node.data_dir) - - node.cleanup() - - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - node.slow_start() - - balance = node.safe_psql( - 'postgres', - 'select (select sum(tbalance) from pgbench_tellers) - ' - '( select sum(bbalance) from pgbench_branches) + ' - '( select sum(abalance) from pgbench_accounts ) - ' - '(select sum(delta) from pgbench_history) as must_be_zero').decode('utf-8').rstrip() - - self.assertEqual('0', balance) - - # @unittest.skip("skip") - def test_ptrack_multi_timeline_backup_1(self): - """ - t2 /------ - t1 ---F---P1---* - - # delete P1 - t2 /------P2 - t1 ---F--------* - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - 
self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - node.pgbench_init(scale=5) - - # FULL backup - full_id = self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - ptrack_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack') - node.cleanup() - - self.restore_node(backup_dir, 'node', node) - - node.slow_start() - - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # delete old PTRACK backup - self.delete_pb(backup_dir, 'node', backup_id=ptrack_id) - - # take new PTRACK backup - self.backup_node(backup_dir, 'node', node, backup_type='ptrack') - - pgdata = self.pgdata_content(node.data_dir) - - node.cleanup() - - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - node.slow_start() - - balance = node.safe_psql( - 'postgres', - 'select (select sum(tbalance) from pgbench_tellers) - ' - '( select sum(bbalance) from pgbench_branches) + ' - '( select sum(abalance) from pgbench_accounts ) - ' - '(select sum(delta) from pgbench_history) as must_be_zero').\ - decode('utf-8').rstrip() - - self.assertEqual('0', balance) - - # @unittest.skip("skip") - def test_ptrack_eat_my_data(self): - """ - PGPRO-4051 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - node.pgbench_init(scale=50) - - self.backup_node(backup_dir, 'node', node) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - pgbench = node.pgbench(options=['-T', '300', '-c', '1', '--no-vacuum']) - - for i in range(10): - print("Iteration: {0}".format(i)) - - sleep(2) - - self.backup_node(backup_dir, 'node', node, backup_type='ptrack') -# pgdata = self.pgdata_content(node.data_dir) -# -# node_restored.cleanup() -# -# self.restore_node(backup_dir, 'node', node_restored) -# pgdata_restored = self.pgdata_content(node_restored.data_dir) -# -# self.compare_pgdata(pgdata, pgdata_restored) - - pgbench.terminate() - pgbench.wait() - - self.switch_wal_segment(node) - - result = node.safe_psql("postgres", "SELECT * FROM pgbench_accounts") - - node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored) - self.set_auto_conf( - node_restored, {'port': node_restored.port}) - - node_restored.slow_start() - - balance = node_restored.safe_psql( - 'postgres', - 'select (select sum(tbalance) from pgbench_tellers) - ' - '( select sum(bbalance) from pgbench_branches) + ' - '( select sum(abalance) from pgbench_accounts ) - ' - '(select sum(delta) from pgbench_history) as must_be_zero').decode('utf-8').rstrip() - - self.assertEqual('0', balance) - - # Logical comparison - self.assertEqual( - result, - node_restored.safe_psql( - 'postgres', - 'SELECT * FROM pgbench_accounts'), - 'Data loss') - - # @unittest.skip("skip") - def test_ptrack_simple(self): - """make node, make full and ptrack stream backups," - " 
restore them and check data correctness""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - node.safe_psql( - "postgres", - "create table t_heap as select i" - " as id from generate_series(0,1) i") - - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', - options=['--stream']) - - node.safe_psql( - "postgres", - "update t_heap set id = 100500") - - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['--stream']) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - result = node.safe_psql("postgres", "SELECT * FROM t_heap") - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) - - # Physical comparison - if self.paranoia: - pgdata_restored = self.pgdata_content( - node_restored.data_dir, ignore_ptrack=False) - self.compare_pgdata(pgdata, pgdata_restored) - - self.set_auto_conf( - node_restored, {'port': node_restored.port}) - - node_restored.slow_start() - - # Logical comparison - self.assertEqual( - result, - node_restored.safe_psql("postgres", "SELECT * FROM t_heap")) - - # @unittest.skip("skip") - def test_ptrack_unprivileged(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - # self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE DATABASE backupdb") - - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT 
EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 10 && < 15 - elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE 
ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 15 - else: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " - 
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - - node.safe_psql( - "backupdb", - "CREATE SCHEMA ptrack") - node.safe_psql( - "backupdb", - "CREATE EXTENSION ptrack WITH SCHEMA ptrack") - node.safe_psql( - "backupdb", - "GRANT USAGE ON SCHEMA ptrack TO backup") - - node.safe_psql( - "backupdb", - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup") - - if ProbackupTest.enterprise: - node.safe_psql( - "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " - 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;') - - self.backup_node( - backup_dir, 'node', node, - datname='backupdb', options=['--stream', "-U", "backup"]) - - self.backup_node( - backup_dir, 'node', node, datname='backupdb', - backup_type='ptrack', options=['--stream', "-U", "backup"]) - - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_ptrack_enable(self): - """make ptrack without full backup, should result in error""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s', - 'shared_preload_libraries': 'ptrack'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - # PTRACK BACKUP - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=["--stream"] - ) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because ptrack disabled.\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd - ) - ) - except ProbackupException as e: - self.assertIn( - 'ERROR: Ptrack is disabled\n', - e.message, - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(e.message), self.cmd) - ) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_ptrack_disable(self): - """ - Take full backup, disable ptrack restart postgresql, - enable ptrack, restart postgresql, take ptrack backup - which should fail - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - # FULL BACKUP - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - # DISABLE PTRACK - node.safe_psql('postgres', "alter system set ptrack.map_size to 0") - node.stop() - node.slow_start() - - # ENABLE PTRACK - node.safe_psql('postgres', "alter system set ptrack.map_size to '128'") - node.safe_psql('postgres', "alter system set shared_preload_libraries to 'ptrack'") - node.stop() - node.slow_start() - - # PTRACK BACKUP - try: - 
self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=["--stream"] - ) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because ptrack_enable was set to OFF at some" - " point after previous backup.\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd - ) - ) - except ProbackupException as e: - self.assertIn( - 'ERROR: LSN from ptrack_control', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd - ) - ) - - # @unittest.skip("skip") - def test_ptrack_uncommitted_xact(self): - """make ptrack backup while there is uncommitted open transaction""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'wal_level': 'replica'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - con = node.connect("postgres") - con.execute( - "create table t_heap as select i" - " as id from generate_series(0,1) i") - - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', - options=['--stream']) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, - node_restored.data_dir, options=["-j", "4"]) - - if self.paranoia: - pgdata_restored = self.pgdata_content( - node_restored.data_dir, ignore_ptrack=False) - - self.set_auto_conf( - node_restored, {'port': node_restored.port}) - - node_restored.slow_start() - - # Physical comparison - if self.paranoia: - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_ptrack_vacuum_full(self): - """make node, make full and ptrack stream backups, - restore them and check data correctness""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.create_tblspace_in_node(node, 'somedata') - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - node.safe_psql( - "postgres", - "create table t_heap tablespace somedata as select i" - " as id from generate_series(0,1000000) i" - ) - - pg_connect = node.connect("postgres", autocommit=True) - - gdb = self.gdb_attach(pg_connect.pid) - gdb.set_breakpoint('reform_and_rewrite_tuple') - - gdb.continue_execution_until_running() - - process = Thread( - target=pg_connect.execute, args=["VACUUM FULL t_heap"]) - process.start() - - while not gdb.stopped_in_breakpoint: - sleep(1) - - gdb.continue_execution_until_break(20) - - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) - - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) - - if self.paranoia: 
- pgdata = self.pgdata_content(node.data_dir) - - gdb.remove_all_breakpoints() - gdb._execute('detach') - process.join() - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - old_tablespace = self.get_tblspace_path(node, 'somedata') - new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') - - self.restore_node( - backup_dir, 'node', node_restored, - options=["-j", "4", "-T", "{0}={1}".format( - old_tablespace, new_tablespace)] - ) - - # Physical comparison - if self.paranoia: - pgdata_restored = self.pgdata_content( - node_restored.data_dir, ignore_ptrack=False) - self.compare_pgdata(pgdata, pgdata_restored) - - self.set_auto_conf( - node_restored, {'port': node_restored.port}) - - node_restored.slow_start() - - # @unittest.skip("skip") - def test_ptrack_vacuum_truncate(self): - """make node, create table, take full backup, - delete last 3 pages, vacuum relation, - take ptrack backup, take second ptrack backup, - restore last ptrack backup and check data correctness""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.create_tblspace_in_node(node, 'somedata') - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - node.safe_psql( - "postgres", - "create sequence t_seq; " - "create table t_heap tablespace somedata as select i as id, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1024) i;") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - node.safe_psql( - "postgres", - "delete from t_heap where ctid >= '(11,0)'") - - node.safe_psql( - "postgres", - "vacuum t_heap") - - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) - - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - old_tablespace = self.get_tblspace_path(node, 'somedata') - new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') - - self.restore_node( - backup_dir, 'node', node_restored, - options=["-j", "4", "-T", "{0}={1}".format( - old_tablespace, new_tablespace)] - ) - - # Physical comparison - if self.paranoia: - pgdata_restored = self.pgdata_content( - node_restored.data_dir, - ignore_ptrack=False - ) - self.compare_pgdata(pgdata, pgdata_restored) - - self.set_auto_conf( - node_restored, {'port': node_restored.port}) - - node_restored.slow_start() - - # @unittest.skip("skip") - def test_ptrack_get_block(self): - """ - make node, make full and ptrack stream backups, - restore them and check data correctness - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 
'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - node.safe_psql( - "postgres", - "create table t_heap as select i" - " as id from generate_series(0,1) i") - - self.backup_node(backup_dir, 'node', node, options=['--stream']) - gdb = self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', - options=['--stream'], - gdb=True) - - gdb.set_breakpoint('make_pagemap_from_ptrack_2') - gdb.run_until_break() - - node.safe_psql( - "postgres", - "update t_heap set id = 100500") - - gdb.continue_execution_until_exit() - - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['--stream']) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - result = node.safe_psql("postgres", "SELECT * FROM t_heap") - node.cleanup() - self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) - - # Physical comparison - if self.paranoia: - pgdata_restored = self.pgdata_content( - node.data_dir, ignore_ptrack=False) - self.compare_pgdata(pgdata, pgdata_restored) - - node.slow_start() - # Logical comparison - self.assertEqual( - result, - node.safe_psql("postgres", "SELECT * FROM t_heap")) - - # @unittest.skip("skip") - def test_ptrack_stream(self): - """make node, make full and ptrack stream backups, - restore them and check data correctness""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - # FULL BACKUP - node.safe_psql("postgres", "create sequence t_seq") - node.safe_psql( - "postgres", - "create table t_heap as select i as id, nextval('t_seq')" - " as t_seq, md5(i::text) as text, md5(i::text)::tsvector" - " as tsvector from generate_series(0,100) i") - - full_result = node.safe_psql("postgres", "SELECT * FROM t_heap") - full_backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - # PTRACK BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, nextval('t_seq') as t_seq," - " md5(i::text) as text, md5(i::text)::tsvector as tsvector" - " from generate_series(100,200) i") - - ptrack_result = node.safe_psql("postgres", "SELECT * FROM t_heap") - ptrack_backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['--stream']) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # Drop Node - node.cleanup() - - # Restore and check full backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(full_backup_id), - self.restore_node( - backup_dir, 'node', node, - backup_id=full_backup_id, options=["-j", "4"] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd) - ) - node.slow_start() - full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") - self.assertEqual(full_result, full_result_new) - node.cleanup() - - # Restore and check ptrack backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(ptrack_backup_id), - self.restore_node( - backup_dir, 'node', node, - backup_id=ptrack_backup_id, options=["-j", "4"] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - if self.paranoia: - 
pgdata_restored = self.pgdata_content( - node.data_dir, ignore_ptrack=False) - self.compare_pgdata(pgdata, pgdata_restored) - - node.slow_start() - ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") - self.assertEqual(ptrack_result, ptrack_result_new) - - # @unittest.skip("skip") - def test_ptrack_archive(self): - """make archive node, make full and ptrack backups, - check data correctness in restored instance""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as" - " select i as id," - " md5(i::text) as text," - " md5(i::text)::tsvector as tsvector" - " from generate_series(0,100) i") - - full_result = node.safe_psql("postgres", "SELECT * FROM t_heap") - full_backup_id = self.backup_node(backup_dir, 'node', node) - full_target_time = self.show_pb( - backup_dir, 'node', full_backup_id)['recovery-time'] - - # PTRACK BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id," - " md5(i::text) as text," - " md5(i::text)::tsvector as tsvector" - " from generate_series(100,200) i") - - ptrack_result = node.safe_psql("postgres", "SELECT * FROM t_heap") - ptrack_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='ptrack') - ptrack_target_time = self.show_pb( - backup_dir, 'node', ptrack_backup_id)['recovery-time'] - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - node.safe_psql( - "postgres", - "insert into t_heap select i as id," - " md5(i::text) as text," - " md5(i::text)::tsvector as tsvector" - " from generate_series(200, 300) i") - - # Drop Node - node.cleanup() - - # Check full backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(full_backup_id), - self.restore_node( - backup_dir, 'node', node, - backup_id=full_backup_id, - options=[ - "-j", "4", "--recovery-target-action=promote", - "--time={0}".format(full_target_time)] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd) - ) - node.slow_start() - - full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") - self.assertEqual(full_result, full_result_new) - node.cleanup() - - # Check ptrack backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(ptrack_backup_id), - self.restore_node( - backup_dir, 'node', node, - backup_id=ptrack_backup_id, - options=[ - "-j", "4", - "--time={0}".format(ptrack_target_time), - "--recovery-target-action=promote"] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd) - ) - - if self.paranoia: - pgdata_restored = self.pgdata_content( - node.data_dir, ignore_ptrack=False) - self.compare_pgdata(pgdata, pgdata_restored) - - node.slow_start() - ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") - self.assertEqual(ptrack_result, ptrack_result_new) - - node.cleanup() - - @unittest.skip("skip") - def test_ptrack_pgpro417(self): - """ - Make node, take full backup, take ptrack backup, - delete ptrack backup. Try to take ptrack backup, - which should fail. 
Actual only for PTRACK 1.x - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - node.safe_psql( - "postgres", - "SELECT * FROM t_heap") - - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='full', options=["--stream"]) - - start_lsn_full = self.show_pb( - backup_dir, 'node', backup_id)['start-lsn'] - - # PTRACK BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector " - "from generate_series(100,200) i") - node.safe_psql("postgres", "SELECT * FROM t_heap") - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=["--stream"]) - - start_lsn_ptrack = self.show_pb( - backup_dir, 'node', backup_id)['start-lsn'] - - self.delete_pb(backup_dir, 'node', backup_id) - - # SECOND PTRACK BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector " - "from generate_series(200,300) i") - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=["--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of LSN mismatch from ptrack_control " - "and previous backup start_lsn.\n" - " Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: LSN from ptrack_control' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - @unittest.skip("skip") - def test_page_pgpro417(self): - """ - Make archive node, take full backup, take page backup, - delete page backup. Try to take ptrack backup, which should fail. 
- Actual only for PTRACK 1.x - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - node.safe_psql("postgres", "SELECT * FROM t_heap") - - # PAGE BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector " - "from generate_series(100,200) i") - node.safe_psql("postgres", "SELECT * FROM t_heap") - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - self.delete_pb(backup_dir, 'node', backup_id) -# sys.exit(1) - - # PTRACK BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector " - "from generate_series(200,300) i") - - try: - self.backup_node(backup_dir, 'node', node, backup_type='ptrack') - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of LSN mismatch from ptrack_control " - "and previous backup start_lsn.\n " - "Output: {0}\n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: LSN from ptrack_control' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - @unittest.skip("skip") - def test_full_pgpro417(self): - """ - Make node, take two full backups, delete full second backup. - Try to take ptrack backup, which should fail. 
- Relevant only for PTRACK 1.x - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text," - " md5(i::text)::tsvector as tsvector " - " from generate_series(0,100) i" - ) - node.safe_psql("postgres", "SELECT * FROM t_heap") - self.backup_node(backup_dir, 'node', node, options=["--stream"]) - - # SECOND FULL BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text," - " md5(i::text)::tsvector as tsvector" - " from generate_series(100,200) i" - ) - node.safe_psql("postgres", "SELECT * FROM t_heap") - backup_id = self.backup_node( - backup_dir, 'node', node, options=["--stream"]) - - self.delete_pb(backup_dir, 'node', backup_id) - - # PTRACK BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector " - "from generate_series(200,300) i") - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=["--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of LSN mismatch from ptrack_control " - "and previous backup start_lsn.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd) - ) - except ProbackupException as e: - self.assertTrue( - "ERROR: LSN from ptrack_control" in e.message and - "Create new full backup before " - "an incremental one" in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_create_db(self): - """ - Make node, take full backup, create database db1, take ptrack backup, - restore database and check it presense - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'max_wal_size': '10GB'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - - node.safe_psql("postgres", "SELECT * FROM t_heap") - self.backup_node( - backup_dir, 'node', node, - options=["--stream"]) - - # CREATE DATABASE DB1 - node.safe_psql("postgres", "create database db1") - node.safe_psql( - "db1", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - - # PTRACK BACKUP - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=["--stream"]) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - node_restored.cleanup() - self.restore_node( - backup_dir, 
'node', node_restored, - backup_id=backup_id, options=["-j", "4"]) - - # COMPARE PHYSICAL CONTENT - if self.paranoia: - pgdata_restored = self.pgdata_content( - node_restored.data_dir, ignore_ptrack=False) - self.compare_pgdata(pgdata, pgdata_restored) - - # START RESTORED NODE - self.set_auto_conf( - node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - # DROP DATABASE DB1 - node.safe_psql( - "postgres", "drop database db1") - # SECOND PTRACK BACKUP - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=["--stream"] - ) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # RESTORE SECOND PTRACK BACKUP - node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, - backup_id=backup_id, options=["-j", "4"]) - - # COMPARE PHYSICAL CONTENT - if self.paranoia: - pgdata_restored = self.pgdata_content( - node_restored.data_dir, ignore_ptrack=False) - self.compare_pgdata(pgdata, pgdata_restored) - - # START RESTORED NODE - self.set_auto_conf( - node_restored, {'port': node_restored.port}) - node_restored.slow_start() - - try: - node_restored.safe_psql('db1', 'select 1') - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because we are connecting to deleted database" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except QueryException as e: - self.assertTrue( - 'FATAL: database "db1" does not exist' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_create_db_on_replica(self): - """ - Make node, take full backup, create replica from it, - take full backup from replica, - create database db1, take ptrack backup from replica, - restore database and check it presense - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) - - self.restore_node(backup_dir, 'node', replica) - - # Add replica - self.add_instance(backup_dir, 'replica', replica) - self.set_replica(node, replica, 'replica', synchronous=True) - replica.slow_start(replica=True) - - self.backup_node( - backup_dir, 'replica', replica, - options=[ - '-j10', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(node.port), - '--stream' - ] - ) - - # CREATE DATABASE DB1 - node.safe_psql("postgres", "create database db1") - node.safe_psql( - "db1", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - - # Wait until replica catch up with master - self.wait_until_replica_catch_with_master(node, replica) - replica.safe_psql('postgres', 'checkpoint') - - # PTRACK BACKUP 
- backup_id = self.backup_node( - backup_dir, 'replica', - replica, backup_type='ptrack', - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(node.port) - ] - ) - - if self.paranoia: - pgdata = self.pgdata_content(replica.data_dir) - - # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node( - backup_dir, 'replica', node_restored, - backup_id=backup_id, options=["-j", "4"]) - - # COMPARE PHYSICAL CONTENT - if self.paranoia: - pgdata_restored = self.pgdata_content( - node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_alter_table_set_tablespace_ptrack(self): - """Make node, create tablespace with table, take full backup, - alter tablespace location, take ptrack backup, restore database.""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - # FULL BACKUP - self.create_tblspace_in_node(node, 'somedata') - node.safe_psql( - "postgres", - "create table t_heap tablespace somedata as select i as id," - " md5(i::text) as text, md5(i::text)::tsvector as tsvector" - " from generate_series(0,100) i") - # FULL backup - self.backup_node(backup_dir, 'node', node, options=["--stream"]) - - # ALTER TABLESPACE - self.create_tblspace_in_node(node, 'somedata_new') - node.safe_psql( - "postgres", - "alter table t_heap set tablespace somedata_new") - - # sys.exit(1) - # PTRACK BACKUP - #result = node.safe_psql( - # "postgres", "select * from t_heap") - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', - options=["--stream"] - ) - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - # node.stop() - # node.cleanup() - - # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format( - self.get_tblspace_path(node, 'somedata'), - self.get_tblspace_path(node_restored, 'somedata') - ), - "-T", "{0}={1}".format( - self.get_tblspace_path(node, 'somedata_new'), - self.get_tblspace_path(node_restored, 'somedata_new') - ) - ] - ) - - # GET RESTORED PGDATA AND COMPARE - if self.paranoia: - pgdata_restored = self.pgdata_content( - node_restored.data_dir, ignore_ptrack=False) - self.compare_pgdata(pgdata, pgdata_restored) - - # START RESTORED NODE - self.set_auto_conf( - node_restored, {'port': node_restored.port}) - node_restored.slow_start() - -# result_new = node_restored.safe_psql( -# "postgres", "select * from t_heap") -# -# self.assertEqual(result, result_new, 'lost some data after restore') - - # @unittest.skip("skip") - def test_alter_database_set_tablespace_ptrack(self): - """Make node, create tablespace with database," - " take full backup, alter tablespace location," - " take ptrack backup, restore database.""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - 
base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - # FULL BACKUP - self.backup_node(backup_dir, 'node', node, options=["--stream"]) - - # CREATE TABLESPACE - self.create_tblspace_in_node(node, 'somedata') - - # ALTER DATABASE - node.safe_psql( - "template1", - "alter database postgres set tablespace somedata") - - # PTRACK BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', - options=["--stream"]) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - node.stop() - - # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - self.restore_node( - backup_dir, 'node', - node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format( - self.get_tblspace_path(node, 'somedata'), - self.get_tblspace_path(node_restored, 'somedata'))]) - - # GET PHYSICAL CONTENT and COMPARE PHYSICAL CONTENT - if self.paranoia: - pgdata_restored = self.pgdata_content( - node_restored.data_dir, ignore_ptrack=False) - self.compare_pgdata(pgdata, pgdata_restored) - - # START RESTORED NODE - node_restored.port = node.port - node_restored.slow_start() - - # @unittest.skip("skip") - def test_drop_tablespace(self): - """ - Make node, create table, alter table tablespace, take ptrack backup, - move table from tablespace, take ptrack backup - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.create_tblspace_in_node(node, 'somedata') - - # CREATE TABLE - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - - result = node.safe_psql("postgres", "select * from t_heap") - # FULL BACKUP - self.backup_node(backup_dir, 'node', node, options=["--stream"]) - - # Move table to tablespace 'somedata' - node.safe_psql( - "postgres", "alter table t_heap set tablespace somedata") - # PTRACK BACKUP - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=["--stream"]) - - # Move table back to default tablespace - node.safe_psql( - "postgres", "alter table t_heap set tablespace pg_default") - # SECOND PTRACK BACKUP - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=["--stream"]) - - # DROP TABLESPACE 'somedata' - node.safe_psql( - "postgres", "drop tablespace somedata") - # THIRD PTRACK BACKUP - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=["--stream"]) - - if self.paranoia: - pgdata = self.pgdata_content( - node.data_dir, ignore_ptrack=True) - - tblspace = self.get_tblspace_path(node, 'somedata') - node.cleanup() - shutil.rmtree(tblspace, ignore_errors=True) - self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) - - if self.paranoia: - pgdata_restored = self.pgdata_content( - 
node.data_dir, ignore_ptrack=True) - - node.slow_start() - - tblspc_exist = node.safe_psql( - "postgres", - "select exists(select 1 from " - "pg_tablespace where spcname = 'somedata')") - - if tblspc_exist.rstrip() == 't': - self.assertEqual( - 1, 0, - "Expecting Error because " - "tablespace 'somedata' should not be present") - - result_new = node.safe_psql("postgres", "select * from t_heap") - self.assertEqual(result, result_new) - - if self.paranoia: - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_ptrack_alter_tablespace(self): - """ - Make node, create table, alter table tablespace, take ptrack backup, - move table from tablespace, take ptrack backup - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.create_tblspace_in_node(node, 'somedata') - tblspc_path = self.get_tblspace_path(node, 'somedata') - - # CREATE TABLE - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - - result = node.safe_psql("postgres", "select * from t_heap") - # FULL BACKUP - self.backup_node(backup_dir, 'node', node, options=["--stream"]) - - # Move table to separate tablespace - node.safe_psql( - "postgres", - "alter table t_heap set tablespace somedata") - # GET LOGICAL CONTENT FROM NODE - result = node.safe_psql("postgres", "select * from t_heap") - - # FIRTS PTRACK BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', - options=["--stream"]) - - # GET PHYSICAL CONTENT FROM NODE - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # Restore ptrack backup - restored_node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'restored_node')) - restored_node.cleanup() - tblspc_path_new = self.get_tblspace_path( - restored_node, 'somedata_restored') - self.restore_node(backup_dir, 'node', restored_node, options=[ - "-j", "4", "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)]) - - # GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT - if self.paranoia: - pgdata_restored = self.pgdata_content( - restored_node.data_dir, ignore_ptrack=False) - self.compare_pgdata(pgdata, pgdata_restored) - - # START RESTORED NODE - self.set_auto_conf( - restored_node, {'port': restored_node.port}) - restored_node.slow_start() - - # COMPARE LOGICAL CONTENT - result_new = restored_node.safe_psql( - "postgres", "select * from t_heap") - self.assertEqual(result, result_new) - - restored_node.cleanup() - shutil.rmtree(tblspc_path_new, ignore_errors=True) - - # Move table to default tablespace - node.safe_psql( - "postgres", "alter table t_heap set tablespace pg_default") - # SECOND PTRACK BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', - options=["--stream"]) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - # Restore second ptrack backup and check table consistency - self.restore_node( - backup_dir, 'node', restored_node, - options=[ - "-j", "4", "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)]) - - # GET PHYSICAL CONTENT 
FROM RESTORED NODE and COMPARE PHYSICAL CONTENT - if self.paranoia: - pgdata_restored = self.pgdata_content( - restored_node.data_dir, ignore_ptrack=False) - self.compare_pgdata(pgdata, pgdata_restored) - - # START RESTORED NODE - self.set_auto_conf( - restored_node, {'port': restored_node.port}) - restored_node.slow_start() - - result_new = restored_node.safe_psql( - "postgres", "select * from t_heap") - self.assertEqual(result, result_new) - - # @unittest.skip("skip") - def test_ptrack_multiple_segments(self): - """ - Make node, create table, alter table tablespace, - take ptrack backup, move table from tablespace, take ptrack backup - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'full_page_writes': 'off'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.create_tblspace_in_node(node, 'somedata') - - # CREATE TABLE - node.pgbench_init(scale=100, options=['--tablespace=somedata']) - # FULL BACKUP - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - # PTRACK STUFF - if node.major_version < 11: - idx_ptrack = {'type': 'heap'} - idx_ptrack['path'] = self.get_fork_path(node, 'pgbench_accounts') - idx_ptrack['old_size'] = self.get_fork_size(node, 'pgbench_accounts') - idx_ptrack['old_pages'] = self.get_md5_per_page_for_fork( - idx_ptrack['path'], idx_ptrack['old_size']) - - pgbench = node.pgbench( - options=['-T', '30', '-c', '1', '--no-vacuum']) - pgbench.wait() - - node.safe_psql("postgres", "checkpoint") - - if node.major_version < 11: - idx_ptrack['new_size'] = self.get_fork_size( - node, - 'pgbench_accounts') - - idx_ptrack['new_pages'] = self.get_md5_per_page_for_fork( - idx_ptrack['path'], - idx_ptrack['new_size']) - - idx_ptrack['ptrack'] = self.get_ptrack_bits_per_page_for_fork( - node, - idx_ptrack['path']) - - if not self.check_ptrack_sanity(idx_ptrack): - self.assertTrue( - False, 'Ptrack has failed to register changes in data files') - - # GET LOGICAL CONTENT FROM NODE - # it`s stupid, because hint`s are ignored by ptrack - result = node.safe_psql("postgres", "select * from pgbench_accounts") - # FIRTS PTRACK BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) - - # GET PHYSICAL CONTENT FROM NODE - pgdata = self.pgdata_content(node.data_dir) - - # RESTORE NODE - restored_node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'restored_node')) - restored_node.cleanup() - tblspc_path = self.get_tblspace_path(node, 'somedata') - tblspc_path_new = self.get_tblspace_path( - restored_node, - 'somedata_restored') - - self.restore_node( - backup_dir, 'node', restored_node, - options=[ - "-j", "4", "-T", "{0}={1}".format( - tblspc_path, tblspc_path_new)]) - - # GET PHYSICAL CONTENT FROM NODE_RESTORED - if self.paranoia: - pgdata_restored = self.pgdata_content( - restored_node.data_dir, ignore_ptrack=False) - - # START RESTORED NODE - self.set_auto_conf( - restored_node, {'port': restored_node.port}) - restored_node.slow_start() - - result_new = restored_node.safe_psql( - "postgres", - "select * from pgbench_accounts") - - # COMPARE RESTORED FILES - self.assertEqual(result, result_new, 'data is lost') - - if self.paranoia: - 
self.compare_pgdata(pgdata, pgdata_restored) - - @unittest.skip("skip") - def test_atexit_fail(self): - """ - Take backups of every available types and check that PTRACK is clean. - Relevant only for PTRACK 1.x - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'max_connections': '15'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # Take FULL backup to clean every ptrack - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - try: - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', - options=["--stream", "-j 30"]) - - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because we are opening too many connections" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd) - ) - except ProbackupException as e: - self.assertIn( - 'setting its status to ERROR', - e.message, - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(e.message), self.cmd) - ) - - self.assertEqual( - node.safe_psql( - "postgres", - "select * from pg_is_in_backup()").rstrip(), - "f") - - @unittest.skip("skip") - # @unittest.expectedFailure - def test_ptrack_clean(self): - """ - Take backups of every available types and check that PTRACK is clean - Relevant only for PTRACK 1.x - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.create_tblspace_in_node(node, 'somedata') - - # Create table and indexes - node.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap tablespace somedata " - "as select i as id, nextval('t_seq') as t_seq, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - node.safe_psql( - "postgres", - "create index {0} on {1} using {2}({3}) " - "tablespace somedata".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], - idx_ptrack[i]['column'])) - - # Take FULL backup to clean every ptrack - self.backup_node( - backup_dir, 'node', node, - options=['-j10', '--stream']) - node.safe_psql('postgres', 'checkpoint') - - for i in idx_ptrack: - # get fork size and calculate it in pages - idx_ptrack[i]['size'] = self.get_fork_size(node, i) - # get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(node, i) - # get ptrack for every idx - idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( - node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) - self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) - - # Update everything and vacuum it - node.safe_psql( - 'postgres', - "update t_heap set t_seq = nextval('t_seq'), " - "text = md5(text), " - "tsvector = md5(repeat(tsvector::text, 10))::tsvector;") - node.safe_psql('postgres', 'vacuum t_heap') - - # Take PTRACK backup to clean every ptrack - backup_id = self.backup_node( - backup_dir, 'node', node, 
backup_type='ptrack', options=['-j10', '--stream']) - - node.safe_psql('postgres', 'checkpoint') - - for i in idx_ptrack: - # get new size of heap and indexes and calculate it in pages - idx_ptrack[i]['size'] = self.get_fork_size(node, i) - # update path to heap and index files in case they`ve changed - idx_ptrack[i]['path'] = self.get_fork_path(node, i) - # # get ptrack for every idx - idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( - node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) - # check that ptrack bits are cleaned - self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) - - # Update everything and vacuum it - node.safe_psql( - 'postgres', - "update t_heap set t_seq = nextval('t_seq'), " - "text = md5(text), " - "tsvector = md5(repeat(tsvector::text, 10))::tsvector;") - node.safe_psql('postgres', 'vacuum t_heap') - - # Take PAGE backup to clean every ptrack - self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=['-j10', '--stream']) - node.safe_psql('postgres', 'checkpoint') - - for i in idx_ptrack: - # get new size of heap and indexes and calculate it in pages - idx_ptrack[i]['size'] = self.get_fork_size(node, i) - # update path to heap and index files in case they`ve changed - idx_ptrack[i]['path'] = self.get_fork_path(node, i) - # # get ptrack for every idx - idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( - node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) - # check that ptrack bits are cleaned - self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) - - @unittest.skip("skip") - def test_ptrack_clean_replica(self): - """ - Take backups of every available types from - master and check that PTRACK on replica is clean. - Relevant only for PTRACK 1.x - """ - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'archive_timeout': '30s'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - master.slow_start() - - self.backup_node(backup_dir, 'master', master, options=['--stream']) - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.restore_node(backup_dir, 'master', replica) - - self.add_instance(backup_dir, 'replica', replica) - self.set_replica(master, replica, synchronous=True) - replica.slow_start(replica=True) - - # Create table and indexes - master.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap as select i as id, " - "nextval('t_seq') as t_seq, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - master.safe_psql( - "postgres", - "create index {0} on {1} using {2}({3})".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], - idx_ptrack[i]['column'])) - - # Take FULL backup to clean every ptrack - self.backup_node( - backup_dir, - 'replica', - replica, - options=[ - '-j10', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) - master.safe_psql('postgres', 'checkpoint') - - for i in idx_ptrack: - # get fork size and calculate it in pages - idx_ptrack[i]['size'] = self.get_fork_size(replica, i) - 
# get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(replica, i) - # get ptrack for every idx - idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( - replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) - self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) - - # Update everything and vacuum it - master.safe_psql( - 'postgres', - "update t_heap set t_seq = nextval('t_seq'), " - "text = md5(text), " - "tsvector = md5(repeat(tsvector::text, 10))::tsvector;") - master.safe_psql('postgres', 'vacuum t_heap') - - # Take PTRACK backup to clean every ptrack - backup_id = self.backup_node( - backup_dir, - 'replica', - replica, - backup_type='ptrack', - options=[ - '-j10', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) - master.safe_psql('postgres', 'checkpoint') - - for i in idx_ptrack: - # get new size of heap and indexes and calculate it in pages - idx_ptrack[i]['size'] = self.get_fork_size(replica, i) - # update path to heap and index files in case they`ve changed - idx_ptrack[i]['path'] = self.get_fork_path(replica, i) - # # get ptrack for every idx - idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( - replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) - # check that ptrack bits are cleaned - self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) - - # Update everything and vacuum it - master.safe_psql( - 'postgres', - "update t_heap set t_seq = nextval('t_seq'), text = md5(text), " - "tsvector = md5(repeat(tsvector::text, 10))::tsvector;") - master.safe_psql('postgres', 'vacuum t_heap') - master.safe_psql('postgres', 'checkpoint') - - # Take PAGE backup to clean every ptrack - self.backup_node( - backup_dir, - 'replica', - replica, - backup_type='page', - options=[ - '-j10', '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) - master.safe_psql('postgres', 'checkpoint') - - for i in idx_ptrack: - # get new size of heap and indexes and calculate it in pages - idx_ptrack[i]['size'] = self.get_fork_size(replica, i) - # update path to heap and index files in case they`ve changed - idx_ptrack[i]['path'] = self.get_fork_path(replica, i) - # # get ptrack for every idx - idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( - replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) - # check that ptrack bits are cleaned - self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_ptrack_cluster_on_btree(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.create_tblspace_in_node(node, 'somedata') - - # Create table and indexes - node.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap tablespace somedata " - "as select i as id, nextval('t_seq') as t_seq, " - "md5(i::text) as text, md5(repeat(i::text,10))::tsvector " - "as tsvector from generate_series(0,2560) i") - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - node.safe_psql( - "postgres", - 
"create index {0} on {1} using {2}({3}) " - "tablespace somedata".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], idx_ptrack[i]['column'])) - - node.safe_psql('postgres', 'vacuum t_heap') - node.safe_psql('postgres', 'checkpoint') - - if node.major_version < 11: - for i in idx_ptrack: - # get size of heap and indexes. size calculated in pages - idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) - # get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(node, i) - # calculate md5sums of pages - idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( - idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) - - node.safe_psql('postgres', 'delete from t_heap where id%2 = 1') - node.safe_psql('postgres', 'cluster t_heap using t_btree') - node.safe_psql('postgres', 'checkpoint') - - # CHECK PTRACK SANITY - if node.major_version < 11: - self.check_ptrack_map_sanity(node, idx_ptrack) - - # @unittest.skip("skip") - def test_ptrack_cluster_on_gist(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - # Create table and indexes - node.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap as select i as id, " - "nextval('t_seq') as t_seq, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - node.safe_psql( - "postgres", - "create index {0} on {1} using {2}({3})".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], idx_ptrack[i]['column'])) - - node.safe_psql('postgres', 'vacuum t_heap') - node.safe_psql('postgres', 'checkpoint') - - for i in idx_ptrack: - # get size of heap and indexes. 
size calculated in pages - idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) - # get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(node, i) - # calculate md5sums of pages - idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( - idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) - - node.safe_psql('postgres', 'delete from t_heap where id%2 = 1') - node.safe_psql('postgres', 'cluster t_heap using t_gist') - node.safe_psql('postgres', 'checkpoint') - - # CHECK PTRACK SANITY - if node.major_version < 11: - self.check_ptrack_map_sanity(node, idx_ptrack) - - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['-j10', '--stream']) - - pgdata = self.pgdata_content(node.data_dir) - node.cleanup() - - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_ptrack_cluster_on_btree_replica(self): - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - master.slow_start() - - if master.major_version >= 11: - master.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.backup_node(backup_dir, 'master', master, options=['--stream']) - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.restore_node(backup_dir, 'master', replica) - - self.add_instance(backup_dir, 'replica', replica) - self.set_replica(master, replica, synchronous=True) - replica.slow_start(replica=True) - - # Create table and indexes - master.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap as select i as id, " - "nextval('t_seq') as t_seq, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") - - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - master.safe_psql( - "postgres", - "create index {0} on {1} using {2}({3})".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], - idx_ptrack[i]['column'])) - - master.safe_psql('postgres', 'vacuum t_heap') - master.safe_psql('postgres', 'checkpoint') - - self.backup_node( - backup_dir, 'replica', replica, options=[ - '-j10', '--stream', '--master-host=localhost', - '--master-db=postgres', '--master-port={0}'.format( - master.port)]) - - for i in idx_ptrack: - # get size of heap and indexes. 
size calculated in pages - idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i) - # get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(replica, i) - # calculate md5sums of pages - idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( - idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - - master.safe_psql('postgres', 'delete from t_heap where id%2 = 1') - master.safe_psql('postgres', 'cluster t_heap using t_btree') - master.safe_psql('postgres', 'checkpoint') - - # Sync master and replica - self.wait_until_replica_catch_with_master(master, replica) - replica.safe_psql('postgres', 'checkpoint') - - # CHECK PTRACK SANITY - if master.major_version < 11: - self.check_ptrack_map_sanity(replica, idx_ptrack) - - self.backup_node( - backup_dir, 'replica', replica, - backup_type='ptrack', options=['-j10', '--stream']) - - pgdata = self.pgdata_content(replica.data_dir) - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node')) - node.cleanup() - - self.restore_node(backup_dir, 'replica', node) - - pgdata_restored = self.pgdata_content(replica.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_ptrack_cluster_on_gist_replica(self): - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - ptrack_enable=True) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - master.slow_start() - - if master.major_version >= 11: - master.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.backup_node(backup_dir, 'master', master, options=['--stream']) - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.restore_node(backup_dir, 'master', replica) - - self.add_instance(backup_dir, 'replica', replica) - self.set_replica(master, replica, 'replica', synchronous=True) - replica.slow_start(replica=True) - - # Create table and indexes - master.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap as select i as id, " - "nextval('t_seq') as t_seq, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") - - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - master.safe_psql( - "postgres", - "create index {0} on {1} using {2}({3})".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], - idx_ptrack[i]['column'])) - - master.safe_psql('postgres', 'vacuum t_heap') - master.safe_psql('postgres', 'checkpoint') - - # Sync master and replica - self.wait_until_replica_catch_with_master(master, replica) - replica.safe_psql('postgres', 'checkpoint') - - self.backup_node( - backup_dir, 'replica', replica, options=[ - '-j10', '--stream', '--master-host=localhost', - '--master-db=postgres', '--master-port={0}'.format( - master.port)]) - - for i in idx_ptrack: - # get size of heap and indexes. 
size calculated in pages - idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i) - # get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(replica, i) - # calculate md5sums of pages - idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( - idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - - master.safe_psql('postgres', 'DELETE FROM t_heap WHERE id%2 = 1') - master.safe_psql('postgres', 'CLUSTER t_heap USING t_gist') - - if master.major_version < 11: - master.safe_psql('postgres', 'CHECKPOINT') - - # Sync master and replica - self.wait_until_replica_catch_with_master(master, replica) - - if master.major_version < 11: - replica.safe_psql('postgres', 'CHECKPOINT') - self.check_ptrack_map_sanity(replica, idx_ptrack) - - self.backup_node( - backup_dir, 'replica', replica, - backup_type='ptrack', options=['-j10', '--stream']) - - if self.paranoia: - pgdata = self.pgdata_content(replica.data_dir) - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node')) - node.cleanup() - - self.restore_node(backup_dir, 'replica', node) - - if self.paranoia: - pgdata_restored = self.pgdata_content(replica.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_ptrack_empty(self): - """Take backups of every available types and check that PTRACK is clean""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.create_tblspace_in_node(node, 'somedata') - - # Create table - node.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap " - "(id int DEFAULT nextval('t_seq'), text text, tsvector tsvector) " - "tablespace somedata") - - # Take FULL backup to clean every ptrack - self.backup_node( - backup_dir, 'node', node, - options=['-j10', '--stream']) - - # Create indexes - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - node.safe_psql( - "postgres", - "create index {0} on {1} using {2}({3}) " - "tablespace somedata".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], - idx_ptrack[i]['column'])) - - node.safe_psql('postgres', 'checkpoint') - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - tblspace1 = self.get_tblspace_path(node, 'somedata') - tblspace2 = self.get_tblspace_path(node_restored, 'somedata') - - # Take PTRACK backup - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', - options=['-j10', '--stream']) - - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - self.restore_node( - backup_dir, 'node', node_restored, - backup_id=backup_id, - options=[ - "-j", "4", - "-T{0}={1}".format(tblspace1, tblspace2)]) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_ptrack_empty_replica(self): - """ - Take backups of every available types from master - and check that PTRACK on replica is clean - """ - master = 
self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums'], - ptrack_enable=True) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - master.slow_start() - - if master.major_version >= 11: - master.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.backup_node(backup_dir, 'master', master, options=['--stream']) - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.restore_node(backup_dir, 'master', replica) - - self.add_instance(backup_dir, 'replica', replica) - self.set_replica(master, replica, synchronous=True) - replica.slow_start(replica=True) - - # Create table - master.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap " - "(id int DEFAULT nextval('t_seq'), text text, tsvector tsvector)") - self.wait_until_replica_catch_with_master(master, replica) - - # Take FULL backup - self.backup_node( - backup_dir, - 'replica', - replica, - options=[ - '-j10', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) - - # Create indexes - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - master.safe_psql( - "postgres", - "create index {0} on {1} using {2}({3})".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], - idx_ptrack[i]['column'])) - - self.wait_until_replica_catch_with_master(master, replica) - - # Take PTRACK backup - backup_id = self.backup_node( - backup_dir, - 'replica', - replica, - backup_type='ptrack', - options=[ - '-j1', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) - - if self.paranoia: - pgdata = self.pgdata_content(replica.data_dir) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node( - backup_dir, 'replica', node_restored, - backup_id=backup_id, options=["-j", "4"]) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_ptrack_truncate(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.create_tblspace_in_node(node, 'somedata') - - # Create table and indexes - node.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap tablespace somedata " - "as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") - - if node.major_version < 11: - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - node.safe_psql( - "postgres", - "create index {0} on {1} using {2}({3}) " - "tablespace somedata".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], idx_ptrack[i]['column'])) - - 
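[Editorial sketch] The index-creation loops in these tests are driven by a shared idx_ptrack dictionary that is defined in the test helpers, not in this hunk; only its usage is visible here. A minimal, assumed example of one entry, inferred from the keys the tests read ('type', 'relation', 'column') and the per-fork slots they fill in later ('path', 'old_size', 'old_pages', 'ptrack') — the concrete values are illustrative, not the real fixture:

    # Hypothetical idx_ptrack entry, inferred from how the tests index into it;
    # the real fixture lives in the shared test helpers.
    idx_ptrack_example = {
        't_btree': {
            'type': 'btree',        # access method passed to CREATE INDEX
            'relation': 't_heap',   # table the index is built on
            'column': 'id',         # indexed column
            # filled in at runtime by the tests:
            # 'path', 'old_size', 'old_pages', 'new_size', 'new_pages', 'ptrack'
        },
    }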
self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - node.safe_psql('postgres', 'truncate t_heap') - node.safe_psql('postgres', 'checkpoint') - - if node.major_version < 11: - for i in idx_ptrack: - # get fork size and calculate it in pages - idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) - # get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(node, i) - # calculate md5sums for every page of this fork - idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( - idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - - # Make backup to clean every ptrack - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['-j10', '--stream']) - - pgdata = self.pgdata_content(node.data_dir) - - if node.major_version < 11: - for i in idx_ptrack: - idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( - node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']]) - self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size']) - - node.cleanup() - shutil.rmtree( - self.get_tblspace_path(node, 'somedata'), - ignore_errors=True) - - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_basic_ptrack_truncate_replica(self): - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'max_wal_size': '32MB', - 'archive_timeout': '10s', - 'checkpoint_timeout': '5min'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - master.slow_start() - - if master.major_version >= 11: - master.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.backup_node(backup_dir, 'master', master, options=['--stream']) - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.restore_node(backup_dir, 'master', replica) - - self.add_instance(backup_dir, 'replica', replica) - self.set_replica(master, replica, 'replica', synchronous=True) - replica.slow_start(replica=True) - - # Create table and indexes - master.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap " - "as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") - - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - master.safe_psql( - "postgres", - "create index {0} on {1} using {2}({3}) ".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], idx_ptrack[i]['column'])) - - # Sync master and replica - self.wait_until_replica_catch_with_master(master, replica) - replica.safe_psql('postgres', 'checkpoint') - - if replica.major_version < 11: - for i in idx_ptrack: - # get fork size and calculate it in pages - idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i) - # get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(replica, i) - # calculate md5sums for every page of this fork - idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( - idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - - # Make backup to clean every ptrack - self.backup_node( - backup_dir, 'replica', replica, - options=[ - '-j10', - '--stream', - 
'--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) - - if replica.major_version < 11: - for i in idx_ptrack: - idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( - replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']]) - self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size']) - - master.safe_psql('postgres', 'truncate t_heap') - - # Sync master and replica - self.wait_until_replica_catch_with_master(master, replica) - - if replica.major_version < 10: - replica.safe_psql( - "postgres", - "select pg_xlog_replay_pause()") - else: - replica.safe_psql( - "postgres", - "select pg_wal_replay_pause()") - - self.backup_node( - backup_dir, 'replica', replica, backup_type='ptrack', - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) - - pgdata = self.pgdata_content(replica.data_dir) - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node')) - node.cleanup() - - self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir) - - pgdata_restored = self.pgdata_content(node.data_dir) - - if self.paranoia: - self.compare_pgdata(pgdata, pgdata_restored) - - self.set_auto_conf(node, {'port': node.port}) - - node.slow_start() - - node.safe_psql( - 'postgres', - 'select 1') - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_ptrack_vacuum(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.create_tblspace_in_node(node, 'somedata') - - # Create table and indexes - node.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap tablespace somedata " - "as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - node.safe_psql( - "postgres", - "create index {0} on {1} using {2}({3}) " - "tablespace somedata".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], - idx_ptrack[i]['column'])) - - comparision_exclusion = self.get_known_bugs_comparision_exclusion_dict(node) - - node.safe_psql('postgres', 'vacuum t_heap') - node.safe_psql('postgres', 'checkpoint') - - # Make full backup to clean every ptrack - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) - - if node.major_version < 11: - for i in idx_ptrack: - # get fork size and calculate it in pages - idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) - # get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(node, i) - # calculate md5sums for every page of this fork - idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( - idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( - node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']]) - self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size']) - - # Delete some rows, vacuum it and make checkpoint - node.safe_psql('postgres', 'delete from t_heap where id%2 = 1') - 
node.safe_psql('postgres', 'vacuum t_heap') - node.safe_psql('postgres', 'checkpoint') - - # CHECK PTRACK SANITY - if node.major_version < 11: - self.check_ptrack_map_sanity(node, idx_ptrack) - - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['-j10', '--stream']) - - pgdata = self.pgdata_content(node.data_dir) - node.cleanup() - - shutil.rmtree( - self.get_tblspace_path(node, 'somedata'), - ignore_errors=True) - - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion) - - # @unittest.skip("skip") - def test_ptrack_vacuum_replica(self): - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - master.slow_start() - - if master.major_version >= 11: - master.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.backup_node(backup_dir, 'master', master, options=['--stream']) - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.restore_node(backup_dir, 'master', replica) - - self.add_instance(backup_dir, 'replica', replica) - self.set_replica(master, replica, 'replica', synchronous=True) - replica.slow_start(replica=True) - - # Create table and indexes - master.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap as select i as id, " - "md5(i::text) as text, md5(repeat(i::text,10))::tsvector " - "as tsvector from generate_series(0,2560) i") - - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - master.safe_psql( - "postgres", - "create index {0} on {1} using {2}({3})".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], idx_ptrack[i]['column'])) - - master.safe_psql('postgres', 'vacuum t_heap') - master.safe_psql('postgres', 'checkpoint') - - # Sync master and replica - self.wait_until_replica_catch_with_master(master, replica) - replica.safe_psql('postgres', 'checkpoint') - - # Make FULL backup to clean every ptrack - self.backup_node( - backup_dir, 'replica', replica, options=[ - '-j10', '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) - - if replica.major_version < 11: - for i in idx_ptrack: - # get fork size and calculate it in pages - idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i) - # get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(replica, i) - # calculate md5sums for every page of this fork - idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( - idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( - replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']]) - self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size']) - - # Delete some rows, vacuum it and make checkpoint - master.safe_psql('postgres', 'delete from t_heap where id%2 = 1') - master.safe_psql('postgres', 'vacuum t_heap') - master.safe_psql('postgres', 'checkpoint') - - # Sync master and replica - self.wait_until_replica_catch_with_master(master, replica) - 
replica.safe_psql('postgres', 'checkpoint') - - # CHECK PTRACK SANITY - if replica.major_version < 11: - self.check_ptrack_map_sanity(master, idx_ptrack) - - self.backup_node( - backup_dir, 'replica', replica, - backup_type='ptrack', options=['-j10', '--stream']) - - pgdata = self.pgdata_content(replica.data_dir) - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node')) - node.cleanup() - - self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_ptrack_vacuum_bits_frozen(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.create_tblspace_in_node(node, 'somedata') - - # Create table and indexes - res = node.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap tablespace somedata " - "as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - node.safe_psql( - "postgres", - "create index {0} on {1} using {2}({3}) " - "tablespace somedata".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], - idx_ptrack[i]['column'])) - - comparision_exclusion = self.get_known_bugs_comparision_exclusion_dict(node) - node.safe_psql('postgres', 'checkpoint') - - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) - - node.safe_psql('postgres', 'vacuum freeze t_heap') - node.safe_psql('postgres', 'checkpoint') - - if node.major_version < 11: - for i in idx_ptrack: - # get size of heap and indexes. 
size calculated in pages - idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) - # get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(node, i) - # calculate md5sums of pages - idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( - idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - - # CHECK PTRACK SANITY - if node.major_version < 11: - self.check_ptrack_map_sanity(node, idx_ptrack) - - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['-j10', '--stream']) - - pgdata = self.pgdata_content(node.data_dir) - node.cleanup() - shutil.rmtree( - self.get_tblspace_path(node, 'somedata'), - ignore_errors=True) - - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion) - - # @unittest.skip("skip") - def test_ptrack_vacuum_bits_frozen_replica(self): - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - master.slow_start() - - if master.major_version >= 11: - master.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.backup_node(backup_dir, 'master', master, options=['--stream']) - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.restore_node(backup_dir, 'master', replica) - - self.add_instance(backup_dir, 'replica', replica) - self.set_replica(master, replica, synchronous=True) - replica.slow_start(replica=True) - - # Create table and indexes - master.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap as select i as id, " - "md5(i::text) as text, md5(repeat(i::text,10))::tsvector " - "as tsvector from generate_series(0,2560) i") - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - master.safe_psql( - "postgres", - "create index {0} on {1} using {2}({3})".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], - idx_ptrack[i]['column'])) - - master.safe_psql('postgres', 'checkpoint') - - # Sync master and replica - self.wait_until_replica_catch_with_master(master, replica) - replica.safe_psql('postgres', 'checkpoint') - - # Take backup to clean every ptrack - self.backup_node( - backup_dir, 'replica', replica, - options=[ - '-j10', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) - - if replica.major_version < 11: - for i in idx_ptrack: - # get size of heap and indexes. 
size calculated in pages - idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i) - # get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(replica, i) - # calculate md5sums of pages - idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( - idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - - master.safe_psql('postgres', 'vacuum freeze t_heap') - master.safe_psql('postgres', 'checkpoint') - - # Sync master and replica - self.wait_until_replica_catch_with_master(master, replica) - replica.safe_psql('postgres', 'checkpoint') - - # CHECK PTRACK SANITY - if replica.major_version < 11: - self.check_ptrack_map_sanity(master, idx_ptrack) - - self.backup_node( - backup_dir, 'replica', replica, backup_type='ptrack', - options=['-j10', '--stream']) - - pgdata = self.pgdata_content(replica.data_dir) - replica.cleanup() - - self.restore_node(backup_dir, 'replica', replica) - - pgdata_restored = self.pgdata_content(replica.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_ptrack_vacuum_bits_visibility(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.create_tblspace_in_node(node, 'somedata') - - # Create table and indexes - res = node.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap tablespace somedata " - "as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") - - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - node.safe_psql( - "postgres", - "create index {0} on {1} using {2}({3}) " - "tablespace somedata".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], idx_ptrack[i]['column'])) - - comparision_exclusion = self.get_known_bugs_comparision_exclusion_dict(node) - node.safe_psql('postgres', 'checkpoint') - - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) - - if node.major_version < 11: - for i in idx_ptrack: - # get size of heap and indexes. 
size calculated in pages - idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) - # get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(node, i) - # calculate md5sums of pages - idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( - idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - - node.safe_psql('postgres', 'vacuum t_heap') - node.safe_psql('postgres', 'checkpoint') - - # CHECK PTRACK SANITY - if node.major_version < 11: - self.check_ptrack_map_sanity(node, idx_ptrack) - - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['-j10', '--stream']) - - pgdata = self.pgdata_content(node.data_dir) - node.cleanup() - shutil.rmtree( - self.get_tblspace_path(node, 'somedata'), - ignore_errors=True) - - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_ptrack_vacuum_full_2(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - pg_options={ 'wal_log_hints': 'on' }) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.create_tblspace_in_node(node, 'somedata') - - # Create table and indexes - res = node.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap tablespace somedata " - "as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - node.safe_psql( - "postgres", "create index {0} on {1} " - "using {2}({3}) tablespace somedata".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], idx_ptrack[i]['column'])) - - node.safe_psql('postgres', 'vacuum t_heap') - node.safe_psql('postgres', 'checkpoint') - - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) - - if node.major_version < 11: - for i in idx_ptrack: - # get size of heap and indexes. 
size calculated in pages - idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) - # get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(node, i) - # calculate md5sums of pages - idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( - idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - - node.safe_psql('postgres', 'delete from t_heap where id%2 = 1') - node.safe_psql('postgres', 'vacuum full t_heap') - node.safe_psql('postgres', 'checkpoint') - - if node.major_version < 11: - self.check_ptrack_map_sanity(node, idx_ptrack) - - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['-j10', '--stream']) - - pgdata = self.pgdata_content(node.data_dir) - node.cleanup() - - shutil.rmtree( - self.get_tblspace_path(node, 'somedata'), - ignore_errors=True) - - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_ptrack_vacuum_full_replica(self): - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - master.slow_start() - - if master.major_version >= 11: - master.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.backup_node(backup_dir, 'master', master, options=['--stream']) - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.restore_node(backup_dir, 'master', replica) - - self.add_instance(backup_dir, 'replica', replica) - self.set_replica(master, replica, 'replica', synchronous=True) - replica.slow_start(replica=True) - - # Create table and indexes - master.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap as select i as id, " - "md5(i::text) as text, md5(repeat(i::text,10))::tsvector as " - "tsvector from generate_series(0,256000) i") - - if master.major_version < 11: - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - master.safe_psql( - "postgres", - "create index {0} on {1} using {2}({3})".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], - idx_ptrack[i]['column'])) - - master.safe_psql('postgres', 'vacuum t_heap') - master.safe_psql('postgres', 'checkpoint') - - # Sync master and replica - self.wait_until_replica_catch_with_master(master, replica) - replica.safe_psql('postgres', 'checkpoint') - - # Take FULL backup to clean every ptrack - self.backup_node( - backup_dir, 'replica', replica, - options=[ - '-j10', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) - - if replica.major_version < 11: - for i in idx_ptrack: - # get size of heap and indexes. 
size calculated in pages - idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i) - # get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(replica, i) - # calculate md5sums of pages - idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( - idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - - master.safe_psql('postgres', 'delete from t_heap where id%2 = 1') - master.safe_psql('postgres', 'vacuum full t_heap') - master.safe_psql('postgres', 'checkpoint') - - # Sync master and replica - self.wait_until_replica_catch_with_master(master, replica) - replica.safe_psql('postgres', 'checkpoint') - - if replica.major_version < 11: - self.check_ptrack_map_sanity(master, idx_ptrack) - - self.backup_node( - backup_dir, 'replica', replica, - backup_type='ptrack', options=['-j10', '--stream']) - - pgdata = self.pgdata_content(replica.data_dir) - replica.cleanup() - - self.restore_node(backup_dir, 'replica', replica) - - pgdata_restored = self.pgdata_content(replica.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_ptrack_vacuum_truncate_2(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - # Create table and indexes - res = node.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap " - "as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") - - if node.major_version < 11: - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - node.safe_psql( - "postgres", "create index {0} on {1} using {2}({3})".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], idx_ptrack[i]['column'])) - - node.safe_psql('postgres', 'VACUUM t_heap') - - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) - - if node.major_version < 11: - for i in idx_ptrack: - # get size of heap and indexes. 
size calculated in pages - idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) - # get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(node, i) - # calculate md5sums of pages - idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( - idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - - node.safe_psql('postgres', 'DELETE FROM t_heap WHERE id > 128') - node.safe_psql('postgres', 'VACUUM t_heap') - node.safe_psql('postgres', 'CHECKPOINT') - - # CHECK PTRACK SANITY - if node.major_version < 11: - self.check_ptrack_map_sanity(node, idx_ptrack) - - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['--stream']) - - pgdata = self.pgdata_content(node.data_dir) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node(backup_dir, 'node', node_restored) - - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_ptrack_vacuum_truncate_replica(self): - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - master.slow_start() - - if master.major_version >= 11: - master.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.backup_node(backup_dir, 'master', master, options=['--stream']) - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.restore_node(backup_dir, 'master', replica) - - self.add_instance(backup_dir, 'replica', replica) - self.set_replica(master, replica, 'replica', synchronous=True) - replica.slow_start(replica=True) - - # Create table and indexes - master.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap as select i as id, " - "md5(i::text) as text, md5(repeat(i::text,10))::tsvector " - "as tsvector from generate_series(0,2560) i") - - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - master.safe_psql( - "postgres", "create index {0} on {1} " - "using {2}({3})".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], idx_ptrack[i]['column'])) - - master.safe_psql('postgres', 'vacuum t_heap') - master.safe_psql('postgres', 'checkpoint') - - # Take FULL backup to clean every ptrack - self.backup_node( - backup_dir, 'replica', replica, - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port) - ] - ) - - if master.major_version < 11: - for i in idx_ptrack: - # get size of heap and indexes. 
size calculated in pages - idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i) - # get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(replica, i) - # calculate md5sums of pages - idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( - idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - - master.safe_psql('postgres', 'DELETE FROM t_heap WHERE id > 128;') - master.safe_psql('postgres', 'VACUUM t_heap') - master.safe_psql('postgres', 'CHECKPOINT') - - # Sync master and replica - self.wait_until_replica_catch_with_master(master, replica) - replica.safe_psql('postgres', 'CHECKPOINT') - - # CHECK PTRACK SANITY - if master.major_version < 11: - self.check_ptrack_map_sanity(master, idx_ptrack) - - self.backup_node( - backup_dir, 'replica', replica, backup_type='ptrack', - options=[ - '--stream', - '--log-level-file=INFO', - '--archive-timeout=30']) - - pgdata = self.pgdata_content(replica.data_dir) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node(backup_dir, 'replica', node_restored) - - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - @unittest.skip("skip") - def test_ptrack_recovery(self): - """ - Check that ptrack map contain correct bits after recovery. - Actual only for PTRACK 1.x - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.create_tblspace_in_node(node, 'somedata') - - # Create table - node.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap tablespace somedata " - "as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") - - # Create indexes - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - node.safe_psql( - "postgres", "create index {0} on {1} using {2}({3}) " - "tablespace somedata".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], idx_ptrack[i]['column'])) - - # get size of heap and indexes. size calculated in pages - idx_ptrack[i]['size'] = int(self.get_fork_size(node, i)) - # get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(node, i) - - if self.verbose: - print('Killing postmaster. Losing Ptrack changes') - node.stop(['-m', 'immediate', '-D', node.data_dir]) - if not node.status(): - node.slow_start() - else: - print("Die! Die! Why won't you die?... 
Why won't you die?") - exit(1) - - for i in idx_ptrack: - # get ptrack for every idx - idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( - node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) - # check that ptrack has correct bits after recovery - self.check_ptrack_recovery(idx_ptrack[i]) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_ptrack_recovery_1(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'shared_buffers': '512MB', - 'max_wal_size': '3GB'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - # Create table - node.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap " - "as select nextval('t_seq')::int as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " -# "from generate_series(0,25600) i") - "from generate_series(0,2560) i") - - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - # Create indexes - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - node.safe_psql( - "postgres", - "CREATE INDEX {0} ON {1} USING {2}({3})".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], idx_ptrack[i]['column'])) - - node.safe_psql( - 'postgres', - "update t_heap set id = nextval('t_seq'), text = md5(text), " - "tsvector = md5(repeat(tsvector::text, 10))::tsvector") - - node.safe_psql( - 'postgres', - "create extension pg_buffercache") - - #print(node.safe_psql( - # 'postgres', - # "SELECT count(*) FROM pg_buffercache WHERE isdirty")) - - if self.verbose: - print('Killing postmaster. Losing Ptrack changes') - node.stop(['-m', 'immediate', '-D', node.data_dir]) - - if not node.status(): - node.slow_start() - else: - print("Die! Die! Why won't you die?... 
Why won't you die?") - exit(1) - - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['--stream']) - - pgdata = self.pgdata_content(node.data_dir) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored) - - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_ptrack_zero_changes(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - # Create table - node.safe_psql( - "postgres", - "create table t_heap " - "as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") - - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['--stream']) - - pgdata = self.pgdata_content(node.data_dir) - node.cleanup() - - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_ptrack_pg_resetxlog(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'shared_buffers': '512MB', - 'max_wal_size': '3GB'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - # Create table - node.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap " - "as select nextval('t_seq')::int as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " -# "from generate_series(0,25600) i") - "from generate_series(0,2560) i") - - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - # Create indexes - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - node.safe_psql( - "postgres", - "CREATE INDEX {0} ON {1} USING {2}({3})".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], idx_ptrack[i]['column'])) - - node.safe_psql( - 'postgres', - "update t_heap set id = nextval('t_seq'), text = md5(text), " - "tsvector = md5(repeat(tsvector::text, 10))::tsvector") - -# node.safe_psql( -# 'postgres', -# "create extension pg_buffercache") -# -# print(node.safe_psql( -# 'postgres', -# "SELECT count(*) FROM pg_buffercache WHERE isdirty")) - - # kill the bastard - if self.verbose: - print('Killing postmaster. 
Losing Ptrack changes') - node.stop(['-m', 'immediate', '-D', node.data_dir]) - - # now smack it with sledgehammer - if node.major_version >= 10: - pg_resetxlog_path = self.get_bin_path('pg_resetwal') - wal_dir = 'pg_wal' - else: - pg_resetxlog_path = self.get_bin_path('pg_resetxlog') - wal_dir = 'pg_xlog' - - self.run_binary( - [ - pg_resetxlog_path, - '-D', - node.data_dir, - '-o 42', - '-f' - ], - asynchronous=False) - - if not node.status(): - node.slow_start() - else: - print("Die! Die! Why won't you die?... Why won't you die?") - exit(1) - - # take ptrack backup -# self.backup_node( -# backup_dir, 'node', node, -# backup_type='ptrack', options=['--stream']) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because instance was brutalized by pg_resetxlog" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd) - ) - except ProbackupException as e: - self.assertTrue( - 'ERROR: LSN from ptrack_control ' in e.message and - 'is greater than Start LSN of previous backup' in e.message, - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(e.message), self.cmd)) - -# pgdata = self.pgdata_content(node.data_dir) -# -# node_restored = self.make_simple_node( -# base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) -# node_restored.cleanup() -# -# self.restore_node( -# backup_dir, 'node', node_restored) -# -# pgdata_restored = self.pgdata_content(node_restored.data_dir) -# self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_corrupt_ptrack_map(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - ptrack_version = self.get_ptrack_version(node) - - # Create table - node.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap " - "as select nextval('t_seq')::int as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") - - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - node.safe_psql( - 'postgres', - "update t_heap set id = nextval('t_seq'), text = md5(text), " - "tsvector = md5(repeat(tsvector::text, 10))::tsvector") - - # kill the bastard - if self.verbose: - print('Killing postmaster. Losing Ptrack changes') - - node.stop(['-m', 'immediate', '-D', node.data_dir]) - - ptrack_map = os.path.join(node.data_dir, 'global', 'ptrack.map') - - # Let`s do index corruption. 
ptrack.map - with open(ptrack_map, "rb+", 0) as f: - f.seek(42) - f.write(b"blablahblahs") - f.flush() - f.close - -# os.remove(os.path.join(node.logs_dir, node.pg_log_name)) - - if self.verbose: - print('Ptrack version:', ptrack_version) - if ptrack_version >= self.version_to_num("2.3"): - node.slow_start() - - log_file = os.path.join(node.logs_dir, 'postgresql.log') - with open(log_file, 'r') as f: - log_content = f.read() - - self.assertIn( - 'WARNING: ptrack read map: incorrect checksum of file "{0}"'.format(ptrack_map), - log_content) - - node.stop(['-D', node.data_dir]) - else: - try: - node.slow_start() - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because ptrack.map is corrupted" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except StartNodeException as e: - self.assertIn( - 'Cannot start node', - e.message, - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(e.message), self.cmd)) - - log_file = os.path.join(node.logs_dir, 'postgresql.log') - with open(log_file, 'r') as f: - log_content = f.read() - - self.assertIn( - 'FATAL: ptrack init: incorrect checksum of file "{0}"'.format(ptrack_map), - log_content) - - self.set_auto_conf(node, {'ptrack.map_size': '0'}) - node.slow_start() - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because instance ptrack is disabled" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Ptrack is disabled', - e.message, - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(e.message), self.cmd)) - - node.safe_psql( - 'postgres', - "update t_heap set id = nextval('t_seq'), text = md5(text), " - "tsvector = md5(repeat(tsvector::text, 10))::tsvector") - - node.stop(['-m', 'immediate', '-D', node.data_dir]) - - self.set_auto_conf(node, {'ptrack.map_size': '32', 'shared_preload_libraries': 'ptrack'}) - node.slow_start() - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because ptrack map is from future" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: LSN from ptrack_control', - e.message, - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(e.message), self.cmd)) - - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['--stream']) - - node.safe_psql( - 'postgres', - "update t_heap set id = nextval('t_seq'), text = md5(text), " - "tsvector = md5(repeat(tsvector::text, 10))::tsvector") - - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['--stream']) - - pgdata = self.pgdata_content(node.data_dir) - - node.cleanup() - - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_horizon_lsn_ptrack(self): - """ - https://github.com/postgrespro/pg_probackup/pull/386 - """ - if not self.probackup_old_path: - self.skipTest("You must specify PGPROBACKUPBIN_OLD" - " for run this test") - self.assertLessEqual( - 
self.version_to_num(self.old_probackup_version), - self.version_to_num('2.4.15'), - 'You need pg_probackup old_binary =< 2.4.15 for this test') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.assertGreaterEqual( - self.get_ptrack_version(node), - self.version_to_num("2.1"), - "You need ptrack >=2.1 for this test") - - # set map_size to a minimal value - self.set_auto_conf(node, {'ptrack.map_size': '1'}) - node.restart() - - node.pgbench_init(scale=100) - - # FULL backup - full_id = self.backup_node(backup_dir, 'node', node, options=['--stream'], old_binary=True) - - # enable archiving so the WAL size to do interfere with data bytes comparison later - self.set_archiving(backup_dir, 'node', node) - node.restart() - - # change data - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # DELTA is exemplar - delta_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') - delta_bytes = self.show_pb(backup_dir, 'node', backup_id=delta_id)["data-bytes"] - self.delete_pb(backup_dir, 'node', backup_id=delta_id) - - # PTRACK with current binary - ptrack_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack') - ptrack_bytes = self.show_pb(backup_dir, 'node', backup_id=ptrack_id)["data-bytes"] - - # make sure that backup size is exactly the same - self.assertEqual(delta_bytes, ptrack_bytes) diff --git a/tests/remote_test.py b/tests/remote_test.py deleted file mode 100644 index 2d36d7346..000000000 --- a/tests/remote_test.py +++ /dev/null @@ -1,43 +0,0 @@ -import unittest -import os -from time import sleep -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from .helpers.cfs_helpers import find_by_name - - -class RemoteTest(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_remote_sanity(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - output = self.backup_node( - backup_dir, 'node', node, - options=['--stream'], no_remote=True, return_id=False) - self.assertIn('remote: false', output) - - # try: - # self.backup_node( - # backup_dir, 'node', - # node, options=['--remote-proto=ssh', '--stream'], no_remote=True) - # # we should die here because exception is what we expect to happen - # self.assertEqual( - # 1, 0, - # "Expecting Error because remote-host option is missing." 
- # "\n Output: {0} \n CMD: {1}".format( - # repr(self.output), self.cmd)) - # except ProbackupException as e: - # self.assertIn( - # "Insert correct error", - # e.message, - # "\n Unexpected Error Message: {0}\n CMD: {1}".format( - # repr(e.message), self.cmd)) diff --git a/tests/replica_test.py b/tests/replica_test.py deleted file mode 100644 index 9c68de366..000000000 --- a/tests/replica_test.py +++ /dev/null @@ -1,1654 +0,0 @@ -import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack -from datetime import datetime, timedelta -import subprocess -import time -from distutils.dir_util import copy_tree -from testgres import ProcessType -from time import sleep - - -class ReplicaTest(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_replica_switchover(self): - """ - check that archiving on replica works correctly - over the course of several switchovers - https://www.postgresql.org/message-id/54b059d4-2b48-13a4-6f43-95a087c92367%40postgrespro.ru - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), - set_replication=True, - initdb_params=['--data-checksums']) - - if self.get_version(node1) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node1', node1) - - node1.slow_start() - - # take full backup and restore it - self.backup_node(backup_dir, 'node1', node1, options=['--stream']) - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2')) - node2.cleanup() - - # create replica - self.restore_node(backup_dir, 'node1', node2) - - # setup replica - self.add_instance(backup_dir, 'node2', node2) - self.set_archiving(backup_dir, 'node2', node2, replica=True) - self.set_replica(node1, node2, synchronous=False) - self.set_auto_conf(node2, {'port': node2.port}) - - node2.slow_start(replica=True) - - # generate some data - node1.pgbench_init(scale=5) - - # take full backup on replica - self.backup_node(backup_dir, 'node2', node2, options=['--stream']) - - # first switchover - node1.stop() - node2.promote() - - self.set_replica(node2, node1, synchronous=False) - node2.reload() - node1.slow_start(replica=True) - - # take incremental backup from new master - self.backup_node( - backup_dir, 'node2', node2, - backup_type='delta', options=['--stream']) - - # second switchover - node2.stop() - node1.promote() - self.set_replica(node1, node2, synchronous=False) - node1.reload() - node2.slow_start(replica=True) - - # generate some more data - node1.pgbench_init(scale=5) - - # take incremental backup from replica - self.backup_node( - backup_dir, 'node2', node2, - backup_type='delta', options=['--stream']) - - # https://github.com/postgrespro/pg_probackup/issues/251 - self.validate_pb(backup_dir) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_replica_stream_ptrack_backup(self): - """ - make node, take full backup, restore it and make replica from it, - take full stream backup from replica - """ - if not self.ptrack: - self.skipTest('Skipped because ptrack support is disabled') - - if self.pg_config_version > self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - backup_dir = 
os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - - master.slow_start() - - if master.major_version >= 12: - master.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - # CREATE TABLE - master.psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,256) i") - before = master.safe_psql("postgres", "SELECT * FROM t_heap") - - # take full backup and restore it - self.backup_node(backup_dir, 'master', master, options=['--stream']) - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - self.restore_node(backup_dir, 'master', replica) - self.set_replica(master, replica) - - # Check data correctness on replica - replica.slow_start(replica=True) - after = replica.safe_psql("postgres", "SELECT * FROM t_heap") - self.assertEqual(before, after) - - # Change data on master, take FULL backup from replica, - # restore taken backup and check that restored data equal - # to original data - master.psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(256,512) i") - before = master.safe_psql("postgres", "SELECT * FROM t_heap") - self.add_instance(backup_dir, 'replica', replica) - - backup_id = self.backup_node( - backup_dir, 'replica', replica, - options=[ - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) - self.validate_pb(backup_dir, 'replica') - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) - - # RESTORE FULL BACKUP TAKEN FROM PREVIOUS STEP - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node')) - node.cleanup() - self.restore_node(backup_dir, 'replica', data_dir=node.data_dir) - - self.set_auto_conf(node, {'port': node.port}) - - node.slow_start() - - # CHECK DATA CORRECTNESS - after = node.safe_psql("postgres", "SELECT * FROM t_heap") - self.assertEqual(before, after) - - # Change data on master, take PTRACK backup from replica, - # restore taken backup and check that restored data equal - # to original data - master.psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(512,768) i") - - before = master.safe_psql("postgres", "SELECT * FROM t_heap") - - backup_id = self.backup_node( - backup_dir, 'replica', replica, backup_type='ptrack', - options=[ - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) - self.validate_pb(backup_dir, 'replica') - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) - - # RESTORE PTRACK BACKUP TAKEN FROM replica - node.cleanup() - self.restore_node( - backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id) - - self.set_auto_conf(node, {'port': node.port}) - - node.slow_start() - - # CHECK DATA CORRECTNESS - after = node.safe_psql("postgres", "SELECT * FROM t_heap") - self.assertEqual(before, after) - - # @unittest.skip("skip") - def test_replica_archive_page_backup(self): - """ - 
make archive master, take full and page archive backups from master, - set replica, make archive backup from replica - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'archive_timeout': '10s', - 'checkpoint_timeout': '30s', - 'max_wal_size': '32MB'}) - - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - self.set_archiving(backup_dir, 'master', master) - master.slow_start() - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.backup_node(backup_dir, 'master', master) - - master.psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") - - before = master.safe_psql("postgres", "SELECT * FROM t_heap") - - backup_id = self.backup_node( - backup_dir, 'master', master, backup_type='page') - self.restore_node(backup_dir, 'master', replica) - - # Settings for Replica - self.add_instance(backup_dir, 'replica', replica) - self.set_replica(master, replica, synchronous=True) - self.set_archiving(backup_dir, 'replica', replica, replica=True) - - replica.slow_start(replica=True) - - # Check data correctness on replica - after = replica.safe_psql("postgres", "SELECT * FROM t_heap") - self.assertEqual(before, after) - - # Change data on master, take FULL backup from replica, - # restore taken backup and check that restored data - # equal to original data - master.psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(256,25120) i") - - before = master.safe_psql("postgres", "SELECT * FROM t_heap") - - self.wait_until_replica_catch_with_master(master, replica) - - backup_id = self.backup_node( - backup_dir, 'replica', replica, - options=[ - '--archive-timeout=60', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) - - self.validate_pb(backup_dir, 'replica') - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) - - # RESTORE FULL BACKUP TAKEN FROM replica - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node')) - node.cleanup() - self.restore_node(backup_dir, 'replica', data_dir=node.data_dir) - - self.set_auto_conf(node, {'port': node.port, 'archive_mode': 'off'}) - - node.slow_start() - - # CHECK DATA CORRECTNESS - after = node.safe_psql("postgres", "SELECT * FROM t_heap") - self.assertEqual(before, after) - node.cleanup() - - # Change data on master, make PAGE backup from replica, - # restore taken backup and check that restored data equal - # to original data - master.pgbench_init(scale=5) - - pgbench = master.pgbench( - options=['-T', '30', '-c', '2', '--no-vacuum']) - - backup_id = self.backup_node( - backup_dir, 'replica', - replica, backup_type='page', - options=[ - '--archive-timeout=60', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) - - pgbench.wait() - - self.switch_wal_segment(master) - - before = master.safe_psql("postgres", "SELECT * FROM 
pgbench_accounts") - - self.validate_pb(backup_dir, 'replica') - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) - - # RESTORE PAGE BACKUP TAKEN FROM replica - self.restore_node( - backup_dir, 'replica', data_dir=node.data_dir, - backup_id=backup_id) - - self.set_auto_conf(node, {'port': node.port, 'archive_mode': 'off'}) - - node.slow_start() - - # CHECK DATA CORRECTNESS - after = node.safe_psql("postgres", "SELECT * FROM pgbench_accounts") - self.assertEqual( - before, after, 'Restored data is not equal to original') - - self.add_instance(backup_dir, 'node', node) - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - # @unittest.skip("skip") - def test_basic_make_replica_via_restore(self): - """ - make archive master, take full and page archive backups from master, - set replica, make archive backup from replica - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'archive_timeout': '10s'}) - - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - self.set_archiving(backup_dir, 'master', master) - master.slow_start() - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.backup_node(backup_dir, 'master', master) - - master.psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,8192) i") - - before = master.safe_psql("postgres", "SELECT * FROM t_heap") - - backup_id = self.backup_node( - backup_dir, 'master', master, backup_type='page') - self.restore_node( - backup_dir, 'master', replica, options=['-R']) - - # Settings for Replica - self.add_instance(backup_dir, 'replica', replica) - self.set_archiving(backup_dir, 'replica', replica, replica=True) - self.set_replica(master, replica, synchronous=True) - - replica.slow_start(replica=True) - - self.backup_node( - backup_dir, 'replica', replica, - options=['--archive-timeout=30s', '--stream']) - - # @unittest.skip("skip") - def test_take_backup_from_delayed_replica(self): - """ - make archive master, take full backups from master, - restore full backup as delayed replica, launch pgbench, - take FULL, PAGE and DELTA backups from replica - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'archive_timeout': '10s'}) - - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - self.set_archiving(backup_dir, 'master', master) - master.slow_start() - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.backup_node(backup_dir, 'master', master) - - master.psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as 
tsvector " - "from generate_series(0,165000) i") - - master.psql( - "postgres", - "create table t_heap_1 as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,165000) i") - - self.restore_node( - backup_dir, 'master', replica, options=['-R']) - - # Settings for Replica - self.add_instance(backup_dir, 'replica', replica) - self.set_archiving(backup_dir, 'replica', replica, replica=True) - - self.set_auto_conf(replica, {'port': replica.port}) - - replica.slow_start(replica=True) - - self.wait_until_replica_catch_with_master(master, replica) - - if self.get_version(master) >= self.version_to_num('12.0'): - self.set_auto_conf( - replica, {'recovery_min_apply_delay': '300s'}) - else: - replica.append_conf( - 'recovery.conf', - 'recovery_min_apply_delay = 300s') - - replica.stop() - replica.slow_start(replica=True) - - master.pgbench_init(scale=10) - - pgbench = master.pgbench( - options=['-T', '60', '-c', '2', '--no-vacuum']) - - self.backup_node( - backup_dir, 'replica', - replica, options=['--archive-timeout=60s']) - - self.backup_node( - backup_dir, 'replica', replica, - data_dir=replica.data_dir, - backup_type='page', options=['--archive-timeout=60s']) - - sleep(1) - - self.backup_node( - backup_dir, 'replica', replica, - backup_type='delta', options=['--archive-timeout=60s']) - - pgbench.wait() - - pgbench = master.pgbench( - options=['-T', '30', '-c', '2', '--no-vacuum']) - - self.backup_node( - backup_dir, 'replica', replica, - options=['--stream']) - - self.backup_node( - backup_dir, 'replica', replica, - backup_type='page', options=['--stream']) - - self.backup_node( - backup_dir, 'replica', replica, - backup_type='delta', options=['--stream']) - - pgbench.wait() - - # @unittest.skip("skip") - def test_replica_promote(self): - """ - start backup from replica, during backup promote replica - check that backup is failed - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'archive_timeout': '10s', - 'checkpoint_timeout': '30s', - 'max_wal_size': '32MB'}) - - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - self.set_archiving(backup_dir, 'master', master) - master.slow_start() - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.backup_node(backup_dir, 'master', master) - - master.psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,165000) i") - - self.restore_node( - backup_dir, 'master', replica, options=['-R']) - - # Settings for Replica - self.add_instance(backup_dir, 'replica', replica) - self.set_archiving(backup_dir, 'replica', replica, replica=True) - self.set_replica( - master, replica, replica_name='replica', synchronous=True) - - replica.slow_start(replica=True) - - master.psql( - "postgres", - "create table t_heap_1 as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,165000) i") - - self.wait_until_replica_catch_with_master(master, 
replica) - - # start backup from replica - gdb = self.backup_node( - backup_dir, 'replica', replica, gdb=True, - options=['--log-level-file=verbose']) - - gdb.set_breakpoint('backup_data_file') - gdb.run_until_break() - gdb.continue_execution_until_break(20) - - replica.promote() - - gdb.remove_all_breakpoints() - gdb.continue_execution_until_exit() - - backup_id = self.show_pb( - backup_dir, 'replica')[0]["id"] - - # read log file content - with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: - log_content = f.read() - f.close - - self.assertIn( - 'ERROR: the standby was promoted during online backup', - log_content) - - self.assertIn( - 'WARNING: Backup {0} is running, ' - 'setting its status to ERROR'.format(backup_id), - log_content) - - # @unittest.skip("skip") - def test_replica_stop_lsn_null_offset(self): - """ - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '1h', - 'wal_level': 'replica'}) - - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', master) - self.set_archiving(backup_dir, 'node', master) - master.slow_start() - - # freeze bgwriter to get rid of RUNNING XACTS records - bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] - gdb_checkpointer = self.gdb_attach(bgwriter_pid) - - self.backup_node(backup_dir, 'node', master) - - # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - self.restore_node(backup_dir, 'node', replica) - - # Settings for Replica - self.set_replica(master, replica, synchronous=True) - self.set_archiving(backup_dir, 'node', replica, replica=True) - - replica.slow_start(replica=True) - - self.switch_wal_segment(master) - self.switch_wal_segment(master) - - output = self.backup_node( - backup_dir, 'node', replica, replica.data_dir, - options=[ - '--archive-timeout=30', - '--log-level-console=LOG', - '--no-validate', - '--stream'], - return_id=False) - - self.assertIn( - 'LOG: Invalid offset in stop_lsn value 0/4000000', - output) - - self.assertIn( - 'WARNING: WAL segment 000000010000000000000004 could not be streamed in 30 seconds', - output) - - self.assertIn( - 'WARNING: Failed to get next WAL record after 0/4000000, looking for previous WAL record', - output) - - self.assertIn( - 'LOG: Looking for LSN 0/4000000 in segment: 000000010000000000000003', - output) - - self.assertIn( - 'has endpoint 0/4000000 which is ' - 'equal or greater than requested LSN 0/4000000', - output) - - self.assertIn( - 'LOG: Found prior LSN:', - output) - - # Clean after yourself - gdb_checkpointer.kill() - - # @unittest.skip("skip") - def test_replica_stop_lsn_null_offset_next_record(self): - """ - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '1h', - 'wal_level': 'replica'}) - - if self.get_version(master) < self.version_to_num('9.6.0'): - 
self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - self.set_archiving(backup_dir, 'master', master) - master.slow_start() - - # freeze bgwriter to get rid of RUNNING XACTS records - bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] - - self.backup_node(backup_dir, 'master', master) - - # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - self.restore_node(backup_dir, 'master', replica) - - # Settings for Replica - self.add_instance(backup_dir, 'replica', replica) - self.set_replica(master, replica, synchronous=True) - self.set_archiving(backup_dir, 'replica', replica, replica=True) - - copy_tree( - os.path.join(backup_dir, 'wal', 'master'), - os.path.join(backup_dir, 'wal', 'replica')) - - replica.slow_start(replica=True) - - self.switch_wal_segment(master) - self.switch_wal_segment(master) - - # open connection to master - conn = master.connect() - - gdb = self.backup_node( - backup_dir, 'replica', replica, - options=[ - '--archive-timeout=40', - '--log-level-file=LOG', - '--no-validate', - '--stream'], - gdb=True) - - # Attention! this breakpoint is set to a probackup internal function, not a postgres core one - gdb.set_breakpoint('pg_stop_backup') - gdb.run_until_break() - gdb.remove_all_breakpoints() - gdb.continue_execution_until_running() - - sleep(5) - - conn.execute("create table t1()") - conn.commit() - - while 'RUNNING' in self.show_pb(backup_dir, 'replica')[0]['status']: - sleep(5) - - file = os.path.join(backup_dir, 'log', 'pg_probackup.log') - - with open(file) as f: - log_content = f.read() - - self.assertIn( - 'LOG: Invalid offset in stop_lsn value 0/4000000', - log_content) - - self.assertIn( - 'LOG: Looking for segment: 000000010000000000000004', - log_content) - - self.assertIn( - 'LOG: First record in WAL segment "000000010000000000000004": 0/4000028', - log_content) - - self.assertIn( - 'INFO: stop_lsn: 0/4000000', - log_content) - - self.assertTrue(self.show_pb(backup_dir, 'replica')[0]['status'] == 'DONE') - - # @unittest.skip("skip") - def test_archive_replica_null_offset(self): - """ - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '1h', - 'wal_level': 'replica'}) - - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', master) - self.set_archiving(backup_dir, 'node', master) - master.slow_start() - - self.backup_node(backup_dir, 'node', master) - - # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - self.restore_node(backup_dir, 'node', replica) - - # Settings for Replica - self.set_replica(master, replica, synchronous=True) - self.set_archiving(backup_dir, 'node', replica, replica=True) - - # freeze bgwriter to get rid of RUNNING XACTS records - bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] - gdb_checkpointer = self.gdb_attach(bgwriter_pid) - - replica.slow_start(replica=True) - - 
self.switch_wal_segment(master) - self.switch_wal_segment(master) - - # take backup from replica - output = self.backup_node( - backup_dir, 'node', replica, replica.data_dir, - options=[ - '--archive-timeout=30', - '--log-level-console=LOG', - '--no-validate'], - return_id=False) - - self.assertIn( - 'LOG: Invalid offset in stop_lsn value 0/4000000', - output) - - self.assertIn( - 'WARNING: WAL segment 000000010000000000000004 could not be archived in 30 seconds', - output) - - self.assertIn( - 'WARNING: Failed to get next WAL record after 0/4000000, looking for previous WAL record', - output) - - self.assertIn( - 'LOG: Looking for LSN 0/4000000 in segment: 000000010000000000000003', - output) - - self.assertIn( - 'has endpoint 0/4000000 which is ' - 'equal or greater than requested LSN 0/4000000', - output) - - self.assertIn( - 'LOG: Found prior LSN:', - output) - - print(output) - - # @unittest.skip("skip") - def test_archive_replica_not_null_offset(self): - """ - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '1h', - 'wal_level': 'replica'}) - - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', master) - self.set_archiving(backup_dir, 'node', master) - master.slow_start() - - self.backup_node(backup_dir, 'node', master) - - # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - self.restore_node(backup_dir, 'node', replica) - - # Settings for Replica - self.set_replica(master, replica, synchronous=True) - self.set_archiving(backup_dir, 'node', replica, replica=True) - - replica.slow_start(replica=True) - - # take backup from replica - self.backup_node( - backup_dir, 'node', replica, replica.data_dir, - options=[ - '--archive-timeout=30', - '--log-level-console=LOG', - '--no-validate'], - return_id=False) - - try: - self.backup_node( - backup_dir, 'node', replica, replica.data_dir, - options=[ - '--archive-timeout=30', - '--log-level-console=LOG', - '--no-validate']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of archive timeout. 
" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - # vanilla -- 0/4000060 - # pgproee -- 0/4000078 - self.assertRegex( - e.message, - r'LOG: Looking for LSN (0/4000060|0/4000078) in segment: 000000010000000000000004', - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertRegex( - e.message, - r'INFO: Wait for LSN (0/4000060|0/4000078) in archived WAL segment', - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - 'ERROR: WAL segment 000000010000000000000004 could not be archived in 30 seconds', - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_replica_toast(self): - """ - make archive master, take full and page archive backups from master, - set replica, make archive backup from replica - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '1h', - 'wal_level': 'replica', - 'shared_buffers': '128MB'}) - - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - self.set_archiving(backup_dir, 'master', master) - master.slow_start() - - # freeze bgwriter to get rid of RUNNING XACTS records - bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] - gdb_checkpointer = self.gdb_attach(bgwriter_pid) - - self.backup_node(backup_dir, 'master', master) - - # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - self.restore_node(backup_dir, 'master', replica) - - # Settings for Replica - self.add_instance(backup_dir, 'replica', replica) - self.set_replica(master, replica, synchronous=True) - self.set_archiving(backup_dir, 'replica', replica, replica=True) - - copy_tree( - os.path.join(backup_dir, 'wal', 'master'), - os.path.join(backup_dir, 'wal', 'replica')) - - replica.slow_start(replica=True) - - self.switch_wal_segment(master) - self.switch_wal_segment(master) - - master.safe_psql( - 'postgres', - 'CREATE TABLE t1 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,10) i') - - self.wait_until_replica_catch_with_master(master, replica) - - output = self.backup_node( - backup_dir, 'replica', replica, - options=[ - '--archive-timeout=30', - '--log-level-console=LOG', - '--no-validate', - '--stream'], - return_id=False) - - pgdata = self.pgdata_content(replica.data_dir) - - self.assertIn( - 'WARNING: Could not read WAL record at', - output) - - self.assertIn( - 'LOG: Found prior LSN:', - output) - - res1 = replica.safe_psql( - 'postgres', - 'select md5(fat_attr) from t1') - - replica.cleanup() - - self.restore_node(backup_dir, 'replica', replica) - pgdata_restored = self.pgdata_content(replica.data_dir) - - replica.slow_start() - - res2 = replica.safe_psql( - 'postgres', - 'select md5(fat_attr) from t1') - - self.assertEqual(res1, res2) - - self.compare_pgdata(pgdata, pgdata_restored) - - # Clean after yourself - gdb_checkpointer.kill() - - # @unittest.skip("skip") - def 
test_start_stop_lsn_in_the_same_segno(self): - """ - """ - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '1h', - 'wal_level': 'replica', - 'shared_buffers': '128MB'}) - - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - master.slow_start() - - # freeze bgwriter to get rid of RUNNING XACTS records - bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] - - self.backup_node(backup_dir, 'master', master, options=['--stream']) - - # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - self.restore_node(backup_dir, 'master', replica) - - # Settings for Replica - self.add_instance(backup_dir, 'replica', replica) - self.set_replica(master, replica, synchronous=True) - - replica.slow_start(replica=True) - - self.switch_wal_segment(master) - self.switch_wal_segment(master) - - master.safe_psql( - 'postgres', - 'CREATE TABLE t1 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,10) i') - - master.safe_psql( - 'postgres', - 'CHECKPOINT') - - self.wait_until_replica_catch_with_master(master, replica) - - sleep(60) - - self.backup_node( - backup_dir, 'replica', replica, - options=[ - '--archive-timeout=30', - '--log-level-console=LOG', - '--no-validate', - '--stream'], - return_id=False) - - self.backup_node( - backup_dir, 'replica', replica, - options=[ - '--archive-timeout=30', - '--log-level-console=LOG', - '--no-validate', - '--stream'], - return_id=False) - - @unittest.skip("skip") - def test_replica_promote_1(self): - """ - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '1h', - 'wal_level': 'replica'}) - - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - # set replica True, so archive_mode 'always' is used. 
- self.set_archiving(backup_dir, 'master', master, replica=True) - master.slow_start() - - self.backup_node(backup_dir, 'master', master) - - # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - self.restore_node(backup_dir, 'master', replica) - - # Settings for Replica - self.set_replica(master, replica) - - replica.slow_start(replica=True) - - master.safe_psql( - 'postgres', - 'CREATE TABLE t1 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,10) i') - - self.wait_until_replica_catch_with_master(master, replica) - - wal_file = os.path.join( - backup_dir, 'wal', 'master', '000000010000000000000004') - - wal_file_partial = os.path.join( - backup_dir, 'wal', 'master', '000000010000000000000004.partial') - - self.assertFalse(os.path.exists(wal_file)) - - replica.promote() - - while not os.path.exists(wal_file_partial): - sleep(1) - - self.switch_wal_segment(master) - - # sleep to be sure, that any partial timeout is expired - sleep(70) - - self.assertTrue( - os.path.exists(wal_file_partial), - "File {0} disappeared".format(wal_file)) - - self.assertTrue( - os.path.exists(wal_file_partial), - "File {0} disappeared".format(wal_file_partial)) - - # @unittest.skip("skip") - def test_replica_promote_2(self): - """ - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - # set replica True, so archive_mode 'always' is used. - self.set_archiving( - backup_dir, 'master', master, replica=True) - master.slow_start() - - self.backup_node(backup_dir, 'master', master) - - # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - self.restore_node(backup_dir, 'master', replica) - - # Settings for Replica - self.set_replica(master, replica) - self.set_auto_conf(replica, {'port': replica.port}) - - replica.slow_start(replica=True) - - master.safe_psql( - 'postgres', - 'CREATE TABLE t1 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,1) i') - - self.wait_until_replica_catch_with_master(master, replica) - - replica.promote() - - self.backup_node( - backup_dir, 'master', replica, data_dir=replica.data_dir, - backup_type='page') - - # @unittest.skip("skip") - def test_replica_promote_archive_delta(self): - """ - t3 /---D3--> - t2 /-------> - t1 --F---D1--D2-- - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s', - 'archive_timeout': '30s'}) - - if self.get_version(node1) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node1) - self.set_config( - backup_dir, 'node', options=['--archive-timeout=60s']) - self.set_archiving(backup_dir, 'node', node1) - - node1.slow_start() - - self.backup_node(backup_dir, 'node', node1, options=['--stream']) - - # Create replica - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, 
self.fname, 'node2')) - node2.cleanup() - self.restore_node(backup_dir, 'node', node2, node2.data_dir) - - # Settings for Replica - self.set_replica(node1, node2) - self.set_auto_conf(node2, {'port': node2.port}) - self.set_archiving(backup_dir, 'node', node2, replica=True) - - node2.slow_start(replica=True) - - node1.safe_psql( - 'postgres', - 'CREATE TABLE t1 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,20) i') - self.wait_until_replica_catch_with_master(node1, node2) - - node1.safe_psql( - 'postgres', - 'CREATE TABLE t2 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,20) i') - self.wait_until_replica_catch_with_master(node1, node2) - - # delta backup on replica on timeline 1 - delta1_id = self.backup_node( - backup_dir, 'node', node2, node2.data_dir, - 'delta', options=['--stream']) - - # delta backup on replica on timeline 1 - delta2_id = self.backup_node( - backup_dir, 'node', node2, node2.data_dir, 'delta') - - self.change_backup_status( - backup_dir, 'node', delta2_id, 'ERROR') - - # node2 is now master - node2.promote() - - node2.safe_psql( - 'postgres', - 'CREATE TABLE t3 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,20) i') - - # node1 is now replica - node1.cleanup() - # kludge "backup_id=delta1_id" - self.restore_node( - backup_dir, 'node', node1, node1.data_dir, - backup_id=delta1_id, - options=[ - '--recovery-target-timeline=2', - '--recovery-target=latest']) - - # Settings for Replica - self.set_replica(node2, node1) - self.set_auto_conf(node1, {'port': node1.port}) - self.set_archiving(backup_dir, 'node', node1, replica=True) - - node1.slow_start(replica=True) - - node2.safe_psql( - 'postgres', - 'CREATE TABLE t4 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,30) i') - self.wait_until_replica_catch_with_master(node2, node1) - - # node1 is back to be a master - node1.promote() - - sleep(5) - - # delta backup on timeline 3 - self.backup_node( - backup_dir, 'node', node1, node1.data_dir, 'delta', - options=['--archive-timeout=60']) - - pgdata = self.pgdata_content(node1.data_dir) - - node1.cleanup() - self.restore_node(backup_dir, 'node', node1, node1.data_dir) - - pgdata_restored = self.pgdata_content(node1.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_replica_promote_archive_page(self): - """ - t3 /---P3--> - t2 /-------> - t1 --F---P1--P2-- - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s', - 'archive_timeout': '30s'}) - - if self.get_version(node1) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node1) - self.set_archiving(backup_dir, 'node', node1) - self.set_config( - backup_dir, 'node', options=['--archive-timeout=60s']) - - node1.slow_start() - - self.backup_node(backup_dir, 'node', node1, options=['--stream']) - - # Create replica - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2')) - node2.cleanup() - self.restore_node(backup_dir, 'node', node2, node2.data_dir) - - # Settings for Replica - self.set_replica(node1, node2) - 
self.set_auto_conf(node2, {'port': node2.port}) - self.set_archiving(backup_dir, 'node', node2, replica=True) - - node2.slow_start(replica=True) - - node1.safe_psql( - 'postgres', - 'CREATE TABLE t1 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,20) i') - self.wait_until_replica_catch_with_master(node1, node2) - - node1.safe_psql( - 'postgres', - 'CREATE TABLE t2 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,20) i') - self.wait_until_replica_catch_with_master(node1, node2) - - # page backup on replica on timeline 1 - page1_id = self.backup_node( - backup_dir, 'node', node2, node2.data_dir, - 'page', options=['--stream']) - - # page backup on replica on timeline 1 - page2_id = self.backup_node( - backup_dir, 'node', node2, node2.data_dir, 'page') - - self.change_backup_status( - backup_dir, 'node', page2_id, 'ERROR') - - # node2 is now master - node2.promote() - - node2.safe_psql( - 'postgres', - 'CREATE TABLE t3 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,20) i') - - # node1 is now replica - node1.cleanup() - # kludge "backup_id=page1_id" - self.restore_node( - backup_dir, 'node', node1, node1.data_dir, - backup_id=page1_id, - options=[ - '--recovery-target-timeline=2', - '--recovery-target=latest']) - - # Settings for Replica - self.set_replica(node2, node1) - self.set_auto_conf(node1, {'port': node1.port}) - self.set_archiving(backup_dir, 'node', node1, replica=True) - - node1.slow_start(replica=True) - - node2.safe_psql( - 'postgres', - 'CREATE TABLE t4 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,30) i') - self.wait_until_replica_catch_with_master(node2, node1) - - # node1 is back to be a master - node1.promote() - self.switch_wal_segment(node1) - - sleep(5) - - # delta3_id = self.backup_node( - # backup_dir, 'node', node2, node2.data_dir, 'delta') - # page backup on timeline 3 - page3_id = self.backup_node( - backup_dir, 'node', node1, node1.data_dir, 'page', - options=['--archive-timeout=60']) - - pgdata = self.pgdata_content(node1.data_dir) - - node1.cleanup() - self.restore_node(backup_dir, 'node', node1, node1.data_dir) - - pgdata_restored = self.pgdata_content(node1.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_parent_choosing(self): - """ - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums']) - - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - - master.slow_start() - - self.backup_node(backup_dir, 'master', master, options=['--stream']) - - # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - self.restore_node(backup_dir, 'master', replica) - - # Settings for Replica - self.set_replica(master, replica) - self.set_auto_conf(replica, {'port': replica.port}) - - replica.slow_start(replica=True) - - master.safe_psql( - 'postgres', - 'CREATE TABLE t1 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,20) i') - self.wait_until_replica_catch_with_master(master, replica) - - 
self.add_instance(backup_dir, 'replica', replica) - - full_id = self.backup_node( - backup_dir, 'replica', - replica, options=['--stream']) - - master.safe_psql( - 'postgres', - 'CREATE TABLE t2 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,20) i') - self.wait_until_replica_catch_with_master(master, replica) - - self.backup_node( - backup_dir, 'replica', replica, - backup_type='delta', options=['--stream']) - - replica.promote() - - # failing, because without archving, it is impossible to - # take multi-timeline backup. - self.backup_node( - backup_dir, 'replica', replica, - backup_type='delta', options=['--stream']) - - # @unittest.skip("skip") - def test_instance_from_the_past(self): - """ - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - - node.slow_start() - - full_id = self.backup_node(backup_dir, 'node', node, options=['--stream']) - - node.pgbench_init(scale=10) - self.backup_node(backup_dir, 'node', node, options=['--stream']) - node.cleanup() - - self.restore_node(backup_dir, 'node', node, backup_id=full_id) - node.slow_start() - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because instance is from the past " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: Current START LSN' in e.message and - 'is lower than START LSN' in e.message and - 'It may indicate that we are trying to backup ' - 'PostgreSQL instance from the past' in e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_replica_via_basebackup(self): - """ - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'hot_standby': 'on'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - - node.slow_start() - - node.pgbench_init(scale=10) - - #FULL backup - full_id = self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench( - options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - node.cleanup() - - self.restore_node( - backup_dir, 'node', node, - options=['--recovery-target=latest', '--recovery-target-action=promote']) - node.slow_start() - - # Timeline 2 - # Take stream page backup from instance in timeline2 - self.backup_node( - backup_dir, 'node', node, backup_type='full', - options=['--stream', '--log-level-file=verbose']) - - node.cleanup() - - # restore stream backup - self.restore_node(backup_dir, 'node', node) - - xlog_dir = 'pg_wal' - if self.get_version(node) < 100000: - xlog_dir = 'pg_xlog' - - filepath = os.path.join(node.data_dir, xlog_dir, "00000002.history") - self.assertTrue( - os.path.exists(filepath), - "History file do not exists: {0}".format(filepath)) - - node.slow_start() - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 
'node_restored')) - node_restored.cleanup() - - pg_basebackup_path = self.get_bin_path('pg_basebackup') - - self.run_binary( - [ - pg_basebackup_path, '-p', str(node.port), '-h', 'localhost', - '-R', '-X', 'stream', '-D', node_restored.data_dir - ]) - - self.set_auto_conf(node_restored, {'port': node_restored.port}) - node_restored.slow_start(replica=True) - -# TODO: -# null offset STOP LSN and latest record in previous segment is conrecord (manual only) -# archiving from promoted delayed replica diff --git a/tests/restore_test.py b/tests/restore_test.py deleted file mode 100644 index 2de3ecc0f..000000000 --- a/tests/restore_test.py +++ /dev/null @@ -1,3822 +0,0 @@ -import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -import subprocess -import sys -from time import sleep -from datetime import datetime, timedelta, timezone -import hashlib -import shutil -import json -from shutil import copyfile -from testgres import QueryException, StartNodeException -from stat import S_ISDIR - - -class RestoreTest(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_restore_full_to_latest(self): - """recovery to latest from full backup""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=2) - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - pgbench.wait() - pgbench.stdout.close() - before = node.execute("postgres", "SELECT * FROM pgbench_branches") - backup_id = self.backup_node(backup_dir, 'node', node) - - node.stop() - node.cleanup() - - # 1 - Test recovery from latest - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - # 2 - Test that recovery.conf was created - # TODO update test - if self.get_version(node) >= self.version_to_num('12.0'): - recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') - with open(recovery_conf, 'r') as f: - print(f.read()) - else: - recovery_conf = os.path.join(node.data_dir, 'recovery.conf') - self.assertEqual(os.path.isfile(recovery_conf), True) - - node.slow_start() - - after = node.execute("postgres", "SELECT * FROM pgbench_branches") - self.assertEqual(before, after) - - # @unittest.skip("skip") - def test_restore_full_page_to_latest(self): - """recovery to latest from full + page backups""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=2) - - self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - pgbench.wait() - pgbench.stdout.close() - - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="page") - - before = node.execute("postgres", "SELECT * FROM pgbench_branches") - - node.stop() - 
node.cleanup() - - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - node.slow_start() - - after = node.execute("postgres", "SELECT * FROM pgbench_branches") - self.assertEqual(before, after) - - # @unittest.skip("skip") - def test_restore_to_specific_timeline(self): - """recovery to target timeline""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=2) - - before = node.execute("postgres", "SELECT * FROM pgbench_branches") - - backup_id = self.backup_node(backup_dir, 'node', node) - - target_tli = int( - node.get_control_data()["Latest checkpoint's TimeLineID"]) - node.stop() - node.cleanup() - - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - node.slow_start() - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '10', '-c', '2', '--no-vacuum']) - pgbench.wait() - pgbench.stdout.close() - - self.backup_node(backup_dir, 'node', node) - - node.stop() - node.cleanup() - - # Correct Backup must be choosen for restore - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--timeline={0}".format(target_tli)] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - recovery_target_timeline = self.get_recovery_conf( - node)["recovery_target_timeline"] - self.assertEqual(int(recovery_target_timeline), target_tli) - - node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") - self.assertEqual(before, after) - - # @unittest.skip("skip") - def test_restore_to_time(self): - """recovery to target time""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'TimeZone': 'GMT'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=2) - before = node.execute("postgres", "SELECT * FROM pgbench_branches") - - backup_id = self.backup_node(backup_dir, 'node', node) - - target_time = node.execute( - "postgres", "SELECT to_char(now(), 'YYYY-MM-DD HH24:MI:SS+00')" - )[0][0] - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - pgbench.wait() - pgbench.stdout.close() - - node.stop() - node.cleanup() - - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", '--time={0}'.format(target_time), - "--recovery-target-action=promote" - ] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - node.slow_start() - after = node.execute("postgres", "SELECT * FROM 
pgbench_branches") - self.assertEqual(before, after) - - # @unittest.skip("skip") - def test_restore_to_xid_inclusive(self): - """recovery to target xid""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=2) - with node.connect("postgres") as con: - con.execute("CREATE TABLE tbl0005 (a text)") - con.commit() - - backup_id = self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - pgbench.wait() - pgbench.stdout.close() - - before = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") - with node.connect("postgres") as con: - res = con.execute("INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") - con.commit() - target_xid = res[0][0] - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - pgbench.wait() - pgbench.stdout.close() - - node.stop() - node.cleanup() - - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", '--xid={0}'.format(target_xid), - "--recovery-target-action=promote"] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - node.slow_start() - after = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") - self.assertEqual(before, after) - self.assertEqual( - len(node.execute("postgres", "SELECT * FROM tbl0005")), 1) - - # @unittest.skip("skip") - def test_restore_to_xid_not_inclusive(self): - """recovery with target inclusive false""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=2) - with node.connect("postgres") as con: - con.execute("CREATE TABLE tbl0005 (a text)") - con.commit() - - backup_id = self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - pgbench.wait() - pgbench.stdout.close() - - before = node.execute("postgres", "SELECT * FROM pgbench_branches") - with node.connect("postgres") as con: - result = con.execute("INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") - con.commit() - target_xid = result[0][0] - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - pgbench.wait() - pgbench.stdout.close() - - node.stop() - node.cleanup() - - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", - '--xid={0}'.format(target_xid), - "--inclusive=false", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") - self.assertEqual(before, after) - self.assertEqual( - len(node.execute("postgres", "SELECT * FROM tbl0005")), 0) - - # @unittest.skip("skip") - def test_restore_to_lsn_inclusive(self): - 
"""recovery to target lsn""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - if self.get_version(node) < self.version_to_num('10.0'): - return - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=2) - with node.connect("postgres") as con: - con.execute("CREATE TABLE tbl0005 (a int)") - con.commit() - - backup_id = self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - pgbench.wait() - pgbench.stdout.close() - - before = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") - with node.connect("postgres") as con: - con.execute("INSERT INTO tbl0005 VALUES (1)") - con.commit() - res = con.execute("SELECT pg_current_wal_lsn()") - con.commit() - con.execute("INSERT INTO tbl0005 VALUES (2)") - con.commit() - xlogid, xrecoff = res[0][0].split('/') - xrecoff = hex(int(xrecoff, 16) + 1)[2:] - target_lsn = "{0}/{1}".format(xlogid, xrecoff) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - pgbench.wait() - pgbench.stdout.close() - - node.stop() - node.cleanup() - - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", '--lsn={0}'.format(target_lsn), - "--recovery-target-action=promote"] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - node.slow_start() - - after = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") - self.assertEqual(before, after) - self.assertEqual( - len(node.execute("postgres", "SELECT * FROM tbl0005")), 2) - - # @unittest.skip("skip") - def test_restore_to_lsn_not_inclusive(self): - """recovery to target lsn""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - if self.get_version(node) < self.version_to_num('10.0'): - return - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=2) - with node.connect("postgres") as con: - con.execute("CREATE TABLE tbl0005 (a int)") - con.commit() - - backup_id = self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - pgbench.wait() - pgbench.stdout.close() - - before = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") - with node.connect("postgres") as con: - con.execute("INSERT INTO tbl0005 VALUES (1)") - con.commit() - res = con.execute("SELECT pg_current_wal_lsn()") - con.commit() - con.execute("INSERT INTO tbl0005 VALUES (2)") - con.commit() - xlogid, xrecoff = res[0][0].split('/') - xrecoff = hex(int(xrecoff, 16) + 1)[2:] - target_lsn = "{0}/{1}".format(xlogid, xrecoff) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - pgbench.wait() - pgbench.stdout.close() - - node.stop() - node.cleanup() - - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, - options=[ - "--inclusive=false", - "-j", "4", '--lsn={0}'.format(target_lsn), - 
"--recovery-target-action=promote"] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - node.slow_start() - - after = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") - self.assertEqual(before, after) - self.assertEqual( - len(node.execute("postgres", "SELECT * FROM tbl0005")), 1) - - # @unittest.skip("skip") - def test_restore_full_ptrack_archive(self): - """recovery to latest from archive full+ptrack backups""" - if not self.ptrack: - self.skipTest('Skipped because ptrack support is disabled') - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - ptrack_enable=True) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - node.pgbench_init(scale=2) - - self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - pgbench.wait() - pgbench.stdout.close() - - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="ptrack") - - before = node.execute("postgres", "SELECT * FROM pgbench_branches") - - node.stop() - node.cleanup() - - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, - options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") - self.assertEqual(before, after) - - # @unittest.skip("skip") - def test_restore_ptrack(self): - """recovery to latest from archive full+ptrack+ptrack backups""" - if not self.ptrack: - self.skipTest('Skipped because ptrack support is disabled') - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - ptrack_enable=True) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - node.pgbench_init(scale=2) - - self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - pgbench.wait() - pgbench.stdout.close() - - self.backup_node(backup_dir, 'node', node, backup_type="ptrack") - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - pgbench.wait() - pgbench.stdout.close() - - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="ptrack") - - before = node.execute("postgres", "SELECT * FROM pgbench_branches") - - node.stop() - node.cleanup() - - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, - options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") - self.assertEqual(before, after) - - # @unittest.skip("skip") - def test_restore_full_ptrack_stream(self): - """recovery in stream mode to latest from full + ptrack backups""" - if not self.ptrack: - self.skipTest('Skipped 
because ptrack support is disabled') - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - node.pgbench_init(scale=2) - - self.backup_node(backup_dir, 'node', node, options=["--stream"]) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - pgbench.wait() - pgbench.stdout.close() - - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type="ptrack", options=["--stream"]) - - before = node.execute("postgres", "SELECT * FROM pgbench_branches") - - node.stop() - node.cleanup() - - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") - self.assertEqual(before, after) - - # @unittest.skip("skip") - def test_restore_full_ptrack_under_load(self): - """ - recovery to latest from full + ptrack backups - with loads when ptrack backup do - """ - if not self.ptrack: - self.skipTest('Skipped because ptrack support is disabled') - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - node.pgbench_init(scale=2) - - self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "4", "-T", "8"] - ) - - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type="ptrack", options=["--stream"]) - - pgbench.wait() - pgbench.stdout.close() - - bbalance = node.execute( - "postgres", "SELECT sum(bbalance) FROM pgbench_branches") - delta = node.execute( - "postgres", "SELECT sum(delta) FROM pgbench_history") - - self.assertEqual(bbalance, delta) - node.stop() - node.cleanup() - - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - node.slow_start() - bbalance = node.execute( - "postgres", "SELECT sum(bbalance) FROM pgbench_branches") - delta = node.execute( - "postgres", "SELECT sum(delta) FROM pgbench_history") - self.assertEqual(bbalance, delta) - - # @unittest.skip("skip") - def test_restore_full_under_load_ptrack(self): - """ - recovery to latest from full + page backups - with loads when full backup do - """ - if not self.ptrack: - self.skipTest('Skipped because ptrack support is disabled') - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = 
os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - # wal_segment_size = self.guc_wal_segment_size(node) - node.pgbench_init(scale=2) - - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "4", "-T", "8"] - ) - - self.backup_node(backup_dir, 'node', node) - - pgbench.wait() - pgbench.stdout.close() - - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type="ptrack", options=["--stream"]) - - bbalance = node.execute( - "postgres", "SELECT sum(bbalance) FROM pgbench_branches") - delta = node.execute( - "postgres", "SELECT sum(delta) FROM pgbench_history") - - self.assertEqual(bbalance, delta) - - node.stop() - node.cleanup() - # self.wrong_wal_clean(node, wal_segment_size) - - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - bbalance = node.execute( - "postgres", "SELECT sum(bbalance) FROM pgbench_branches") - delta = node.execute( - "postgres", "SELECT sum(delta) FROM pgbench_history") - self.assertEqual(bbalance, delta) - - # @unittest.skip("skip") - def test_restore_with_tablespace_mapping_1(self): - """recovery using tablespace-mapping option""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Create tablespace - tblspc_path = os.path.join(node.base_dir, "tblspc") - os.makedirs(tblspc_path) - with node.connect("postgres") as con: - con.connection.autocommit = True - con.execute("CREATE TABLESPACE tblspc LOCATION '%s'" % tblspc_path) - con.connection.autocommit = False - con.execute("CREATE TABLE test (id int) TABLESPACE tblspc") - con.execute("INSERT INTO test VALUES (1)") - con.commit() - - backup_id = self.backup_node(backup_dir, 'node', node) - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") - - # 1 - Try to restore to existing directory - node.stop() - try: - self.restore_node(backup_dir, 'node', node) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because restore destination is not empty.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Restore destination is not empty:', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # 2 - Try to restore to existing tablespace directory - tblspc_path_tmp = os.path.join(node.base_dir, "tblspc_tmp") - os.rename(tblspc_path, tblspc_path_tmp) - node.cleanup() - os.rename(tblspc_path_tmp, tblspc_path) - try: - self.restore_node(backup_dir, 'node', node) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because restore tablespace destination is " - "not empty.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - 
self.assertIn( - 'ERROR: Restore tablespace destination is not empty:', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # 3 - Restore using tablespace-mapping to not empty directory - tblspc_path_temp = os.path.join(node.base_dir, "tblspc_temp") - os.mkdir(tblspc_path_temp) - with open(os.path.join(tblspc_path_temp, 'file'), 'w+') as f: - f.close() - - try: - self.restore_node( - backup_dir, 'node', node, - options=["-T", "%s=%s" % (tblspc_path, tblspc_path_temp)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because restore tablespace destination is " - "not empty.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Restore tablespace destination is not empty:', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # 4 - Restore using tablespace-mapping - tblspc_path_new = os.path.join(node.base_dir, "tblspc_new") - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, - options=[ - "-T", "%s=%s" % (tblspc_path, tblspc_path_new)] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - node.slow_start() - - result = node.execute("postgres", "SELECT id FROM test") - self.assertEqual(result[0][0], 1) - - # 4 - Restore using tablespace-mapping using page backup - self.backup_node(backup_dir, 'node', node) - with node.connect("postgres") as con: - con.execute("INSERT INTO test VALUES (2)") - con.commit() - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="page") - - show_pb = self.show_pb(backup_dir, 'node') - self.assertEqual(show_pb[1]['status'], "OK") - self.assertEqual(show_pb[2]['status'], "OK") - - node.stop() - node.cleanup() - tblspc_path_page = os.path.join(node.base_dir, "tblspc_page") - - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, - options=[ - "-T", "%s=%s" % (tblspc_path_new, tblspc_path_page)]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - node.slow_start() - result = node.execute("postgres", "SELECT id FROM test OFFSET 1") - self.assertEqual(result[0][0], 2) - - # @unittest.skip("skip") - def test_restore_with_tablespace_mapping_2(self): - """recovery using tablespace-mapping option and page backup""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Full backup - self.backup_node(backup_dir, 'node', node) - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") - - # Create tablespace - tblspc_path = os.path.join(node.base_dir, "tblspc") - os.makedirs(tblspc_path) - with node.connect("postgres") as con: - con.connection.autocommit = True - con.execute("CREATE TABLESPACE tblspc LOCATION '%s'" % tblspc_path) - con.connection.autocommit = False - con.execute( - "CREATE TABLE tbl AS SELECT * " - "FROM generate_series(0,3) AS integer") - con.commit() - - # First page backup - self.backup_node(backup_dir, 'node', node, backup_type="page") - 
self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK") - self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['backup-mode'], "PAGE") - - # Create tablespace table - with node.connect("postgres") as con: -# con.connection.autocommit = True -# con.execute("CHECKPOINT") -# con.connection.autocommit = False - con.execute("CREATE TABLE tbl1 (a int) TABLESPACE tblspc") - con.execute( - "INSERT INTO tbl1 SELECT * " - "FROM generate_series(0,3) AS integer") - con.commit() - - # Second page backup - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="page") - self.assertEqual(self.show_pb(backup_dir, 'node')[2]['status'], "OK") - self.assertEqual( - self.show_pb(backup_dir, 'node')[2]['backup-mode'], "PAGE") - - node.stop() - node.cleanup() - - tblspc_path_new = os.path.join(node.base_dir, "tblspc_new") - - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, - options=[ - "-T", "%s=%s" % (tblspc_path, tblspc_path_new)]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - - count = node.execute("postgres", "SELECT count(*) FROM tbl") - self.assertEqual(count[0][0], 4) - count = node.execute("postgres", "SELECT count(*) FROM tbl1") - self.assertEqual(count[0][0], 4) - - # @unittest.skip("skip") - def test_restore_with_missing_or_corrupted_tablespace_map(self): - """restore backup with missing or corrupted tablespace_map""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Create tablespace - self.create_tblspace_in_node(node, 'tblspace') - node.pgbench_init(scale=1, tablespace='tblspace') - - # Full backup - self.backup_node(backup_dir, 'node', node) - - # Change some data - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # Page backup - page_id = self.backup_node(backup_dir, 'node', node, backup_type="page") - - pgdata = self.pgdata_content(node.data_dir) - - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2')) - node2.cleanup() - - olddir = self.get_tblspace_path(node, 'tblspace') - newdir = self.get_tblspace_path(node2, 'tblspace') - - # drop tablespace_map - tablespace_map = os.path.join( - backup_dir, 'backups', 'node', - page_id, 'database', 'tablespace_map') - - tablespace_map_tmp = os.path.join( - backup_dir, 'backups', 'node', - page_id, 'database', 'tablespace_map_tmp') - - os.rename(tablespace_map, tablespace_map_tmp) - - try: - self.restore_node( - backup_dir, 'node', node2, - options=["-T", "{0}={1}".format(olddir, newdir)]) - self.assertEqual( - 1, 0, - "Expecting Error because tablespace_map is missing.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Tablespace map is missing: "{0}", ' - 'probably backup {1} is corrupt, validate it'.format( - tablespace_map, page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.restore_node(backup_dir, 'node', node2) - self.assertEqual( - 1, 0, - "Expecting Error because tablespace_map is missing.\n " - "Output: {0} \n CMD: {1}".format( - 
repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Tablespace map is missing: "{0}", ' - 'probably backup {1} is corrupt, validate it'.format( - tablespace_map, page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - copyfile(tablespace_map_tmp, tablespace_map) - - with open(tablespace_map, "a") as f: - f.write("HELLO\n") - - try: - self.restore_node( - backup_dir, 'node', node2, - options=["-T", "{0}={1}".format(olddir, newdir)]) - self.assertEqual( - 1, 0, - "Expecting Error because tablespace_map is corupted.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Invalid CRC of tablespace map file "{0}"'.format(tablespace_map), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.restore_node(backup_dir, 'node', node2) - self.assertEqual( - 1, 0, - "Expecting Error because tablespace_map is corupted.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Invalid CRC of tablespace map file "{0}"'.format(tablespace_map), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # rename it back - os.rename(tablespace_map_tmp, tablespace_map) - - print(self.restore_node( - backup_dir, 'node', node2, - options=["-T", "{0}={1}".format(olddir, newdir)])) - - pgdata_restored = self.pgdata_content(node2.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_archive_node_backup_stream_restore_to_recovery_time(self): - """ - make node with archiving, make stream backup, - make PITR to Recovery Time - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - backup_id = self.backup_node( - backup_dir, 'node', node, options=["--stream"]) - node.safe_psql("postgres", "create table t_heap(a int)") - - node.stop() - node.cleanup() - - recovery_time = self.show_pb( - backup_dir, 'node', backup_id)['recovery-time'] - - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", '--time={0}'.format(recovery_time), - "--recovery-target-action=promote" - ] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - node.slow_start() - - result = node.psql("postgres", 'select * from t_heap') - self.assertTrue('does not exist' in result[2].decode("utf-8")) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_archive_node_backup_stream_restore_to_recovery_time(self): - """ - make node with archiving, make stream backup, - make PITR to Recovery Time - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - backup_id = 
self.backup_node( - backup_dir, 'node', node, options=["--stream"]) - node.safe_psql("postgres", "create table t_heap(a int)") - node.stop() - node.cleanup() - - recovery_time = self.show_pb( - backup_dir, 'node', backup_id)['recovery-time'] - - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", '--time={0}'.format(recovery_time), - "--recovery-target-action=promote" - ] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - node.slow_start() - result = node.psql("postgres", 'select * from t_heap') - self.assertTrue('does not exist' in result[2].decode("utf-8")) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_archive_node_backup_stream_pitr(self): - """ - make node with archiving, make stream backup, - create table t_heap, make pitr to Recovery Time, - check that t_heap do not exists - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - backup_id = self.backup_node( - backup_dir, 'node', node, options=["--stream"]) - node.safe_psql("postgres", "create table t_heap(a int)") - node.cleanup() - - recovery_time = self.show_pb( - backup_dir, 'node', backup_id)['recovery-time'] - - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", '--time={0}'.format(recovery_time), - "--recovery-target-action=promote" - ] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - node.slow_start() - - result = node.psql("postgres", 'select * from t_heap') - self.assertEqual(True, 'does not exist' in result[2].decode("utf-8")) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_archive_node_backup_archive_pitr_2(self): - """ - make node with archiving, make archive backup, - create table t_heap, make pitr to Recovery Time, - check that t_heap do not exists - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - backup_id = self.backup_node(backup_dir, 'node', node) - if self.paranoia: - pgdata = self.pgdata_content(node.data_dir) - - node.safe_psql("postgres", "create table t_heap(a int)") - node.stop() - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - recovery_time = self.show_pb( - backup_dir, 'node', backup_id)['recovery-time'] - - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", '--time={0}'.format(recovery_time), - "--recovery-target-action=promote"] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir) - 
        self.compare_pgdata(pgdata, pgdata_restored)
-
-        self.set_auto_conf(node_restored, {'port': node_restored.port})
-
-        node_restored.slow_start()
-
-        result = node_restored.psql("postgres", 'select * from t_heap')
-        self.assertTrue('does not exist' in result[2].decode("utf-8"))
-
-    # @unittest.skip("skip")
-    # @unittest.expectedFailure
-    def test_archive_restore_to_restore_point(self):
-        """
-        make node with archiving, make archive backup,
-        create table t_heap, make pitr to Recovery Time,
-        check that t_heap do not exists
-        """
-        node = self.make_simple_node(
-            base_dir=os.path.join(self.module_name, self.fname, 'node'),
-            initdb_params=['--data-checksums'])
-
-        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        self.set_archiving(backup_dir, 'node', node)
-        node.slow_start()
-
-        self.backup_node(backup_dir, 'node', node)
-
-        node.safe_psql(
-            "postgres",
-            "create table t_heap as select generate_series(0,10000)")
-        result = node.safe_psql(
-            "postgres",
-            "select * from t_heap")
-        node.safe_psql(
-            "postgres", "select pg_create_restore_point('savepoint')")
-        node.safe_psql(
-            "postgres",
-            "create table t_heap_1 as select generate_series(0,10000)")
-        node.cleanup()
-
-        self.restore_node(
-            backup_dir, 'node', node,
-            options=[
-                "--recovery-target-name=savepoint",
-                "--recovery-target-action=promote"])
-
-        node.slow_start()
-
-        result_new = node.safe_psql("postgres", "select * from t_heap")
-        res = node.psql("postgres", "select * from t_heap_1")
-        self.assertEqual(
-            res[0], 1,
-            "Table t_heap_1 should not exist in restored instance")
-
-        self.assertEqual(result, result_new)
-
-    @unittest.skip("skip")
-    # @unittest.expectedFailure
-    def test_zags_block_corrupt(self):
-        node = self.make_simple_node(
-            base_dir=os.path.join(self.module_name, self.fname, 'node'),
-            initdb_params=['--data-checksums'])
-
-        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        self.set_archiving(backup_dir, 'node', node)
-        node.slow_start()
-
-        self.backup_node(backup_dir, 'node', node)
-
-        conn = node.connect()
-        with node.connect("postgres") as conn:
-
-            conn.execute(
-                "create table tbl(i int)")
-            conn.commit()
-            conn.execute(
-                "create index idx ON tbl (i)")
-            conn.commit()
-            conn.execute(
-                "insert into tbl select i from generate_series(0,400) as i")
-            conn.commit()
-            conn.execute(
-                "select pg_relation_size('idx')")
-            conn.commit()
-            conn.execute(
-                "delete from tbl where i < 100")
-            conn.commit()
-            conn.execute(
-                "explain analyze select i from tbl order by i")
-            conn.commit()
-            conn.execute(
-                "select i from tbl order by i")
-            conn.commit()
-            conn.execute(
-                "create extension pageinspect")
-            conn.commit()
-            print(conn.execute(
-                "select * from bt_page_stats('idx',1)"))
-            conn.commit()
-            conn.execute(
-                "insert into tbl select i from generate_series(0,100) as i")
-            conn.commit()
-            conn.execute(
-                "insert into tbl select i from generate_series(0,100) as i")
-            conn.commit()
-            conn.execute(
-                "insert into tbl select i from generate_series(0,100) as i")
-            conn.commit()
-            conn.execute(
-                "insert into tbl select i from generate_series(0,100) as i")
-
-        node_restored = self.make_simple_node(
-            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'),
-            initdb_params=['--data-checksums'])
-
-        node_restored.cleanup()
-
-        self.restore_node(
-            backup_dir, 'node', node_restored)
-
-        self.set_auto_conf(
-            node_restored,
-            {'archive_mode': 'off', 'hot_standby': 'on', 'port': node_restored.port})
-
-        node_restored.slow_start()
-
-    @unittest.skip("skip")
-    # @unittest.expectedFailure
-    def test_zags_block_corrupt_1(self):
-        node = self.make_simple_node(
-            base_dir=os.path.join(self.module_name, self.fname, 'node'),
-            initdb_params=['--data-checksums'],
-            pg_options={
-                'full_page_writes': 'on'}
-            )
-        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        self.set_archiving(backup_dir, 'node', node)
-        node.slow_start()
-
-        self.backup_node(backup_dir, 'node', node)
-
-        node.safe_psql('postgres', 'create table tbl(i int)')
-
-        node.safe_psql('postgres', 'create index idx ON tbl (i)')
-
-        node.safe_psql(
-            'postgres',
-            'insert into tbl select i from generate_series(0,100000) as i')
-
-        node.safe_psql(
-            'postgres',
-            'delete from tbl where i%2 = 0')
-
-        node.safe_psql(
-            'postgres',
-            'explain analyze select i from tbl order by i')
-
-        node.safe_psql(
-            'postgres',
-            'select i from tbl order by i')
-
-        node.safe_psql(
-            'postgres',
-            'create extension pageinspect')
-
-        node.safe_psql(
-            'postgres',
-            'insert into tbl select i from generate_series(0,100) as i')
-
-        node.safe_psql(
-            'postgres',
-            'insert into tbl select i from generate_series(0,100) as i')
-
-        node.safe_psql(
-            'postgres',
-            'insert into tbl select i from generate_series(0,100) as i')
-
-        node.safe_psql(
-            'postgres',
-            'insert into tbl select i from generate_series(0,100) as i')
-
-        self.switch_wal_segment(node)
-
-        node_restored = self.make_simple_node(
-            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'),
-            initdb_params=['--data-checksums'])
-
-        pgdata = self.pgdata_content(node.data_dir)
-
-        node_restored.cleanup()
-
-        self.restore_node(
-            backup_dir, 'node', node_restored)
-
-        self.set_auto_conf(
-            node_restored,
-            {'archive_mode': 'off', 'hot_standby': 'on', 'port': node_restored.port})
-
-        node_restored.slow_start()
-
-        while True:
-            with open(node_restored.pg_log_file, 'r') as f:
-                if 'selected new timeline ID' in f.read():
-                    break
-
-        # with open(node_restored.pg_log_file, 'r') as f:
-        #     print(f.read())
-
-        pgdata_restored = self.pgdata_content(node_restored.data_dir)
-
-        self.compare_pgdata(pgdata, pgdata_restored)
-
-#        pg_xlogdump_path = self.get_bin_path('pg_xlogdump')
-
-#        pg_xlogdump = self.run_binary(
-#            [
-#                pg_xlogdump_path, '-b',
-#                os.path.join(backup_dir, 'wal', 'node', '000000010000000000000003'),
-#                ' | ', 'grep', 'Btree', ''
-#            ], async=False)
-
-        if pg_xlogdump.returncode:
-            self.assertFalse(
-                True,
-                'Failed to start pg_wal_dump: {0}'.format(
-                    pg_receivexlog.communicate()[1]))
-
-    # @unittest.skip("skip")
-    def test_restore_chain(self):
-        """
-        make node, take full backup, take several
-        ERROR delta backups, take valid delta backup,
-        restore must be successfull
-        """
-        node = self.make_simple_node(
-            base_dir=os.path.join(self.module_name, self.fname, 'node'),
-            set_replication=True,
-            initdb_params=['--data-checksums'])
-
-        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        self.set_archiving(backup_dir, 'node', node)
-        node.slow_start()
-
-        # Take FULL
-        self.backup_node(
-            backup_dir, 'node', node)
-
-        # Take DELTA
-        self.backup_node(
-            backup_dir, 'node', node, backup_type='delta')
-
-        # Take ERROR DELTA
-        try:
-            self.backup_node(
-                backup_dir, 'node', node,
backup_type='delta', options=['-U', 'wrong_name']) - except ProbackupException as e: - pass - - # Take ERROR DELTA - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['-U', 'wrong_name']) - except ProbackupException as e: - pass - - # Take DELTA - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - # Take ERROR DELTA - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['-U', 'wrong_name']) - except ProbackupException as e: - pass - - self.assertEqual( - 'OK', - self.show_pb(backup_dir, 'node')[0]['status'], - 'Backup STATUS should be "OK"') - - self.assertEqual( - 'OK', - self.show_pb(backup_dir, 'node')[1]['status'], - 'Backup STATUS should be "OK"') - - self.assertEqual( - 'ERROR', - self.show_pb(backup_dir, 'node')[2]['status'], - 'Backup STATUS should be "ERROR"') - - self.assertEqual( - 'ERROR', - self.show_pb(backup_dir, 'node')[3]['status'], - 'Backup STATUS should be "ERROR"') - - self.assertEqual( - 'OK', - self.show_pb(backup_dir, 'node')[4]['status'], - 'Backup STATUS should be "OK"') - - self.assertEqual( - 'ERROR', - self.show_pb(backup_dir, 'node')[5]['status'], - 'Backup STATUS should be "ERROR"') - - node.cleanup() - - self.restore_node(backup_dir, 'node', node) - - # @unittest.skip("skip") - def test_restore_chain_with_corrupted_backup(self): - """more complex test_restore_chain()""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Take FULL - self.backup_node( - backup_dir, 'node', node) - - # Take DELTA - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Take ERROR DELTA - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=['-U', 'wrong_name']) - except ProbackupException as e: - pass - - # Take 1 DELTA - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - # Take ERROR DELTA - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['-U', 'wrong_name']) - except ProbackupException as e: - pass - - # Take 2 DELTA - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - # Take ERROR DELTA - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['-U', 'wrong_name']) - except ProbackupException as e: - pass - - # Take 3 DELTA - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - # Corrupted 4 DELTA - corrupt_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - # ORPHAN 5 DELTA - restore_target_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - # ORPHAN 6 DELTA - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - # NEXT FULL BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='full') - - # Next Delta - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - # do corrupt 6 DELTA backup - file = os.path.join( - backup_dir, 'backups', 'node', - corrupt_id, 'database', 'global', 'pg_control') - - file_new = os.path.join(backup_dir, 'pg_control') - os.rename(file, file_new) - - # RESTORE BACKUP - node.cleanup() - - try: - self.restore_node( - backup_dir, 'node', node, backup_id=restore_target_id) - 
self.assertEqual( - 1, 0, - "Expecting Error because restore backup is corrupted.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Backup {0} is orphan'.format(restore_target_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'OK', - self.show_pb(backup_dir, 'node')[0]['status'], - 'Backup STATUS should be "OK"') - - self.assertEqual( - 'OK', - self.show_pb(backup_dir, 'node')[1]['status'], - 'Backup STATUS should be "OK"') - - self.assertEqual( - 'ERROR', - self.show_pb(backup_dir, 'node')[2]['status'], - 'Backup STATUS should be "ERROR"') - - self.assertEqual( - 'OK', - self.show_pb(backup_dir, 'node')[3]['status'], - 'Backup STATUS should be "OK"') - - self.assertEqual( - 'ERROR', - self.show_pb(backup_dir, 'node')[4]['status'], - 'Backup STATUS should be "ERROR"') - - self.assertEqual( - 'OK', - self.show_pb(backup_dir, 'node')[5]['status'], - 'Backup STATUS should be "OK"') - - self.assertEqual( - 'ERROR', - self.show_pb(backup_dir, 'node')[6]['status'], - 'Backup STATUS should be "ERROR"') - - self.assertEqual( - 'OK', - self.show_pb(backup_dir, 'node')[7]['status'], - 'Backup STATUS should be "OK"') - - # corruption victim - self.assertEqual( - 'CORRUPT', - self.show_pb(backup_dir, 'node')[8]['status'], - 'Backup STATUS should be "CORRUPT"') - - # orphaned child - self.assertEqual( - 'ORPHAN', - self.show_pb(backup_dir, 'node')[9]['status'], - 'Backup STATUS should be "ORPHAN"') - - # orphaned child - self.assertEqual( - 'ORPHAN', - self.show_pb(backup_dir, 'node')[10]['status'], - 'Backup STATUS should be "ORPHAN"') - - # next FULL - self.assertEqual( - 'OK', - self.show_pb(backup_dir, 'node')[11]['status'], - 'Backup STATUS should be "OK"') - - # next DELTA - self.assertEqual( - 'OK', - self.show_pb(backup_dir, 'node')[12]['status'], - 'Backup STATUS should be "OK"') - - node.cleanup() - - # Skipped, because backups from the future are invalid. - # This cause a "ERROR: Can't assign backup_id, there is already a backup in future" - # now (PBCKP-259). 
We can conduct such a test again when we - # untie 'backup_id' from 'start_time' - @unittest.skip("skip") - def test_restore_backup_from_future(self): - """more complex test_restore_chain()""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Take FULL - self.backup_node(backup_dir, 'node', node) - - node.pgbench_init(scale=5) - # pgbench = node.pgbench(options=['-T', '20', '-c', '2']) - # pgbench.wait() - - # Take PAGE from future - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - with open( - os.path.join( - backup_dir, 'backups', 'node', - backup_id, "backup.control"), "a") as conf: - conf.write("start-time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() + timedelta(days=3))) - - # rename directory - new_id = self.show_pb(backup_dir, 'node')[1]['id'] - - os.rename( - os.path.join(backup_dir, 'backups', 'node', backup_id), - os.path.join(backup_dir, 'backups', 'node', new_id)) - - pgbench = node.pgbench(options=['-T', '7', '-c', '1', '--no-vacuum']) - pgbench.wait() - - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') - pgdata = self.pgdata_content(node.data_dir) - - node.cleanup() - self.restore_node(backup_dir, 'node', node, backup_id=backup_id) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_restore_target_immediate_stream(self): - """ - correct handling of immediate recovery target - for STREAM backups - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # Take FULL - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - # Take delta - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['--stream']) - - pgdata = self.pgdata_content(node.data_dir) - - # TODO update test - if self.get_version(node) >= self.version_to_num('12.0'): - recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') - with open(recovery_conf, 'r') as f: - print(f.read()) - else: - recovery_conf = os.path.join(node.data_dir, 'recovery.conf') - - # restore delta backup - node.cleanup() - self.restore_node( - backup_dir, 'node', node, options=['--immediate']) - - self.assertTrue( - os.path.isfile(recovery_conf), - "File {0} do not exists".format(recovery_conf)) - - # restore delta backup - node.cleanup() - self.restore_node( - backup_dir, 'node', node, options=['--recovery-target=immediate']) - - self.assertTrue( - os.path.isfile(recovery_conf), - "File {0} do not exists".format(recovery_conf)) - - # @unittest.skip("skip") - def test_restore_target_immediate_archive(self): - """ - correct handling of immediate recovery target - for ARCHIVE backups - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, 
self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Take FULL - self.backup_node( - backup_dir, 'node', node) - - # Take delta - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='delta') - - pgdata = self.pgdata_content(node.data_dir) - - # TODO update test - if self.get_version(node) >= self.version_to_num('12.0'): - recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') - with open(recovery_conf, 'r') as f: - print(f.read()) - else: - recovery_conf = os.path.join(node.data_dir, 'recovery.conf') - - # restore page backup - node.cleanup() - self.restore_node( - backup_dir, 'node', node, options=['--immediate']) - - # For archive backup with immediate recovery target - # recovery.conf is mandatory - with open(recovery_conf, 'r') as f: - self.assertIn("recovery_target = 'immediate'", f.read()) - - # restore page backup - node.cleanup() - self.restore_node( - backup_dir, 'node', node, options=['--recovery-target=immediate']) - - # For archive backup with immediate recovery target - # recovery.conf is mandatory - with open(recovery_conf, 'r') as f: - self.assertIn("recovery_target = 'immediate'", f.read()) - - # @unittest.skip("skip") - def test_restore_target_latest_archive(self): - """ - make sure that recovery_target 'latest' - is default recovery target - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Take FULL - self.backup_node(backup_dir, 'node', node) - - if self.get_version(node) >= self.version_to_num('12.0'): - recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') - else: - recovery_conf = os.path.join(node.data_dir, 'recovery.conf') - - # restore - node.cleanup() - self.restore_node(backup_dir, 'node', node) - - # hash_1 = hashlib.md5( - # open(recovery_conf, 'rb').read()).hexdigest() - - with open(recovery_conf, 'r') as f: - content_1 = '' - while True: - line = f.readline() - - if not line: - break - if line.startswith("#"): - continue - content_1 += line - - node.cleanup() - self.restore_node(backup_dir, 'node', node, options=['--recovery-target=latest']) - - # hash_2 = hashlib.md5( - # open(recovery_conf, 'rb').read()).hexdigest() - - with open(recovery_conf, 'r') as f: - content_2 = '' - while True: - line = f.readline() - - if not line: - break - if line.startswith("#"): - continue - content_2 += line - - self.assertEqual(content_1, content_2) - - # @unittest.skip("skip") - def test_restore_target_new_options(self): - """ - check that new --recovery-target-* - options are working correctly - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Take FULL - self.backup_node(backup_dir, 'node', node) - - # TODO update test - if self.get_version(node) >= self.version_to_num('12.0'): - recovery_conf = os.path.join(node.data_dir, 
'postgresql.auto.conf') - with open(recovery_conf, 'r') as f: - print(f.read()) - else: - recovery_conf = os.path.join(node.data_dir, 'recovery.conf') - - node.pgbench_init(scale=2) - pgbench = node.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - pgbench.wait() - pgbench.stdout.close() - - node.safe_psql( - "postgres", - "CREATE TABLE tbl0005 (a text)") - - node.safe_psql( - "postgres", "select pg_create_restore_point('savepoint')") - - target_name = 'savepoint' - - # in python-3.6+ it can be ...now()..astimezone()... - target_time = datetime.utcnow().replace(tzinfo=timezone.utc).astimezone().strftime("%Y-%m-%d %H:%M:%S %z") - with node.connect("postgres") as con: - res = con.execute( - "INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") - con.commit() - target_xid = res[0][0] - - with node.connect("postgres") as con: - con.execute("INSERT INTO tbl0005 VALUES (1)") - con.commit() - if self.get_version(node) > self.version_to_num('10.0'): - res = con.execute("SELECT pg_current_wal_lsn()") - else: - res = con.execute("SELECT pg_current_xlog_location()") - - con.commit() - con.execute("INSERT INTO tbl0005 VALUES (2)") - con.commit() - xlogid, xrecoff = res[0][0].split('/') - xrecoff = hex(int(xrecoff, 16) + 1)[2:] - target_lsn = "{0}/{1}".format(xlogid, xrecoff) - - # Restore with recovery target time - node.cleanup() - self.restore_node( - backup_dir, 'node', node, - options=[ - '--recovery-target-time={0}'.format(target_time), - "--recovery-target-action=promote", - '--recovery-target-timeline=1', - ]) - - with open(recovery_conf, 'r') as f: - recovery_conf_content = f.read() - - self.assertIn( - "recovery_target_time = '{0}'".format(target_time), - recovery_conf_content) - - self.assertIn( - "recovery_target_action = 'promote'", - recovery_conf_content) - - self.assertIn( - "recovery_target_timeline = '1'", - recovery_conf_content) - - node.slow_start() - - # Restore with recovery target xid - node.cleanup() - self.restore_node( - backup_dir, 'node', node, - options=[ - '--recovery-target-xid={0}'.format(target_xid), - "--recovery-target-action=promote", - '--recovery-target-timeline=1', - ]) - - with open(recovery_conf, 'r') as f: - recovery_conf_content = f.read() - - self.assertIn( - "recovery_target_xid = '{0}'".format(target_xid), - recovery_conf_content) - - self.assertIn( - "recovery_target_action = 'promote'", - recovery_conf_content) - - self.assertIn( - "recovery_target_timeline = '1'", - recovery_conf_content) - - node.slow_start() - - # Restore with recovery target name - node.cleanup() - self.restore_node( - backup_dir, 'node', node, - options=[ - '--recovery-target-name={0}'.format(target_name), - "--recovery-target-action=promote", - '--recovery-target-timeline=1', - ]) - - with open(recovery_conf, 'r') as f: - recovery_conf_content = f.read() - - self.assertIn( - "recovery_target_name = '{0}'".format(target_name), - recovery_conf_content) - - self.assertIn( - "recovery_target_action = 'promote'", - recovery_conf_content) - - self.assertIn( - "recovery_target_timeline = '1'", - recovery_conf_content) - - node.slow_start() - - # Restore with recovery target lsn - if self.get_version(node) >= 100000: - - node.cleanup() - self.restore_node( - backup_dir, 'node', node, - options=[ - '--recovery-target-lsn={0}'.format(target_lsn), - "--recovery-target-action=promote", - '--recovery-target-timeline=1', - ]) - - with open(recovery_conf, 'r') as f: - recovery_conf_content = f.read() - - self.assertIn( - "recovery_target_lsn = '{0}'".format(target_lsn), - 
recovery_conf_content) - - self.assertIn( - "recovery_target_action = 'promote'", - recovery_conf_content) - - self.assertIn( - "recovery_target_timeline = '1'", - recovery_conf_content) - - node.slow_start() - - # @unittest.skip("skip") - def test_smart_restore(self): - """ - make node, create database, take full backup, drop database, - take incremental backup and restore it, - make sure that files from dropped database are not - copied during restore - https://github.com/postgrespro/pg_probackup/issues/63 - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # create database - node.safe_psql( - "postgres", - "CREATE DATABASE testdb") - - # take FULL backup - full_id = self.backup_node(backup_dir, 'node', node) - - # drop database - node.safe_psql( - "postgres", - "DROP DATABASE testdb") - - # take PAGE backup - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # restore PAGE backup - node.cleanup() - self.restore_node( - backup_dir, 'node', node, backup_id=page_id, - options=['--no-validate', '--log-level-file=VERBOSE']) - - logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(logfile, 'r') as f: - logfile_content = f.read() - - # get delta between FULL and PAGE filelists - filelist_full = self.get_backup_filelist( - backup_dir, 'node', full_id) - - filelist_page = self.get_backup_filelist( - backup_dir, 'node', page_id) - - filelist_diff = self.get_backup_filelist_diff( - filelist_full, filelist_page) - - for file in filelist_diff: - self.assertNotIn(file, logfile_content) - - # @unittest.skip("skip") - def test_pg_11_group_access(self): - """ - test group access for PG >= 11 - """ - if self.pg_config_version < self.version_to_num('11.0'): - self.skipTest('You need PostgreSQL >= 11 for this test') - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=[ - '--data-checksums', - '--allow-group-access']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # take FULL backup - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - pgdata = self.pgdata_content(node.data_dir) - - # restore backup - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node( - backup_dir, 'node', node_restored) - - # compare pgdata permissions - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_restore_concurrent_drop_table(self): - """""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=1) - - 
# FULL backup - self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--compress']) - - # DELTA backup - gdb = self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--stream', '--compress', '--no-validate'], - gdb=True) - - gdb.set_breakpoint('backup_data_file') - gdb.run_until_break() - - node.safe_psql( - 'postgres', - 'DROP TABLE pgbench_accounts') - - # do checkpoint to guarantee filenode removal - node.safe_psql( - 'postgres', - 'CHECKPOINT') - - gdb.remove_all_breakpoints() - gdb.continue_execution_until_exit() - - pgdata = self.pgdata_content(node.data_dir) - node.cleanup() - - self.restore_node( - backup_dir, 'node', node, options=['--no-validate']) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_lost_non_data_file(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - file = os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', 'postgresql.auto.conf') - - os.remove(file) - - node.cleanup() - - try: - self.restore_node( - backup_dir, 'node', node, options=['--no-validate']) - self.assertEqual( - 1, 0, - "Expecting Error because of non-data file dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'No such file or directory', e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'ERROR: Backup files restoring failed', e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - def test_partial_restore_exclude(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - for i in range(1, 10, 1): - node.safe_psql( - 'postgres', - 'CREATE database db{0}'.format(i)) - - db_list_raw = node.safe_psql( - 'postgres', - 'SELECT to_json(a) ' - 'FROM (SELECT oid, datname FROM pg_database) a').decode('utf-8').rstrip() - - db_list_splitted = db_list_raw.splitlines() - - db_list = {} - for line in db_list_splitted: - line = json.loads(line) - db_list[line['datname']] = line['oid'] - - # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) - pgdata = self.pgdata_content(node.data_dir) - - # restore FULL backup - node_restored_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) - node_restored_1.cleanup() - - try: - self.restore_node( - backup_dir, 'node', - node_restored_1, options=[ - "--db-include=db1", - "--db-exclude=db2"]) - self.assertEqual( - 1, 0, - "Expecting Error because of 'db-exclude' and 'db-include'.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: You cannot specify '--db-include' " - "and '--db-exclude' 
together", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.restore_node( - backup_dir, 'node', node_restored_1) - - pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) - self.compare_pgdata(pgdata, pgdata_restored_1) - - db1_path = os.path.join( - node_restored_1.data_dir, 'base', db_list['db1']) - db5_path = os.path.join( - node_restored_1.data_dir, 'base', db_list['db5']) - - self.truncate_every_file_in_dir(db1_path) - self.truncate_every_file_in_dir(db5_path) - pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) - - node_restored_2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored_2')) - node_restored_2.cleanup() - - self.restore_node( - backup_dir, 'node', - node_restored_2, options=[ - "--db-exclude=db1", - "--db-exclude=db5"]) - - pgdata_restored_2 = self.pgdata_content(node_restored_2.data_dir) - self.compare_pgdata(pgdata_restored_1, pgdata_restored_2) - - self.set_auto_conf(node_restored_2, {'port': node_restored_2.port}) - - node_restored_2.slow_start() - - node_restored_2.safe_psql( - 'postgres', - 'select 1') - - try: - node_restored_2.safe_psql( - 'db1', - 'select 1') - except QueryException as e: - self.assertIn('FATAL', e.message) - - try: - node_restored_2.safe_psql( - 'db5', - 'select 1') - except QueryException as e: - self.assertIn('FATAL', e.message) - - with open(node_restored_2.pg_log_file, 'r') as f: - output = f.read() - - self.assertNotIn('PANIC', output) - - def test_partial_restore_exclude_tablespace(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - cat_version = node.get_control_data()["Catalog version number"] - version_specific_dir = 'PG_' + node.major_version_str + '_' + cat_version - - # PG_10_201707211 - # pg_tblspc/33172/PG_9.5_201510051/16386/ - - self.create_tblspace_in_node(node, 'somedata') - - node_tablespace = self.get_tblspace_path(node, 'somedata') - - tbl_oid = node.safe_psql( - 'postgres', - "SELECT oid " - "FROM pg_tablespace " - "WHERE spcname = 'somedata'").decode('utf-8').rstrip() - - for i in range(1, 10, 1): - node.safe_psql( - 'postgres', - 'CREATE database db{0} tablespace somedata'.format(i)) - - db_list_raw = node.safe_psql( - 'postgres', - 'SELECT to_json(a) ' - 'FROM (SELECT oid, datname FROM pg_database) a').decode('utf-8').rstrip() - - db_list_splitted = db_list_raw.splitlines() - - db_list = {} - for line in db_list_splitted: - line = json.loads(line) - db_list[line['datname']] = line['oid'] - - # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) - pgdata = self.pgdata_content(node.data_dir) - - # restore FULL backup - node_restored_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) - node_restored_1.cleanup() - - node1_tablespace = self.get_tblspace_path(node_restored_1, 'somedata') - - self.restore_node( - backup_dir, 'node', - node_restored_1, options=[ - "-T", "{0}={1}".format( - node_tablespace, node1_tablespace)]) - - pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) - self.compare_pgdata(pgdata, pgdata_restored_1) - - # truncate every db - for db in db_list: - # with exception below - if db 
in ['db1', 'db5']: - self.truncate_every_file_in_dir( - os.path.join( - node_restored_1.data_dir, 'pg_tblspc', - tbl_oid, version_specific_dir, db_list[db])) - - pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) - - node_restored_2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored_2')) - node_restored_2.cleanup() - node2_tablespace = self.get_tblspace_path(node_restored_2, 'somedata') - - self.restore_node( - backup_dir, 'node', - node_restored_2, options=[ - "--db-exclude=db1", - "--db-exclude=db5", - "-T", "{0}={1}".format( - node_tablespace, node2_tablespace)]) - - pgdata_restored_2 = self.pgdata_content(node_restored_2.data_dir) - self.compare_pgdata(pgdata_restored_1, pgdata_restored_2) - - self.set_auto_conf(node_restored_2, {'port': node_restored_2.port}) - - node_restored_2.slow_start() - - node_restored_2.safe_psql( - 'postgres', - 'select 1') - - try: - node_restored_2.safe_psql( - 'db1', - 'select 1') - except QueryException as e: - self.assertIn('FATAL', e.message) - - try: - node_restored_2.safe_psql( - 'db5', - 'select 1') - except QueryException as e: - self.assertIn('FATAL', e.message) - - with open(node_restored_2.pg_log_file, 'r') as f: - output = f.read() - - self.assertNotIn('PANIC', output) - - def test_partial_restore_include(self): - """ - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - for i in range(1, 10, 1): - node.safe_psql( - 'postgres', - 'CREATE database db{0}'.format(i)) - - db_list_raw = node.safe_psql( - 'postgres', - 'SELECT to_json(a) ' - 'FROM (SELECT oid, datname FROM pg_database) a').decode('utf-8').rstrip() - - db_list_splitted = db_list_raw.splitlines() - - db_list = {} - for line in db_list_splitted: - line = json.loads(line) - db_list[line['datname']] = line['oid'] - - # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) - pgdata = self.pgdata_content(node.data_dir) - - # restore FULL backup - node_restored_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) - node_restored_1.cleanup() - - try: - self.restore_node( - backup_dir, 'node', - node_restored_1, options=[ - "--db-include=db1", - "--db-exclude=db2"]) - self.assertEqual( - 1, 0, - "Expecting Error because of 'db-exclude' and 'db-include'.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: You cannot specify '--db-include' " - "and '--db-exclude' together", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.restore_node( - backup_dir, 'node', node_restored_1) - - pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) - self.compare_pgdata(pgdata, pgdata_restored_1) - - # truncate every db - for db in db_list: - # with exception below - if db in ['template0', 'template1', 'postgres', 'db1', 'db5']: - continue - self.truncate_every_file_in_dir( - os.path.join( - node_restored_1.data_dir, 'base', db_list[db])) - - pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) - - node_restored_2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored_2')) - 
node_restored_2.cleanup() - - self.restore_node( - backup_dir, 'node', - node_restored_2, options=[ - "--db-include=db1", - "--db-include=db5", - "--db-include=postgres"]) - - pgdata_restored_2 = self.pgdata_content(node_restored_2.data_dir) - self.compare_pgdata(pgdata_restored_1, pgdata_restored_2) - - self.set_auto_conf(node_restored_2, {'port': node_restored_2.port}) - node_restored_2.slow_start() - - node_restored_2.safe_psql( - 'db1', - 'select 1') - - node_restored_2.safe_psql( - 'db5', - 'select 1') - - node_restored_2.safe_psql( - 'template1', - 'select 1') - - try: - node_restored_2.safe_psql( - 'db2', - 'select 1') - except QueryException as e: - self.assertIn('FATAL', e.message) - - try: - node_restored_2.safe_psql( - 'db10', - 'select 1') - except QueryException as e: - self.assertIn('FATAL', e.message) - - with open(node_restored_2.pg_log_file, 'r') as f: - output = f.read() - - self.assertNotIn('PANIC', output) - - def test_partial_restore_backward_compatibility_1(self): - """ - old binary should be of version < 2.2.0 - """ - if not self.probackup_old_path: - self.skipTest("You must specify PGPROBACKUPBIN_OLD" - " for run this test") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) - - node.slow_start() - - # create databases - for i in range(1, 10, 1): - node.safe_psql( - 'postgres', - 'CREATE database db{0}'.format(i)) - - # FULL backup with old binary, without partial restore support - backup_id = self.backup_node( - backup_dir, 'node', node, - old_binary=True, options=['--stream']) - - pgdata = self.pgdata_content(node.data_dir) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - try: - self.restore_node( - backup_dir, 'node', - node_restored, options=[ - "--db-exclude=db5"]) - self.assertEqual( - 1, 0, - "Expecting Error because backup do not support partial restore.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup {0} doesn't contain a database_map, " - "partial restore is impossible".format(backup_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.restore_node(backup_dir, 'node', node_restored) - - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # incremental backup with partial restore support - for i in range(11, 15, 1): - node.safe_psql( - 'postgres', - 'CREATE database db{0}'.format(i)) - - # get db list - db_list_raw = node.safe_psql( - 'postgres', - 'SELECT to_json(a) ' - 'FROM (SELECT oid, datname FROM pg_database) a').rstrip() - db_list_splitted = db_list_raw.splitlines() - db_list = {} - for line in db_list_splitted: - line = json.loads(line) - db_list[line['datname']] = line['oid'] - - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['--stream']) - - # get etalon - node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored) - self.truncate_every_file_in_dir( - os.path.join( - node_restored.data_dir, 'base', db_list['db5'])) - self.truncate_every_file_in_dir( - os.path.join( - node_restored.data_dir, 
'base', db_list['db14'])) - pgdata_restored = self.pgdata_content(node_restored.data_dir) - - # get new node - node_restored_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) - node_restored_1.cleanup() - - self.restore_node( - backup_dir, 'node', - node_restored_1, options=[ - "--db-exclude=db5", - "--db-exclude=db14"]) - - pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) - - self.compare_pgdata(pgdata_restored, pgdata_restored_1) - - def test_partial_restore_backward_compatibility_merge(self): - """ - old binary should be of version < 2.2.0 - """ - if not self.probackup_old_path: - self.skipTest("You must specify PGPROBACKUPBIN_OLD" - " for run this test") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) - - node.slow_start() - - # create databases - for i in range(1, 10, 1): - node.safe_psql( - 'postgres', - 'CREATE database db{0}'.format(i)) - - # FULL backup with old binary, without partial restore support - backup_id = self.backup_node( - backup_dir, 'node', node, - old_binary=True, options=['--stream']) - - pgdata = self.pgdata_content(node.data_dir) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - try: - self.restore_node( - backup_dir, 'node', - node_restored, options=[ - "--db-exclude=db5"]) - self.assertEqual( - 1, 0, - "Expecting Error because backup do not support partial restore.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup {0} doesn't contain a database_map, " - "partial restore is impossible.".format(backup_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.restore_node(backup_dir, 'node', node_restored) - - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # incremental backup with partial restore support - for i in range(11, 15, 1): - node.safe_psql( - 'postgres', - 'CREATE database db{0}'.format(i)) - - # get db list - db_list_raw = node.safe_psql( - 'postgres', - 'SELECT to_json(a) ' - 'FROM (SELECT oid, datname FROM pg_database) a').rstrip() - db_list_splitted = db_list_raw.splitlines() - db_list = {} - for line in db_list_splitted: - line = json.loads(line) - db_list[line['datname']] = line['oid'] - - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['--stream']) - - # get etalon - node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored) - self.truncate_every_file_in_dir( - os.path.join( - node_restored.data_dir, 'base', db_list['db5'])) - self.truncate_every_file_in_dir( - os.path.join( - node_restored.data_dir, 'base', db_list['db14'])) - pgdata_restored = self.pgdata_content(node_restored.data_dir) - - # get new node - node_restored_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) - node_restored_1.cleanup() - - # merge - self.merge_backup(backup_dir, 'node', backup_id=backup_id) - - self.restore_node( - backup_dir, 'node', - node_restored_1, options=[ - "--db-exclude=db5", - 
"--db-exclude=db14"]) - pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) - - self.compare_pgdata(pgdata_restored, pgdata_restored_1) - - def test_empty_and_mangled_database_map(self): - """ - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - - node.slow_start() - - # create databases - for i in range(1, 10, 1): - node.safe_psql( - 'postgres', - 'CREATE database db{0}'.format(i)) - - # FULL backup with database_map - backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - pgdata = self.pgdata_content(node.data_dir) - - # truncate database_map - path = os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', 'database_map') - with open(path, "w") as f: - f.close() - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=["--db-include=db1", '--no-validate']) - self.assertEqual( - 1, 0, - "Expecting Error because database_map is empty.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup {0} has empty or mangled database_map, " - "partial restore is impossible".format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=["--db-exclude=db1", '--no-validate']) - self.assertEqual( - 1, 0, - "Expecting Error because database_map is empty.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup {0} has empty or mangled database_map, " - "partial restore is impossible".format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # mangle database_map - with open(path, "w") as f: - f.write("42") - f.close() - - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=["--db-include=db1", '--no-validate']) - self.assertEqual( - 1, 0, - "Expecting Error because database_map is empty.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: field "dbOid" is not found in the line 42 of ' - 'the file backup_content.control', e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=["--db-exclude=db1", '--no-validate']) - self.assertEqual( - 1, 0, - "Expecting Error because database_map is empty.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: field "dbOid" is not found in the line 42 of ' - 'the file backup_content.control', e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # check that simple restore is still possible - self.restore_node( - backup_dir, 'node', node_restored, options=['--no-validate']) - - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - def test_missing_database_map(self): 
- """ - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - - node.slow_start() - - # create databases - for i in range(1, 10, 1): - node.safe_psql( - 'postgres', - 'CREATE database db{0}'.format(i)) - - node.safe_psql( - "postgres", - "CREATE DATABASE backupdb") - - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON 
ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 10 && < 15 - elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - 
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 15 - else: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - - if self.ptrack: - # TODO why backup works without these grants ? 
- # 'pg_ptrack_get_pagemapset(pg_lsn)', - # 'pg_ptrack_control_lsn()', - # because PUBLIC - node.safe_psql( - "backupdb", - "CREATE SCHEMA ptrack; " - "GRANT USAGE ON SCHEMA ptrack TO backup; " - "CREATE EXTENSION ptrack WITH SCHEMA ptrack") - - if ProbackupTest.enterprise: - - node.safe_psql( - "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") - - # FULL backup without database_map - backup_id = self.backup_node( - backup_dir, 'node', node, datname='backupdb', - options=['--stream', "-U", "backup", '--log-level-file=verbose']) - - pgdata = self.pgdata_content(node.data_dir) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - # backup has missing database_map and that is legal - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=["--db-exclude=db5", "--db-exclude=db9"]) - self.assertEqual( - 1, 0, - "Expecting Error because user do not have pg_database access.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup {0} doesn't contain a database_map, " - "partial restore is impossible.".format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=["--db-include=db1"]) - self.assertEqual( - 1, 0, - "Expecting Error because user do not have pg_database access.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup {0} doesn't contain a database_map, " - "partial restore is impossible.".format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # check that simple restore is still possible - self.restore_node(backup_dir, 'node', node_restored) - - pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_stream_restore_command_option(self): - """ - correct handling of restore command options - when restoring STREAM backup - - 1. Restore STREAM backup with --restore-command only - parameter, check that PostgreSQL recovery uses - restore_command to obtain WAL from archive. - - 2. Restore STREAM backup wuth --restore-command - as replica, check that PostgreSQL recovery uses - restore_command to obtain WAL from archive. 
- """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'max_wal_size': '32MB'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # TODO update test - if self.get_version(node) >= self.version_to_num('12.0'): - recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') - with open(recovery_conf, 'r') as f: - print(f.read()) - else: - recovery_conf = os.path.join(node.data_dir, 'recovery.conf') - - # Take FULL - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - node.pgbench_init(scale=5) - - node.safe_psql( - 'postgres', - 'create table t1()') - - # restore backup - node.cleanup() - shutil.rmtree(os.path.join(node.logs_dir)) - - restore_cmd = self.get_restore_command(backup_dir, 'node', node) - - self.restore_node( - backup_dir, 'node', node, - options=[ - '--restore-command={0}'.format(restore_cmd)]) - - self.assertTrue( - os.path.isfile(recovery_conf), - "File '{0}' do not exists".format(recovery_conf)) - - if self.get_version(node) >= self.version_to_num('12.0'): - recovery_signal = os.path.join(node.data_dir, 'recovery.signal') - self.assertTrue( - os.path.isfile(recovery_signal), - "File '{0}' do not exists".format(recovery_signal)) - - node.slow_start() - - node.safe_psql( - 'postgres', - 'select * from t1') - - timeline_id = node.safe_psql( - 'postgres', - 'select timeline_id from pg_control_checkpoint()').decode('utf-8').rstrip() - - self.assertEqual('2', timeline_id) - - # @unittest.skip("skip") - def test_restore_primary_conninfo(self): - """ - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # Take FULL - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - node.pgbench_init(scale=1) - - #primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass' - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - str_conninfo='host=192.168.1.50 port=5432 user=foo password=foopass' - - self.restore_node( - backup_dir, 'node', replica, - options=['-R', '--primary-conninfo={0}'.format(str_conninfo)]) - - if self.get_version(node) >= self.version_to_num('12.0'): - standby_signal = os.path.join(replica.data_dir, 'standby.signal') - self.assertTrue( - os.path.isfile(standby_signal), - "File '{0}' do not exists".format(standby_signal)) - - # TODO update test - if self.get_version(node) >= self.version_to_num('12.0'): - recovery_conf = os.path.join(replica.data_dir, 'postgresql.auto.conf') - with open(recovery_conf, 'r') as f: - print(f.read()) - else: - recovery_conf = os.path.join(replica.data_dir, 'recovery.conf') - - with open(os.path.join(replica.data_dir, recovery_conf), 'r') as f: - recovery_conf_content = f.read() - - self.assertIn(str_conninfo, recovery_conf_content) - - # @unittest.skip("skip") - def test_restore_primary_slot_info(self): - """ - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - 
initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # Take FULL - self.backup_node(backup_dir, 'node', node, options=['--stream']) - - node.pgbench_init(scale=1) - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - node.safe_psql( - "SELECT pg_create_physical_replication_slot('master_slot')") - - self.restore_node( - backup_dir, 'node', replica, - options=['-R', '--primary-slot-name=master_slot']) - - self.set_auto_conf(replica, {'port': replica.port}) - self.set_auto_conf(replica, {'hot_standby': 'on'}) - - if self.get_version(node) >= self.version_to_num('12.0'): - standby_signal = os.path.join(replica.data_dir, 'standby.signal') - self.assertTrue( - os.path.isfile(standby_signal), - "File '{0}' do not exists".format(standby_signal)) - - replica.slow_start(replica=True) - - def test_issue_249(self): - """ - https://github.com/postgrespro/pg_probackup/issues/249 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - 'postgres', - 'CREATE database db1') - - node.pgbench_init(scale=5) - - node.safe_psql( - 'postgres', - 'CREATE TABLE t1 as SELECT * from pgbench_accounts where aid > 200000 and aid < 450000') - - node.safe_psql( - 'postgres', - 'DELETE from pgbench_accounts where aid > 200000 and aid < 450000') - - node.safe_psql( - 'postgres', - 'select * from pgbench_accounts') - - # FULL backup - self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - 'postgres', - 'INSERT INTO pgbench_accounts SELECT * FROM t1') - - # restore FULL backup - node_restored_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) - node_restored_1.cleanup() - - self.restore_node( - backup_dir, 'node', - node_restored_1, options=["--db-include=db1"]) - - self.set_auto_conf( - node_restored_1, - {'port': node_restored_1.port, 'hot_standby': 'off'}) - - node_restored_1.slow_start() - - node_restored_1.safe_psql( - 'db1', - 'select 1') - - try: - node_restored_1.safe_psql( - 'postgres', - 'select 1') - except QueryException as e: - self.assertIn('FATAL', e.message) - - def test_pg_12_probackup_recovery_conf_compatibility(self): - """ - https://github.com/postgrespro/pg_probackup/issues/249 - - pg_probackup version must be 12 or greater - """ - if not self.probackup_old_path: - self.skipTest("You must specify PGPROBACKUPBIN_OLD" - " for run this test") - if self.pg_config_version < self.version_to_num('12.0'): - self.skipTest('You need PostgreSQL >= 12 for this test') - - if self.version_to_num(self.old_probackup_version) >= self.version_to_num('2.4.5'): - self.assertTrue(False, 'You need pg_probackup < 2.4.5 for this test') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) 
- node.slow_start() - - # FULL backup - self.backup_node(backup_dir, 'node', node, old_binary=True) - - node.pgbench_init(scale=5) - - node.safe_psql( - 'postgres', - 'CREATE TABLE t1 as SELECT * from pgbench_accounts where aid > 200000 and aid < 450000') - - time = node.safe_psql( - 'SELECT current_timestamp(0)::timestamptz;').decode('utf-8').rstrip() - - node.safe_psql( - 'postgres', - 'DELETE from pgbench_accounts where aid > 200000 and aid < 450000') - - node.cleanup() - - self.restore_node( - backup_dir, 'node',node, - options=[ - "--recovery-target-time={0}".format(time), - "--recovery-target-action=promote"], - old_binary=True) - - node.slow_start() - - self.backup_node(backup_dir, 'node', node, old_binary=True) - - node.pgbench_init(scale=5) - - xid = node.safe_psql( - 'SELECT txid_current()').decode('utf-8').rstrip() - node.pgbench_init(scale=1) - - node.cleanup() - - self.restore_node( - backup_dir, 'node',node, - options=[ - "--recovery-target-xid={0}".format(xid), - "--recovery-target-action=promote"]) - - node.slow_start() - - def test_drop_postgresql_auto_conf(self): - """ - https://github.com/postgrespro/pg_probackup/issues/249 - - pg_probackup version must be 12 or greater - """ - - if self.pg_config_version < self.version_to_num('12.0'): - self.skipTest('You need PostgreSQL >= 12 for this test') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - self.backup_node(backup_dir, 'node', node) - - # drop postgresql.auto.conf - auto_path = os.path.join(node.data_dir, "postgresql.auto.conf") - os.remove(auto_path) - - self.backup_node(backup_dir, 'node', node, backup_type='page') - - node.cleanup() - - self.restore_node( - backup_dir, 'node',node, - options=[ - "--recovery-target=latest", - "--recovery-target-action=promote"]) - - node.slow_start() - - self.assertTrue(os.path.exists(auto_path)) - - def test_truncate_postgresql_auto_conf(self): - """ - https://github.com/postgrespro/pg_probackup/issues/249 - - pg_probackup version must be 12 or greater - """ - - if self.pg_config_version < self.version_to_num('12.0'): - self.skipTest('You need PostgreSQL >= 12 for this test') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - self.backup_node(backup_dir, 'node', node) - - # truncate postgresql.auto.conf - auto_path = os.path.join(node.data_dir, "postgresql.auto.conf") - with open(auto_path, "w+") as f: - f.truncate() - - self.backup_node(backup_dir, 'node', node, backup_type='page') - - node.cleanup() - - self.restore_node( - backup_dir, 'node',node, - options=[ - "--recovery-target=latest", - "--recovery-target-action=promote"]) - node.slow_start() - - self.assertTrue(os.path.exists(auto_path)) - - # @unittest.skip("skip") - def test_concurrent_restore(self): - """""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - 
node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=1) - - # FULL backup - self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--compress']) - - pgbench = node.pgbench(options=['-T', '7', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # DELTA backup - self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--stream', '--compress', '--no-validate']) - - pgdata1 = self.pgdata_content(node.data_dir) - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - node.cleanup() - node_restored.cleanup() - - gdb = self.restore_node( - backup_dir, 'node', node, options=['--no-validate'], gdb=True) - - gdb.set_breakpoint('restore_data_file') - gdb.run_until_break() - - self.restore_node( - backup_dir, 'node', node_restored, options=['--no-validate']) - - gdb.remove_all_breakpoints() - gdb.continue_execution_until_exit() - - pgdata2 = self.pgdata_content(node.data_dir) - pgdata3 = self.pgdata_content(node_restored.data_dir) - - self.compare_pgdata(pgdata1, pgdata2) - self.compare_pgdata(pgdata2, pgdata3) - - # skip this test until https://github.com/postgrespro/pg_probackup/pull/399 - @unittest.skip("skip") - def test_restore_issue_313(self): - """ - Check that partially restored PostgreSQL instance cannot be started - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) - node.cleanup() - - count = 0 - filelist = self.get_backup_filelist(backup_dir, 'node', backup_id) - for file in filelist: - # count only nondata files - if int(filelist[file]['is_datafile']) == 0 and int(filelist[file]['size']) > 0: - count += 1 - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored) - - gdb = self.restore_node(backup_dir, 'node', node, gdb=True, options=['--progress']) - gdb.verbose = False - gdb.set_breakpoint('restore_non_data_file') - gdb.run_until_break() - gdb.continue_execution_until_break(count - 2) - gdb.quit() - - # emulate the user or HA taking care of PG configuration - for fname in os.listdir(node_restored.data_dir): - if fname.endswith('.conf'): - os.rename( - os.path.join(node_restored.data_dir, fname), - os.path.join(node.data_dir, fname)) - - try: - node.slow_start() - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because backup is not fully restored") - except StartNodeException as e: - self.assertIn( - 'Cannot start node', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_restore_with_waldir(self): - """recovery using tablespace-mapping option and page backup""" - node = self.make_simple_node( - 
base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - - with node.connect("postgres") as con: - con.execute( - "CREATE TABLE tbl AS SELECT * " - "FROM generate_series(0,3) AS integer") - con.commit() - - # Full backup - backup_id = self.backup_node(backup_dir, 'node', node) - - node.stop() - node.cleanup() - - # Create waldir - waldir_path = os.path.join(node.base_dir, "waldir") - os.makedirs(waldir_path) - - # Test recovery from latest - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, - options=[ - "-X", "%s" % (waldir_path)]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - - count = node.execute("postgres", "SELECT count(*) FROM tbl") - self.assertEqual(count[0][0], 4) - - # check pg_wal is symlink - if node.major_version >= 10: - wal_path=os.path.join(node.data_dir, "pg_wal") - else: - wal_path=os.path.join(node.data_dir, "pg_xlog") - - self.assertEqual(os.path.islink(wal_path), True) diff --git a/tests/retention_test.py b/tests/retention_test.py deleted file mode 100644 index 88432a00f..000000000 --- a/tests/retention_test.py +++ /dev/null @@ -1,2529 +0,0 @@ -import os -import unittest -from datetime import datetime, timedelta -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from time import sleep -from distutils.dir_util import copy_tree - - -class RetentionTest(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_retention_redundancy_1(self): - """purge backups using redundancy-based retention policy""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.set_config( - backup_dir, 'node', options=['--retention-redundancy=1']) - - # Make backups to be purged - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type="page") - # Make backups to be keeped - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type="page") - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) - - output_before = self.show_archive(backup_dir, 'node', tli=1) - - # Purge backups - self.delete_expired( - backup_dir, 'node', options=['--expired', '--wal']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) - - output_after = self.show_archive(backup_dir, 'node', tli=1) - - self.assertEqual( - output_before['max-segno'], - output_after['max-segno']) - - self.assertNotEqual( - output_before['min-segno'], - output_after['min-segno']) - - # Check that WAL segments were deleted - min_wal = output_after['min-segno'] - max_wal = output_after['max-segno'] - - for wal_name in os.listdir(os.path.join(backup_dir, 'wal', 'node')): - if not wal_name.endswith(".backup"): - - if self.archive_compress: - wal_name = wal_name[-27:] - wal_name = wal_name[:-3] - else: - wal_name = wal_name[-24:] - - self.assertTrue(wal_name >= min_wal) - 
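[Editorial aside, not part of the patch hunk above.] The deleted test_retention_redundancy_1 drives redundancy-based purging through '--retention-redundancy=1' followed by a delete with '--expired' and '--wal'. As a reading aid, here is a minimal, self-contained Python sketch of the keep/purge decision that policy implies; the chain layout and the "0 disables the policy" behaviour are assumptions for illustration, not quoted from pg_probackup sources.

    # Model a backup catalog as full chains ordered oldest -> newest,
    # each chain being (full_backup_id, [incremental_backup_ids...]).
    def apply_redundancy(chains, redundancy):
        """Return the chains a redundancy policy would keep:
        the newest `redundancy` full chains (assumed: 0 disables the policy)."""
        if redundancy <= 0:
            return list(chains)
        return list(chains)[-redundancy:]

    chains = [("FULL_1", ["PAGE_1"]), ("FULL_2", ["PAGE_2"])]
    # With --retention-redundancy=1 only the newest chain survives,
    # which matches the test's expectation of 2 remaining backups out of 4.
    assert apply_redundancy(chains, 1) == [("FULL_2", ["PAGE_2"])]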
self.assertTrue(wal_name <= max_wal) - - # @unittest.skip("skip") - def test_retention_window_2(self): - """purge backups using window-based retention policy""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - with open( - os.path.join( - backup_dir, - 'backups', - 'node', - "pg_probackup.conf"), "a") as conf: - conf.write("retention-redundancy = 1\n") - conf.write("retention-window = 1\n") - - # Make backups to be purged - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type="page") - # Make backup to be keeped - self.backup_node(backup_dir, 'node', node) - - backups = os.path.join(backup_dir, 'backups', 'node') - days_delta = 5 - for backup in os.listdir(backups): - if backup == 'pg_probackup.conf': - continue - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=days_delta))) - days_delta -= 1 - - # Make backup to be keeped - self.backup_node(backup_dir, 'node', node, backup_type="page") - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) - - # Purge backups - self.delete_expired(backup_dir, 'node', options=['--expired']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) - - # @unittest.skip("skip") - def test_retention_window_3(self): - """purge all backups using window-based retention policy""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # take FULL BACKUP - self.backup_node(backup_dir, 'node', node) - - # Take second FULL BACKUP - self.backup_node(backup_dir, 'node', node) - - # Take third FULL BACKUP - self.backup_node(backup_dir, 'node', node) - - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup == 'pg_probackup.conf': - continue - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) - - # Purge backups - self.delete_expired( - backup_dir, 'node', options=['--retention-window=1', '--expired']) - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 0) - - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) - - # count wal files in ARCHIVE - - # @unittest.skip("skip") - def test_retention_window_4(self): - """purge all backups using window-based retention policy""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # take FULL BACKUPs - self.backup_node(backup_dir, 'node', node) - - backup_id_2 = self.backup_node(backup_dir, 'node', node) - - backup_id_3 = 
self.backup_node(backup_dir, 'node', node) - - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup == 'pg_probackup.conf': - continue - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) - - self.delete_pb(backup_dir, 'node', backup_id_2) - self.delete_pb(backup_dir, 'node', backup_id_3) - - # Purge backups - self.delete_expired( - backup_dir, 'node', - options=['--retention-window=1', '--expired', '--wal']) - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 0) - - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) - - # count wal files in ARCHIVE - wals_dir = os.path.join(backup_dir, 'wal', 'node') - # n_wals = len(os.listdir(wals_dir)) - - # self.assertTrue(n_wals > 0) - - # self.delete_expired( - # backup_dir, 'node', - # options=['--retention-window=1', '--expired', '--wal']) - - # count again - n_wals = len(os.listdir(wals_dir)) - self.assertTrue(n_wals == 0) - - # @unittest.skip("skip") - def test_window_expire_interleaved_incremental_chains(self): - """complicated case of interleaved backup chains""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # take FULL BACKUPs - backup_id_a = self.backup_node(backup_dir, 'node', node) - backup_id_b = self.backup_node(backup_dir, 'node', node) - - # Change FULLb backup status to ERROR - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - # FULLb ERROR - # FULLa OK - - # Take PAGEa1 backup - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - # Change FULLb backup status to OK - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - # Change PAGEa1 and FULLa to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') - - # PAGEa1 ERROR - # FULLb OK - # FULLa ERROR - - page_id_b1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEb1 OK - # PAGEa1 ERROR - # FULLb OK - # FULLa ERROR - - # Now we start to play with first generation of PAGE backups - # Change PAGEb1 and FULLb to ERROR - self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - # Change PAGEa1 and FULLa to OK - self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') - - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - page_id_a2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEa2 OK - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - # Change PAGEa2 and FULLa to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') - - # Change PAGEb1 and FULLb to OK - self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - # PAGEa2 ERROR - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa 
ERROR - - page_id_b2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Change PAGEa2 and FULla to OK - self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') - - # PAGEb2 OK - # PAGEa2 OK - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa OK - - # Purge backups - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup not in [page_id_a2, page_id_b2, 'pg_probackup.conf']: - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) - - self.delete_expired( - backup_dir, 'node', - options=['--retention-window=1', '--expired']) - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 6) - - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) - - # @unittest.skip("skip") - def test_redundancy_expire_interleaved_incremental_chains(self): - """complicated case of interleaved backup chains""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # take FULL BACKUPs - backup_id_a = self.backup_node(backup_dir, 'node', node) - backup_id_b = self.backup_node(backup_dir, 'node', node) - - # Change FULL B backup status to ERROR - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - # FULLb ERROR - # FULLa OK - # Take PAGEa1 backup - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - # Change FULLb backup status to OK - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - # Change PAGEa1 and FULLa backup status to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') - - # PAGEa1 ERROR - # FULLb OK - # FULLa ERROR - - page_id_b1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEb1 OK - # PAGEa1 ERROR - # FULLb OK - # FULLa ERROR - - # Now we start to play with first generation of PAGE backups - # Change PAGEb1 status to ERROR - self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - # Change PAGEa1 status to OK - self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') - - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - page_id_a2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEa2 OK - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - # Change PAGEa2 and FULLa status to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') - - # Change PAGEb1 and FULLb status to OK - self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - # PAGEa2 ERROR - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa ERROR - self.backup_node(backup_dir, 'node', node, backup_type='page') - - # Change PAGEa2 and FULLa status to OK - 
self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') - - # PAGEb2 OK - # PAGEa2 OK - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa OK - - self.delete_expired( - backup_dir, 'node', - options=['--retention-redundancy=1', '--expired']) - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 3) - - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) - - # @unittest.skip("skip") - def test_window_merge_interleaved_incremental_chains(self): - """complicated case of interleaved backup chains""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Take FULL BACKUPs - backup_id_a = self.backup_node(backup_dir, 'node', node) - backup_id_b = self.backup_node(backup_dir, 'node', node) - - # Change FULLb backup status to ERROR - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - # FULLb ERROR - # FULLa OK - - # Take PAGEa1 backup - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - # Change FULLb to OK - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - # Change PAGEa1 backup status to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR') - - # PAGEa1 ERROR - # FULLb OK - # FULLa OK - - page_id_b1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEb1 OK - # PAGEa1 ERROR - # FULLb OK - # FULLa OK - - # Now we start to play with first generation of PAGE backups - # Change PAGEb1 and FULLb to ERROR - self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - # Change PAGEa1 to OK - self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK') - - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - page_id_a2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEa2 OK - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - # Change PAGEa2 and FULLa to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') - - # Change PAGEb1 and FULLb to OK - self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - # PAGEa2 ERROR - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa ERROR - - page_id_b2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Change PAGEa2 and FULLa to OK - self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') - - # PAGEb2 OK - # PAGEa2 OK - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa OK - - # Purge backups - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup not in [page_id_a2, page_id_b2, 'pg_probackup.conf']: - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) - - output = self.delete_expired( - backup_dir, 'node', - options=['--retention-window=1', 
'--expired', '--merge-expired']) - - self.assertIn( - "Merge incremental chain between full backup {0} and backup {1}".format( - backup_id_a, page_id_a2), - output) - - self.assertIn( - "Rename merged full backup {0} to {1}".format( - backup_id_a, page_id_a2), output) - - self.assertIn( - "Merge incremental chain between full backup {0} and backup {1}".format( - backup_id_b, page_id_b2), - output) - - self.assertIn( - "Rename merged full backup {0} to {1}".format( - backup_id_b, page_id_b2), output) - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) - - # @unittest.skip("skip") - def test_window_merge_interleaved_incremental_chains_1(self): - """ - PAGEb3 - PAGEb2 - PAGEb1 - PAGEa1 - FULLb - FULLa - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=5) - - # Take FULL BACKUPs - self.backup_node(backup_dir, 'node', node) - pgbench = node.pgbench(options=['-t', '20', '-c', '1']) - pgbench.wait() - - backup_id_b = self.backup_node(backup_dir, 'node', node) - pgbench = node.pgbench(options=['-t', '20', '-c', '1']) - pgbench.wait() - - # Change FULL B backup status to ERROR - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - pgdata_a1 = self.pgdata_content(node.data_dir) - - pgbench = node.pgbench(options=['-t', '20', '-c', '1']) - pgbench.wait() - - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - # Change FULL B backup status to OK - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - # Change PAGEa1 backup status to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR') - - # PAGEa1 ERROR - # FULLb OK - # FULLa OK - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - pgbench = node.pgbench(options=['-t', '20', '-c', '1']) - pgbench.wait() - - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - pgbench = node.pgbench(options=['-t', '20', '-c', '1']) - pgbench.wait() - - page_id_b3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - pgdata_b3 = self.pgdata_content(node.data_dir) - - pgbench = node.pgbench(options=['-t', '20', '-c', '1']) - pgbench.wait() - - # PAGEb3 OK - # PAGEb2 OK - # PAGEb1 OK - # PAGEa1 ERROR - # FULLb OK - # FULLa OK - - # Change PAGEa1 backup status to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK') - - # PAGEb3 OK - # PAGEb2 OK - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa OK - - # Purge backups - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup in [page_id_a1, page_id_b3, 'pg_probackup.conf']: - continue - - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) - - self.delete_expired( - backup_dir, 'node', - options=['--retention-window=1', '--expired', '--merge-expired']) - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) - - self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['id'], - page_id_b3) - - self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['id'], - page_id_a1) - - self.assertEqual( - 
self.show_pb(backup_dir, 'node')[1]['backup-mode'], - 'FULL') - - self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['backup-mode'], - 'FULL') - - node.cleanup() - - # Data correctness of PAGEa3 - self.restore_node(backup_dir, 'node', node, backup_id=page_id_a1) - pgdata_restored_a1 = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata_a1, pgdata_restored_a1) - - node.cleanup() - - # Data correctness of PAGEb3 - self.restore_node(backup_dir, 'node', node, backup_id=page_id_b3) - pgdata_restored_b3 = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata_b3, pgdata_restored_b3) - - # @unittest.skip("skip") - def test_basic_window_merge_multiple_descendants(self): - """ - PAGEb3 - | PAGEa3 - -----------------------------retention window - PAGEb2 / - | PAGEa2 / should be deleted - PAGEb1 \ / - | PAGEa1 - FULLb | - FULLa - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=3) - - # Take FULL BACKUPs - backup_id_a = self.backup_node(backup_dir, 'node', node) - # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - # pgbench.wait() - - backup_id_b = self.backup_node(backup_dir, 'node', node) - # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - # pgbench.wait() - - # Change FULLb backup status to ERROR - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - # pgbench.wait() - - # Change FULLb to OK - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - # Change PAGEa1 to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR') - - # PAGEa1 ERROR - # FULLb OK - # FULLa OK - - page_id_b1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEb1 OK - # PAGEa1 ERROR - # FULLb OK - # FULLa OK - - # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - # pgbench.wait() - - # Change PAGEa1 to OK - self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK') - - # Change PAGEb1 and FULLb to ERROR - self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - page_id_a2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - # pgbench.wait() - - # PAGEa2 OK - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - # Change PAGEb1 and FULLb to OK - self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - # Change PAGEa2 and FULLa to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') - - # PAGEa2 ERROR - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa ERROR - - page_id_b2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - # pgbench.wait() - - # PAGEb2 OK - # PAGEa2 ERROR - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa ERROR - - # Change PAGEb2 
and PAGEb1 to ERROR - self.change_backup_status(backup_dir, 'node', page_id_b2, 'ERROR') - self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') - - # and FULL stuff - self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - # PAGEb2 ERROR - # PAGEa2 ERROR - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - page_id_a3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - # pgbench.wait() - - # PAGEa3 OK - # PAGEb2 ERROR - # PAGEa2 ERROR - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - # Change PAGEa3 to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a3, 'ERROR') - - # Change PAGEb2, PAGEb1 and FULLb to OK - self.change_backup_status(backup_dir, 'node', page_id_b2, 'OK') - self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - page_id_b3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEb3 OK - # PAGEa3 ERROR - # PAGEb2 OK - # PAGEa2 ERROR - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa OK - - # Change PAGEa3, PAGEa2 and PAGEb1 status to OK - self.change_backup_status(backup_dir, 'node', page_id_a3, 'OK') - self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') - self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') - - # PAGEb3 OK - # PAGEa3 OK - # PAGEb2 OK - # PAGEa2 OK - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa OK - - # Check that page_id_a3 and page_id_a2 are both direct descendants of page_id_a1 - self.assertEqual( - self.show_pb( - backup_dir, 'node', backup_id=page_id_a3)['parent-backup-id'], - page_id_a1) - - self.assertEqual( - self.show_pb( - backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'], - page_id_a1) - - # Purge backups - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup in [page_id_a3, page_id_b3, 'pg_probackup.conf']: - continue - - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) - - output = self.delete_expired( - backup_dir, 'node', - options=[ - '--retention-window=1', '--delete-expired', - '--merge-expired', '--log-level-console=log']) - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) - - # Merging chain A - self.assertIn( - "Merge incremental chain between full backup {0} and backup {1}".format( - backup_id_a, page_id_a3), - output) - - self.assertIn( - "INFO: Rename merged full backup {0} to {1}".format( - backup_id_a, page_id_a3), output) - -# self.assertIn( -# "WARNING: Backup {0} has multiple valid descendants. 
" -# "Automatic merge is not possible.".format( -# page_id_a1), output) - - self.assertIn( - "LOG: Consider backup {0} for purge".format( - page_id_a2), output) - - # Merge chain B - self.assertIn( - "Merge incremental chain between full backup {0} and backup {1}".format( - backup_id_b, page_id_b3), - output) - - self.assertIn( - "INFO: Rename merged full backup {0} to {1}".format( - backup_id_b, page_id_b3), output) - - self.assertIn( - "Delete: {0}".format(page_id_a2), output) - - self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['id'], - page_id_b3) - - self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['id'], - page_id_a3) - - self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['backup-mode'], - 'FULL') - - self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['backup-mode'], - 'FULL') - - # @unittest.skip("skip") - def test_basic_window_merge_multiple_descendants_1(self): - """ - PAGEb3 - | PAGEa3 - -----------------------------retention window - PAGEb2 / - | PAGEa2 / - PAGEb1 \ / - | PAGEa1 - FULLb | - FULLa - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=3) - - # Take FULL BACKUPs - backup_id_a = self.backup_node(backup_dir, 'node', node) - # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - # pgbench.wait() - - backup_id_b = self.backup_node(backup_dir, 'node', node) - # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - # pgbench.wait() - - # Change FULLb backup status to ERROR - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - # pgbench.wait() - - # Change FULLb to OK - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - # Change PAGEa1 to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR') - - # PAGEa1 ERROR - # FULLb OK - # FULLa OK - - page_id_b1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEb1 OK - # PAGEa1 ERROR - # FULLb OK - # FULLa OK - - # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - # pgbench.wait() - - # Change PAGEa1 to OK - self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK') - - # Change PAGEb1 and FULLb to ERROR - self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - page_id_a2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - # pgbench.wait() - - # PAGEa2 OK - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - # Change PAGEb1 and FULLb to OK - self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - # Change PAGEa2 and FULLa to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR') - self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') - - # PAGEa2 ERROR - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa ERROR - - page_id_b2 = self.backup_node( - backup_dir, 'node', node, 
backup_type='page') - - # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - # pgbench.wait() - - # PAGEb2 OK - # PAGEa2 ERROR - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa ERROR - - # Change PAGEb2 and PAGEb1 to ERROR - self.change_backup_status(backup_dir, 'node', page_id_b2, 'ERROR') - self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR') - - # and FULL stuff - self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - # PAGEb2 ERROR - # PAGEa2 ERROR - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - page_id_a3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - # pgbench.wait() - - # PAGEa3 OK - # PAGEb2 ERROR - # PAGEa2 ERROR - # PAGEb1 ERROR - # PAGEa1 OK - # FULLb ERROR - # FULLa OK - - # Change PAGEa3 to ERROR - self.change_backup_status(backup_dir, 'node', page_id_a3, 'ERROR') - - # Change PAGEb2, PAGEb1 and FULLb to OK - self.change_backup_status(backup_dir, 'node', page_id_b2, 'OK') - self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') - self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - - page_id_b3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGEb3 OK - # PAGEa3 ERROR - # PAGEb2 OK - # PAGEa2 ERROR - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa OK - - # Change PAGEa3, PAGEa2 and PAGEb1 status to OK - self.change_backup_status(backup_dir, 'node', page_id_a3, 'OK') - self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') - self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') - - # PAGEb3 OK - # PAGEa3 OK - # PAGEb2 OK - # PAGEa2 OK - # PAGEb1 OK - # PAGEa1 OK - # FULLb OK - # FULLa OK - - # Check that page_id_a3 and page_id_a2 are both direct descendants of page_id_a1 - self.assertEqual( - self.show_pb( - backup_dir, 'node', backup_id=page_id_a3)['parent-backup-id'], - page_id_a1) - - self.assertEqual( - self.show_pb( - backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'], - page_id_a1) - - # Purge backups - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup in [page_id_a3, page_id_b3, 'pg_probackup.conf']: - continue - - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) - - output = self.delete_expired( - backup_dir, 'node', - options=[ - '--retention-window=1', - '--merge-expired', '--log-level-console=log']) - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 3) - - # Merging chain A - self.assertIn( - "Merge incremental chain between full backup {0} and backup {1}".format( - backup_id_a, page_id_a3), - output) - - self.assertIn( - "INFO: Rename merged full backup {0} to {1}".format( - backup_id_a, page_id_a3), output) - -# self.assertIn( -# "WARNING: Backup {0} has multiple valid descendants. 
" -# "Automatic merge is not possible.".format( -# page_id_a1), output) - - # Merge chain B - self.assertIn( - "Merge incremental chain between full backup {0} and backup {1}".format( - backup_id_b, page_id_b3), output) - - self.assertIn( - "INFO: Rename merged full backup {0} to {1}".format( - backup_id_b, page_id_b3), output) - - self.assertEqual( - self.show_pb(backup_dir, 'node')[2]['id'], - page_id_b3) - - self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['id'], - page_id_a3) - - self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['id'], - page_id_a2) - - self.assertEqual( - self.show_pb(backup_dir, 'node')[2]['backup-mode'], - 'FULL') - - self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['backup-mode'], - 'FULL') - - self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['backup-mode'], - 'PAGE') - - output = self.delete_expired( - backup_dir, 'node', - options=[ - '--retention-window=1', - '--delete-expired', '--log-level-console=log']) - - # @unittest.skip("skip") - def test_window_chains(self): - """ - PAGE - -------window - PAGE - PAGE - FULL - PAGE - PAGE - FULL - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=3) - - # Chain A - self.backup_node(backup_dir, 'node', node) - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Chain B - self.backup_node(backup_dir, 'node', node) - - pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - pgbench.wait() - - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - pgbench.wait() - - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - pgbench.wait() - - page_id_b3 = self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - pgdata = self.pgdata_content(node.data_dir) - - # Purge backups - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup in [page_id_b3, 'pg_probackup.conf']: - continue - - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) - - self.delete_expired( - backup_dir, 'node', - options=[ - '--retention-window=1', '--expired', - '--merge-expired', '--log-level-console=log']) - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 1) - - node.cleanup() - - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - def test_window_chains_1(self): - """ - PAGE - -------window - PAGE - PAGE - FULL - PAGE - PAGE - FULL - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=3) - - # Chain A - 
self.backup_node(backup_dir, 'node', node) - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Chain B - self.backup_node(backup_dir, 'node', node) - - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - page_id_b3 = self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - self.pgdata_content(node.data_dir) - - # Purge backups - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup in [page_id_b3, 'pg_probackup.conf']: - continue - - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) - - output = self.delete_expired( - backup_dir, 'node', - options=[ - '--retention-window=1', - '--merge-expired', '--log-level-console=log']) - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) - - self.assertIn( - "There are no backups to delete by retention policy", - output) - - self.assertIn( - "Retention merging finished", - output) - - output = self.delete_expired( - backup_dir, 'node', - options=[ - '--retention-window=1', - '--expired', '--log-level-console=log']) - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 1) - - self.assertIn( - "There are no backups to merge by retention policy", - output) - - self.assertIn( - "Purging finished", - output) - - @unittest.skip("skip") - def test_window_error_backups(self): - """ - PAGE ERROR - -------window - PAGE ERROR - PAGE ERROR - PAGE ERROR - FULL ERROR - FULL - -------redundancy - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Take FULL BACKUPs - self.backup_node(backup_dir, 'node', node) - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Change FULLb backup status to ERROR - # self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - - # @unittest.skip("skip") - def test_window_error_backups_1(self): - """ - DELTA - PAGE ERROR - FULL - -------window - """ - self._check_gdb_flag_or_skip_test() - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Take FULL BACKUP - self.backup_node(backup_dir, 'node', node) - - # Take PAGE BACKUP - gdb = self.backup_node( - backup_dir, 'node', node, backup_type='page', gdb=True) - - # Attention! 
this breakpoint has been set on internal probackup function, not on a postgres core one - gdb.set_breakpoint('pg_stop_backup') - gdb.run_until_break() - gdb.remove_all_breakpoints() - gdb._execute('signal SIGINT') - gdb.continue_execution_until_error() - - self.show_pb(backup_dir, 'node')[1]['id'] - - # Take DELTA backup - self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--retention-window=2', '--delete-expired']) - - # Take FULL BACKUP - self.backup_node(backup_dir, 'node', node) - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) - - # @unittest.skip("skip") - def test_window_error_backups_2(self): - """ - DELTA - PAGE ERROR - FULL - -------window - """ - self._check_gdb_flag_or_skip_test() - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Take FULL BACKUP - self.backup_node(backup_dir, 'node', node) - - # Take PAGE BACKUP - gdb = self.backup_node( - backup_dir, 'node', node, backup_type='page', gdb=True) - - # Attention! this breakpoint has been set on internal probackup function, not on a postgres core one - gdb.set_breakpoint('pg_stop_backup') - gdb.run_until_break() - gdb._execute('signal SIGKILL') - gdb.continue_execution_until_error() - - self.show_pb(backup_dir, 'node')[1]['id'] - - if self.get_version(node) < 90600: - node.safe_psql( - 'postgres', - 'SELECT pg_catalog.pg_stop_backup()') - - # Take DELTA backup - self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--retention-window=2', '--delete-expired']) - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 3) - - def test_retention_redundancy_overlapping_chains(self): - """""" - self._check_gdb_flag_or_skip_test() - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - if self.get_version(node) < 90600: - self.skipTest('Skipped because ptrack support is disabled') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.set_config( - backup_dir, 'node', options=['--retention-redundancy=1']) - - # Make backups to be purged - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type="page") - - # Make backups to be keeped - gdb = self.backup_node(backup_dir, 'node', node, gdb=True) - gdb.set_breakpoint('backup_files') - gdb.run_until_break() - - sleep(1) - - self.backup_node(backup_dir, 'node', node, backup_type="page") - - gdb.remove_all_breakpoints() - gdb.continue_execution_until_exit() - - self.backup_node(backup_dir, 'node', node, backup_type="page") - - # Purge backups - self.delete_expired( - backup_dir, 'node', options=['--expired', '--wal']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) - - self.validate_pb(backup_dir, 'node') - - def test_retention_redundancy_overlapping_chains_1(self): - """""" - self._check_gdb_flag_or_skip_test() - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - if self.get_version(node) < 90600: - 
self.skipTest('Skipped because ptrack support is disabled') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.set_config( - backup_dir, 'node', options=['--retention-redundancy=1']) - - # Make backups to be purged - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type="page") - - # Make backups to be keeped - gdb = self.backup_node(backup_dir, 'node', node, gdb=True) - gdb.set_breakpoint('backup_files') - gdb.run_until_break() - - sleep(1) - - self.backup_node(backup_dir, 'node', node, backup_type="page") - - gdb.remove_all_breakpoints() - gdb.continue_execution_until_exit() - - self.backup_node(backup_dir, 'node', node, backup_type="page") - - # Purge backups - self.delete_expired( - backup_dir, 'node', options=['--expired', '--wal']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) - - self.validate_pb(backup_dir, 'node') - - def test_wal_purge_victim(self): - """ - https://github.com/postgrespro/pg_probackup/issues/103 - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # Make ERROR incremental backup - try: - self.backup_node(backup_dir, 'node', node, backup_type='page') - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be possible " - "without valid full backup.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "WARNING: Valid full backup on current timeline 1 is not found" in e.message and - "ERROR: Create new full backup before an incremental one" in e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - page_id = self.show_pb(backup_dir, 'node')[0]['id'] - - sleep(1) - - # Make FULL backup - full_id = self.backup_node(backup_dir, 'node', node, options=['--delete-wal']) - - try: - self.validate_pb(backup_dir, 'node') - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be possible " - "without valid full backup.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "INFO: Backup {0} WAL segments are valid".format(full_id), - e.message) - self.assertIn( - "WARNING: Backup {0} has missing parent 0".format(page_id), - e.message) - - # @unittest.skip("skip") - def test_failed_merge_redundancy_retention(self): - """ - Check that retention purge works correctly with MERGING backups - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join( - self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL1 backup - full_id = 
self.backup_node(backup_dir, 'node', node) - - # DELTA BACKUP - delta_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - # FULL2 backup - self.backup_node(backup_dir, 'node', node) - - # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - # FULL3 backup - self.backup_node(backup_dir, 'node', node) - - # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - self.set_config( - backup_dir, 'node', options=['--retention-redundancy=2']) - - self.set_config( - backup_dir, 'node', options=['--retention-window=2']) - - # create pair of MERGING backup as a result of failed merge - gdb = self.merge_backup( - backup_dir, 'node', delta_id, gdb=True) - gdb.set_breakpoint('backup_non_data_file') - gdb.run_until_break() - gdb.continue_execution_until_break(2) - gdb._execute('signal SIGKILL') - - # "expire" first full backup - backups = os.path.join(backup_dir, 'backups', 'node') - with open( - os.path.join( - backups, full_id, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) - - # run retention merge - self.delete_expired( - backup_dir, 'node', options=['--delete-expired']) - - self.assertEqual( - 'MERGING', - self.show_pb(backup_dir, 'node', full_id)['status'], - 'Backup STATUS should be "MERGING"') - - self.assertEqual( - 'MERGING', - self.show_pb(backup_dir, 'node', delta_id)['status'], - 'Backup STATUS should be "MERGING"') - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 10) - - def test_wal_depth_1(self): - """ - |-------------B5----------> WAL timeline3 - |-----|-------------------------> WAL timeline2 - B1 B2---| B3 B4-------B6-----> WAL timeline1 - - wal-depth=2 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'archive_timeout': '30s', - 'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - - self.set_config(backup_dir, 'node', options=['--archive-timeout=60s']) - - node.slow_start() - - # FULL - node.pgbench_init(scale=1) - self.backup_node(backup_dir, 'node', node) - - # PAGE - node.pgbench_init(scale=1) - B2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # generate_some more data - node.pgbench_init(scale=1) - - target_xid = node.safe_psql( - "postgres", - "select txid_current()").decode('utf-8').rstrip() - - node.pgbench_init(scale=1) - - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - node.pgbench_init(scale=1) - - self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Timeline 2 - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - - node_restored.cleanup() - - output = self.restore_node( - backup_dir, 'node', node_restored, - options=[ - '--recovery-target-xid={0}'.format(target_xid), - '--recovery-target-action=promote']) - - 
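# --- Illustrative sketch (not part of this patch's test code) ---
# A minimal sketch of the "expire a backup" trick used in the retention tests
# above: appending a recovery_time several days in the past to backup.control
# so that window-based retention treats the backup as already outside the
# window. It assumes the catalog layout these tests rely on
# (backups/<instance>/<backup_id>/backup.control); the helper name is
# hypothetical and is not part of the ProbackupTest helpers.
import os
from datetime import datetime, timedelta

def backdate_backup(backup_dir, instance, backup_id, days=3):
    # Append an old recovery_time; pg_probackup will then consider the
    # backup expired for --retention-window purposes.
    control = os.path.join(backup_dir, 'backups', instance,
                           backup_id, 'backup.control')
    stamp = '{:%Y-%m-%d %H:%M:%S}'.format(datetime.now() - timedelta(days=days))
    with open(control, 'a') as conf:
        conf.write("recovery_time='{0}'\n".format(stamp))
# --- end sketch ---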
self.assertIn( - 'Restore of backup {0} completed'.format(B2), - output) - - self.set_auto_conf(node_restored, options={'port': node_restored.port}) - - node_restored.slow_start() - - node_restored.pgbench_init(scale=1) - - target_xid = node_restored.safe_psql( - "postgres", - "select txid_current()").decode('utf-8').rstrip() - - node_restored.pgbench_init(scale=2) - - # Timeline 3 - node_restored.cleanup() - - output = self.restore_node( - backup_dir, 'node', node_restored, - options=[ - '--recovery-target-xid={0}'.format(target_xid), - '--recovery-target-timeline=2', - '--recovery-target-action=promote']) - - self.assertIn( - 'Restore of backup {0} completed'.format(B2), - output) - - self.set_auto_conf(node_restored, options={'port': node_restored.port}) - - node_restored.slow_start() - - node_restored.pgbench_init(scale=1) - self.backup_node( - backup_dir, 'node', node_restored, data_dir=node_restored.data_dir) - - node.pgbench_init(scale=1) - self.backup_node(backup_dir, 'node', node) - - lsn = self.show_archive(backup_dir, 'node', tli=2)['switchpoint'] - - self.validate_pb( - backup_dir, 'node', backup_id=B2, - options=['--recovery-target-lsn={0}'.format(lsn)]) - - self.validate_pb(backup_dir, 'node') - - def test_wal_purge(self): - """ - -------------------------------------> tli5 - ---------------------------B6--------> tli4 - S2`---------------> tli3 - S1`------------S2---B4-------B5--> tli2 - B1---S1-------------B2--------B3------> tli1 - - B* - backups - S* - switchpoints - - Expected result: - TLI5 will be purged entirely - B6--------> tli4 - S2`---------------> tli3 - S1`------------S2---B4-------B5--> tli2 - B1---S1-------------B2--------B3------> tli1 - - wal-depth=2 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_config(backup_dir, 'node', options=['--archive-timeout=60s']) - - node.slow_start() - - # STREAM FULL - stream_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - node.stop() - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL - B1 = self.backup_node(backup_dir, 'node', node) - node.pgbench_init(scale=1) - - target_xid = node.safe_psql( - "postgres", - "select txid_current()").decode('utf-8').rstrip() - node.pgbench_init(scale=5) - - # B2 FULL on TLI1 - self.backup_node(backup_dir, 'node', node) - node.pgbench_init(scale=4) - self.backup_node(backup_dir, 'node', node) - node.pgbench_init(scale=4) - - self.delete_pb(backup_dir, 'node', options=['--delete-wal']) - - # TLI 2 - node_tli2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_tli2')) - node_tli2.cleanup() - - output = self.restore_node( - backup_dir, 'node', node_tli2, - options=[ - '--recovery-target-xid={0}'.format(target_xid), - '--recovery-target-timeline=1', - '--recovery-target-action=promote']) - - self.assertIn( - 'INFO: Restore of backup {0} completed'.format(B1), - output) - - self.set_auto_conf(node_tli2, options={'port': node_tli2.port}) - node_tli2.slow_start() - node_tli2.pgbench_init(scale=4) - - target_xid = node_tli2.safe_psql( - "postgres", - "select txid_current()").decode('utf-8').rstrip() - node_tli2.pgbench_init(scale=1) - - self.backup_node( - backup_dir, 'node', node_tli2, data_dir=node_tli2.data_dir) - 
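# --- Illustrative sketch (not part of this patch's test code) ---
# A minimal sketch of the recurring pattern used in these WAL tests to fork a
# new timeline: restore a fresh node to an earlier xid with promote, repoint
# its port, and start it. It assumes the ProbackupTest helpers (restore_node,
# set_auto_conf) and the testgres node API already used in these tests, and is
# meant as a method on a ProbackupTest-based test class; the helper name is
# hypothetical.
def fork_timeline(self, backup_dir, instance, node_restored, target_xid):
    node_restored.cleanup()
    self.restore_node(
        backup_dir, instance, node_restored,
        options=[
            '--recovery-target-xid={0}'.format(target_xid),
            '--recovery-target-action=promote'])
    self.set_auto_conf(node_restored, options={'port': node_restored.port})
    node_restored.slow_start()
    return node_restored
# --- end sketch ---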
node_tli2.pgbench_init(scale=3) - - self.backup_node( - backup_dir, 'node', node_tli2, data_dir=node_tli2.data_dir) - node_tli2.pgbench_init(scale=1) - node_tli2.cleanup() - - # TLI3 - node_tli3 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_tli3')) - node_tli3.cleanup() - - # Note, that successful validation here is a happy coincidence - output = self.restore_node( - backup_dir, 'node', node_tli3, - options=[ - '--recovery-target-xid={0}'.format(target_xid), - '--recovery-target-timeline=2', - '--recovery-target-action=promote']) - - self.assertIn( - 'INFO: Restore of backup {0} completed'.format(B1), - output) - self.set_auto_conf(node_tli3, options={'port': node_tli3.port}) - node_tli3.slow_start() - node_tli3.pgbench_init(scale=5) - node_tli3.cleanup() - - # TLI4 - node_tli4 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_tli4')) - node_tli4.cleanup() - - self.restore_node( - backup_dir, 'node', node_tli4, backup_id=stream_id, - options=[ - '--recovery-target=immediate', - '--recovery-target-action=promote']) - - self.set_auto_conf(node_tli4, options={'port': node_tli4.port}) - self.set_archiving(backup_dir, 'node', node_tli4) - node_tli4.slow_start() - - node_tli4.pgbench_init(scale=5) - - self.backup_node( - backup_dir, 'node', node_tli4, data_dir=node_tli4.data_dir) - node_tli4.pgbench_init(scale=5) - node_tli4.cleanup() - - # TLI5 - node_tli5 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_tli5')) - node_tli5.cleanup() - - self.restore_node( - backup_dir, 'node', node_tli5, backup_id=stream_id, - options=[ - '--recovery-target=immediate', - '--recovery-target-action=promote']) - - self.set_auto_conf(node_tli5, options={'port': node_tli5.port}) - self.set_archiving(backup_dir, 'node', node_tli5) - node_tli5.slow_start() - node_tli5.pgbench_init(scale=10) - - # delete '.history' file of TLI4 - os.remove(os.path.join(backup_dir, 'wal', 'node', '00000004.history')) - # delete '.history' file of TLI5 - os.remove(os.path.join(backup_dir, 'wal', 'node', '00000005.history')) - - output = self.delete_pb( - backup_dir, 'node', - options=[ - '--delete-wal', '--dry-run', - '--log-level-console=verbose']) - - self.assertIn( - 'INFO: On timeline 4 WAL segments between 000000040000000000000002 ' - 'and 000000040000000000000006 can be removed', - output) - - self.assertIn( - 'INFO: On timeline 5 all files can be removed', - output) - - show_tli1_before = self.show_archive(backup_dir, 'node', tli=1) - show_tli2_before = self.show_archive(backup_dir, 'node', tli=2) - show_tli3_before = self.show_archive(backup_dir, 'node', tli=3) - show_tli4_before = self.show_archive(backup_dir, 'node', tli=4) - show_tli5_before = self.show_archive(backup_dir, 'node', tli=5) - - self.assertTrue(show_tli1_before) - self.assertTrue(show_tli2_before) - self.assertTrue(show_tli3_before) - self.assertTrue(show_tli4_before) - self.assertTrue(show_tli5_before) - - output = self.delete_pb( - backup_dir, 'node', - options=['--delete-wal', '--log-level-console=verbose']) - - self.assertIn( - 'INFO: On timeline 4 WAL segments between 000000040000000000000002 ' - 'and 000000040000000000000006 will be removed', - output) - - self.assertIn( - 'INFO: On timeline 5 all files will be removed', - output) - - show_tli1_after = self.show_archive(backup_dir, 'node', tli=1) - show_tli2_after = self.show_archive(backup_dir, 'node', tli=2) - show_tli3_after = self.show_archive(backup_dir, 'node', tli=3) - show_tli4_after = 
self.show_archive(backup_dir, 'node', tli=4) - show_tli5_after = self.show_archive(backup_dir, 'node', tli=5) - - self.assertEqual(show_tli1_before, show_tli1_after) - self.assertEqual(show_tli2_before, show_tli2_after) - self.assertEqual(show_tli3_before, show_tli3_after) - self.assertNotEqual(show_tli4_before, show_tli4_after) - self.assertNotEqual(show_tli5_before, show_tli5_after) - - self.assertEqual( - show_tli4_before['min-segno'], - '000000040000000000000002') - - self.assertEqual( - show_tli4_after['min-segno'], - '000000040000000000000006') - - self.assertFalse(show_tli5_after) - - self.validate_pb(backup_dir, 'node') - - def test_wal_depth_2(self): - """ - -------------------------------------> tli5 - ---------------------------B6--------> tli4 - S2`---------------> tli3 - S1`------------S2---B4-------B5--> tli2 - B1---S1-------------B2--------B3------> tli1 - - B* - backups - S* - switchpoints - wal-depth=2 - - Expected result: - TLI5 will be purged entirely - B6--------> tli4 - S2`---------------> tli3 - S1`------------S2 B4-------B5--> tli2 - B1---S1 B2--------B3------> tli1 - - wal-depth=2 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_config(backup_dir, 'node', options=['--archive-timeout=60s']) - - node.slow_start() - - # STREAM FULL - stream_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - node.stop() - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL - B1 = self.backup_node(backup_dir, 'node', node) - node.pgbench_init(scale=1) - - target_xid = node.safe_psql( - "postgres", - "select txid_current()").decode('utf-8').rstrip() - node.pgbench_init(scale=5) - - # B2 FULL on TLI1 - B2 = self.backup_node(backup_dir, 'node', node) - node.pgbench_init(scale=4) - self.backup_node(backup_dir, 'node', node) - node.pgbench_init(scale=4) - - # TLI 2 - node_tli2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_tli2')) - node_tli2.cleanup() - - output = self.restore_node( - backup_dir, 'node', node_tli2, - options=[ - '--recovery-target-xid={0}'.format(target_xid), - '--recovery-target-timeline=1', - '--recovery-target-action=promote']) - - self.assertIn( - 'INFO: Restore of backup {0} completed'.format(B1), - output) - - self.set_auto_conf(node_tli2, options={'port': node_tli2.port}) - node_tli2.slow_start() - node_tli2.pgbench_init(scale=4) - - target_xid = node_tli2.safe_psql( - "postgres", - "select txid_current()").decode('utf-8').rstrip() - node_tli2.pgbench_init(scale=1) - - B4 = self.backup_node( - backup_dir, 'node', node_tli2, data_dir=node_tli2.data_dir) - node_tli2.pgbench_init(scale=3) - - self.backup_node( - backup_dir, 'node', node_tli2, data_dir=node_tli2.data_dir) - node_tli2.pgbench_init(scale=1) - node_tli2.cleanup() - - # TLI3 - node_tli3 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_tli3')) - node_tli3.cleanup() - - # Note, that successful validation here is a happy coincidence - output = self.restore_node( - backup_dir, 'node', node_tli3, - options=[ - '--recovery-target-xid={0}'.format(target_xid), - '--recovery-target-timeline=2', - '--recovery-target-action=promote']) - - self.assertIn( - 'INFO: Restore of backup {0} completed'.format(B1), - output) - 
self.set_auto_conf(node_tli3, options={'port': node_tli3.port}) - node_tli3.slow_start() - node_tli3.pgbench_init(scale=5) - node_tli3.cleanup() - - # TLI4 - node_tli4 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_tli4')) - node_tli4.cleanup() - - self.restore_node( - backup_dir, 'node', node_tli4, backup_id=stream_id, - options=[ - '--recovery-target=immediate', - '--recovery-target-action=promote']) - - self.set_auto_conf(node_tli4, options={'port': node_tli4.port}) - self.set_archiving(backup_dir, 'node', node_tli4) - node_tli4.slow_start() - - node_tli4.pgbench_init(scale=5) - - self.backup_node( - backup_dir, 'node', node_tli4, data_dir=node_tli4.data_dir) - node_tli4.pgbench_init(scale=5) - node_tli4.cleanup() - - # TLI5 - node_tli5 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_tli5')) - node_tli5.cleanup() - - self.restore_node( - backup_dir, 'node', node_tli5, backup_id=stream_id, - options=[ - '--recovery-target=immediate', - '--recovery-target-action=promote']) - - self.set_auto_conf(node_tli5, options={'port': node_tli5.port}) - self.set_archiving(backup_dir, 'node', node_tli5) - node_tli5.slow_start() - node_tli5.pgbench_init(scale=10) - - # delete '.history' file of TLI4 - os.remove(os.path.join(backup_dir, 'wal', 'node', '00000004.history')) - # delete '.history' file of TLI5 - os.remove(os.path.join(backup_dir, 'wal', 'node', '00000005.history')) - - output = self.delete_pb( - backup_dir, 'node', - options=[ - '--delete-wal', '--dry-run', - '--wal-depth=2', '--log-level-console=verbose']) - - start_lsn_B2 = self.show_pb(backup_dir, 'node', B2)['start-lsn'] - self.assertIn( - 'On timeline 1 WAL is protected from purge at {0}'.format(start_lsn_B2), - output) - - self.assertIn( - 'LOG: Archive backup {0} to stay consistent protect from ' - 'purge WAL interval between 000000010000000000000004 ' - 'and 000000010000000000000005 on timeline 1'.format(B1), output) - - start_lsn_B4 = self.show_pb(backup_dir, 'node', B4)['start-lsn'] - self.assertIn( - 'On timeline 2 WAL is protected from purge at {0}'.format(start_lsn_B4), - output) - - self.assertIn( - 'LOG: Timeline 3 to stay reachable from timeline 1 protect ' - 'from purge WAL interval between 000000020000000000000006 and ' - '000000020000000000000009 on timeline 2', output) - - self.assertIn( - 'LOG: Timeline 3 to stay reachable from timeline 1 protect ' - 'from purge WAL interval between 000000010000000000000004 and ' - '000000010000000000000006 on timeline 1', output) - - show_tli1_before = self.show_archive(backup_dir, 'node', tli=1) - show_tli2_before = self.show_archive(backup_dir, 'node', tli=2) - show_tli3_before = self.show_archive(backup_dir, 'node', tli=3) - show_tli4_before = self.show_archive(backup_dir, 'node', tli=4) - show_tli5_before = self.show_archive(backup_dir, 'node', tli=5) - - self.assertTrue(show_tli1_before) - self.assertTrue(show_tli2_before) - self.assertTrue(show_tli3_before) - self.assertTrue(show_tli4_before) - self.assertTrue(show_tli5_before) - - sleep(5) - - output = self.delete_pb( - backup_dir, 'node', - options=['--delete-wal', '--wal-depth=2', '--log-level-console=verbose']) - -# print(output) - - show_tli1_after = self.show_archive(backup_dir, 'node', tli=1) - show_tli2_after = self.show_archive(backup_dir, 'node', tli=2) - show_tli3_after = self.show_archive(backup_dir, 'node', tli=3) - show_tli4_after = self.show_archive(backup_dir, 'node', tli=4) - show_tli5_after = self.show_archive(backup_dir, 'node', tli=5) - 
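# --- Illustrative sketch (not part of this patch's test code) ---
# A minimal sketch of how the purge result is checked just below: after
# `delete --delete-wal --wal-depth=N`, `show --archive` reports the removed
# range of a timeline as a 'lost-segments' interval with begin/end segment
# numbers. The show_archive() helper and the JSON keys are the ones already
# used in these tests; the helper name itself is hypothetical and the method
# is assumed to live on a ProbackupTest-based test class.
def assert_purged_interval(self, backup_dir, instance, tli,
                           begin_segno, end_segno):
    timeline = self.show_archive(backup_dir, instance, tli=tli)
    lost = timeline['lost-segments']
    # exactly one purged interval is expected on this timeline
    self.assertEqual(len(lost), 1)
    self.assertEqual(lost[0]['begin-segno'], begin_segno)
    self.assertEqual(lost[0]['end-segno'], end_segno)
# --- end sketch ---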
- self.assertNotEqual(show_tli1_before, show_tli1_after) - self.assertNotEqual(show_tli2_before, show_tli2_after) - self.assertEqual(show_tli3_before, show_tli3_after) - self.assertNotEqual(show_tli4_before, show_tli4_after) - self.assertNotEqual(show_tli5_before, show_tli5_after) - - self.assertEqual( - show_tli4_before['min-segno'], - '000000040000000000000002') - - self.assertEqual( - show_tli4_after['min-segno'], - '000000040000000000000006') - - self.assertFalse(show_tli5_after) - - self.assertTrue(show_tli1_after['lost-segments']) - self.assertTrue(show_tli2_after['lost-segments']) - self.assertFalse(show_tli3_after['lost-segments']) - self.assertFalse(show_tli4_after['lost-segments']) - self.assertFalse(show_tli5_after) - - self.assertEqual(len(show_tli1_after['lost-segments']), 1) - self.assertEqual(len(show_tli2_after['lost-segments']), 1) - - self.assertEqual( - show_tli1_after['lost-segments'][0]['begin-segno'], - '000000010000000000000007') - - self.assertEqual( - show_tli1_after['lost-segments'][0]['end-segno'], - '00000001000000000000000A') - - self.assertEqual( - show_tli2_after['lost-segments'][0]['begin-segno'], - '00000002000000000000000A') - - self.assertEqual( - show_tli2_after['lost-segments'][0]['end-segno'], - '00000002000000000000000A') - - self.validate_pb(backup_dir, 'node') - - def test_basic_wal_depth(self): - """ - B1---B1----B3-----B4----B5------> tli1 - - Expected result with wal-depth=1: - B1 B1 B3 B4 B5------> tli1 - - wal-depth=1 - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_config(backup_dir, 'node', options=['--archive-timeout=60s']) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL - node.pgbench_init(scale=1) - B1 = self.backup_node(backup_dir, 'node', node) - - - # B2 - pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - pgbench.wait() - B2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # B3 - pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - pgbench.wait() - B3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # B4 - pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - pgbench.wait() - B4 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # B5 - pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - pgbench.wait() - B5 = self.backup_node( - backup_dir, 'node', node, backup_type='page', - options=['--wal-depth=1', '--delete-wal']) - - pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - pgbench.wait() - - target_xid = node.safe_psql( - "postgres", - "select txid_current()").decode('utf-8').rstrip() - - self.switch_wal_segment(node) - - pgbench = node.pgbench(options=['-T', '10', '-c', '2']) - pgbench.wait() - - tli1 = self.show_archive(backup_dir, 'node', tli=1) - - # check that there are 4 lost_segments intervals - self.assertEqual(len(tli1['lost-segments']), 4) - - output = self.validate_pb( - backup_dir, 'node', B5, - options=['--recovery-target-xid={0}'.format(target_xid)]) - - print(output) - - self.assertIn( - 'INFO: Backup validation completed successfully on time', - output) - - self.assertIn( - 'xid {0} and LSN'.format(target_xid), - output) - - for backup_id in [B1, B2, B3, B4]: - try: - self.validate_pb( - backup_dir, 'node', backup_id, - 
options=['--recovery-target-xid={0}'.format(target_xid)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be possible " - "without valid full backup.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Not enough WAL records to xid {0}".format(target_xid), - e.message) - - self.validate_pb(backup_dir, 'node') - - def test_concurrent_running_full_backup(self): - """ - https://github.com/postgrespro/pg_probackup/issues/328 - """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL - self.backup_node(backup_dir, 'node', node) - - gdb = self.backup_node(backup_dir, 'node', node, gdb=True) - gdb.set_breakpoint('backup_data_file') - gdb.run_until_break() - gdb.kill() - - self.assertTrue( - self.show_pb(backup_dir, 'node')[0]['status'], - 'RUNNING') - - self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--retention-redundancy=2', '--delete-expired']) - - self.assertTrue( - self.show_pb(backup_dir, 'node')[1]['status'], - 'RUNNING') - - self.backup_node(backup_dir, 'node', node) - - gdb = self.backup_node(backup_dir, 'node', node, gdb=True) - gdb.set_breakpoint('backup_data_file') - gdb.run_until_break() - gdb.kill() - - gdb = self.backup_node(backup_dir, 'node', node, gdb=True) - gdb.set_breakpoint('backup_data_file') - gdb.run_until_break() - gdb.kill() - - self.backup_node(backup_dir, 'node', node) - - gdb = self.backup_node(backup_dir, 'node', node, gdb=True) - gdb.set_breakpoint('backup_data_file') - gdb.run_until_break() - gdb.kill() - - self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--retention-redundancy=2', '--delete-expired'], - return_id=False) - - self.assertTrue( - self.show_pb(backup_dir, 'node')[0]['status'], - 'OK') - - self.assertTrue( - self.show_pb(backup_dir, 'node')[1]['status'], - 'RUNNING') - - self.assertTrue( - self.show_pb(backup_dir, 'node')[2]['status'], - 'OK') - - self.assertEqual( - len(self.show_pb(backup_dir, 'node')), - 6) diff --git a/tests/set_backup_test.py b/tests/set_backup_test.py deleted file mode 100644 index e789d174a..000000000 --- a/tests/set_backup_test.py +++ /dev/null @@ -1,476 +0,0 @@ -import unittest -import subprocess -import os -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from sys import exit -from datetime import datetime, timedelta - - -class SetBackupTest(ProbackupTest, unittest.TestCase): - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_set_backup_sanity(self): - """general sanity for set-backup command""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - recovery_time = self.show_pb( - backup_dir, 
'node', backup_id=backup_id)['recovery-time'] - - expire_time_1 = "{:%Y-%m-%d %H:%M:%S}".format( - datetime.now() + timedelta(days=5)) - - try: - self.set_backup(backup_dir, False, options=['--ttl=30d']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of missing instance. " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: required parameter not specified: --instance', - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - try: - self.set_backup( - backup_dir, 'node', - options=[ - "--ttl=30d", - "--expire-time='{0}'".format(expire_time_1)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because options cannot be mixed. " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: You cannot specify '--expire-time' " - "and '--ttl' options together", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - try: - self.set_backup(backup_dir, 'node', options=["--ttl=30d"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of missing backup_id. " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: You must specify parameter (-i, --backup-id) " - "for 'set-backup' command", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.set_backup( - backup_dir, 'node', backup_id, options=["--ttl=30d"]) - - actual_expire_time = self.show_pb( - backup_dir, 'node', backup_id=backup_id)['expire-time'] - - self.assertNotEqual(expire_time_1, actual_expire_time) - - expire_time_2 = "{:%Y-%m-%d %H:%M:%S}".format( - datetime.now() + timedelta(days=6)) - - self.set_backup( - backup_dir, 'node', backup_id, - options=["--expire-time={0}".format(expire_time_2)]) - - actual_expire_time = self.show_pb( - backup_dir, 'node', backup_id=backup_id)['expire-time'] - - self.assertIn(expire_time_2, actual_expire_time) - - # unpin backup - self.set_backup( - backup_dir, 'node', backup_id, options=["--ttl=0"]) - - attr_list = self.show_pb( - backup_dir, 'node', backup_id=backup_id) - - self.assertNotIn('expire-time', attr_list) - - self.set_backup( - backup_dir, 'node', backup_id, options=["--expire-time={0}".format(recovery_time)]) - - # parse string to datetime object - #new_expire_time = datetime.strptime(new_expire_time, '%Y-%m-%d %H:%M:%S%z') - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_retention_redundancy_pinning(self): - """""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - with open(os.path.join( - backup_dir, 'backups', 'node', - "pg_probackup.conf"), "a") as conf: - conf.write("retention-redundancy = 1\n") - - self.set_config( - backup_dir, 'node', options=['--retention-redundancy=1']) - - # Make backups to be purged - full_id = self.backup_node(backup_dir, 'node', node) - page_id = 
self.backup_node( - backup_dir, 'node', node, backup_type="page") - # Make backups to be keeped - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type="page") - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) - - self.set_backup( - backup_dir, 'node', page_id, options=['--ttl=5d']) - - # Purge backups - log = self.delete_expired( - backup_dir, 'node', - options=['--delete-expired', '--log-level-console=LOG']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) - - self.assertIn('Time Window: 0d/5d', log) - self.assertIn( - 'LOG: Backup {0} is pinned until'.format(page_id), - log) - self.assertIn( - 'LOG: Retain backup {0} because his descendant ' - '{1} is guarded by retention'.format(full_id, page_id), - log) - - # @unittest.skip("skip") - def test_retention_window_pinning(self): - """purge all backups using window-based retention policy""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # take FULL BACKUP - backup_id_1 = self.backup_node(backup_dir, 'node', node) - page1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Take second FULL BACKUP - backup_id_2 = self.backup_node(backup_dir, 'node', node) - page2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Take third FULL BACKUP - backup_id_3 = self.backup_node(backup_dir, 'node', node) - page2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup == 'pg_probackup.conf': - continue - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) - - self.set_backup( - backup_dir, 'node', page1, options=['--ttl=30d']) - - # Purge backups - out = self.delete_expired( - backup_dir, 'node', - options=[ - '--log-level-console=LOG', - '--retention-window=1', - '--delete-expired']) - - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) - - self.assertIn( - 'LOG: Backup {0} is pinned until'.format(page1), out) - - self.assertIn( - 'LOG: Retain backup {0} because his descendant ' - '{1} is guarded by retention'.format(backup_id_1, page1), - out) - - # @unittest.skip("skip") - def test_wal_retention_and_pinning(self): - """ - B1---B2---P---B3---> - wal-depth=2 - P - pinned backup - - expected result after WAL purge: - B1 B2---P---B3---> - - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # take FULL BACKUP - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - node.pgbench_init(scale=1) - - # Take PAGE BACKUP - self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=['--stream']) - - node.pgbench_init(scale=1) - - # Take DELTA BACKUP and pin it - expire_time = "{:%Y-%m-%d %H:%M:%S}".format( - datetime.now() 
+ timedelta(days=6)) - backup_id_pinned = self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=[ - '--stream', - '--expire-time={0}'.format(expire_time)]) - - node.pgbench_init(scale=1) - - # Take second PAGE BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta', options=['--stream']) - - node.pgbench_init(scale=1) - - # Purge backups - out = self.delete_expired( - backup_dir, 'node', - options=[ - '--log-level-console=LOG', - '--delete-wal', '--wal-depth=2']) - - # print(out) - self.assertIn( - 'Pinned backup {0} is ignored for the ' - 'purpose of WAL retention'.format(backup_id_pinned), - out) - - for instance in self.show_archive(backup_dir): - timelines = instance['timelines'] - - # sanity - for timeline in timelines: - self.assertEqual( - timeline['min-segno'], - '000000010000000000000004') - self.assertEqual(timeline['status'], 'OK') - - # @unittest.skip("skip") - def test_wal_retention_and_pinning_1(self): - """ - P---B1---> - wal-depth=2 - P - pinned backup - - expected result after WAL purge: - P---B1---> - - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - expire_time = "{:%Y-%m-%d %H:%M:%S}".format( - datetime.now() + timedelta(days=6)) - - # take FULL BACKUP - backup_id_pinned = self.backup_node( - backup_dir, 'node', node, - options=['--expire-time={0}'.format(expire_time)]) - - node.pgbench_init(scale=2) - - # Take second PAGE BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - node.pgbench_init(scale=2) - - # Purge backups - out = self.delete_expired( - backup_dir, 'node', - options=[ - '--log-level-console=verbose', - '--delete-wal', '--wal-depth=2']) - - print(out) - self.assertIn( - 'Pinned backup {0} is ignored for the ' - 'purpose of WAL retention'.format(backup_id_pinned), - out) - - for instance in self.show_archive(backup_dir): - timelines = instance['timelines'] - - # sanity - for timeline in timelines: - self.assertEqual( - timeline['min-segno'], - '000000010000000000000002') - self.assertEqual(timeline['status'], 'OK') - - self.validate_pb(backup_dir) - - # @unittest.skip("skip") - def test_add_note_newlines(self): - """""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # FULL - backup_id = self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--note={0}'.format('hello\nhello')]) - - backup_meta = self.show_pb(backup_dir, 'node', backup_id) - self.assertEqual(backup_meta['note'], "hello") - - self.set_backup(backup_dir, 'node', backup_id, options=['--note=hello\nhello']) - - backup_meta = self.show_pb(backup_dir, 'node', backup_id) - self.assertEqual(backup_meta['note'], "hello") - - self.set_backup(backup_dir, 'node', backup_id, options=['--note=none']) - - backup_meta = self.show_pb(backup_dir, 'node', backup_id) - self.assertNotIn('note', backup_meta) - - # @unittest.skip("skip") - def test_add_big_note(self): - """""" - node = self.make_simple_node( - 
base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - -# note = node.safe_psql( -# "postgres", -# "SELECT repeat('hello', 400)").rstrip() # TODO: investigate - - note = node.safe_psql( - "postgres", - "SELECT repeat('hello', 210)").rstrip() - - # FULL - try: - self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--note={0}'.format(note)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because note is too large " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup note cannot exceed 1024 bytes", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - note = node.safe_psql( - "postgres", - "SELECT repeat('hello', 200)").decode('utf-8').rstrip() - - backup_id = self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--note={0}'.format(note)]) - - backup_meta = self.show_pb(backup_dir, 'node', backup_id) - self.assertEqual(backup_meta['note'], note) - - - # @unittest.skip("skip") - def test_add_big_note_1(self): - """""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - note = node.safe_psql( - "postgres", - "SELECT repeat('q', 1024)").decode('utf-8').rstrip() - - # FULL - backup_id = self.backup_node(backup_dir, 'node', node, options=['--stream']) - - self.set_backup( - backup_dir, 'node', backup_id, - options=['--note={0}'.format(note)]) - - backup_meta = self.show_pb(backup_dir, 'node', backup_id) - - print(backup_meta) - self.assertEqual(backup_meta['note'], note) diff --git a/tests/show_test.py b/tests/show_test.py deleted file mode 100644 index c4b96499d..000000000 --- a/tests/show_test.py +++ /dev/null @@ -1,509 +0,0 @@ -import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException - - -class ShowTest(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_show_1(self): - """Status DONE and OK""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.assertEqual( - self.backup_node( - backup_dir, 'node', node, - options=["--log-level-console=off"]), - None - ) - self.assertIn("OK", self.show_pb(backup_dir, 'node', as_text=True)) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_show_json(self): - """Status DONE and OK""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - 
self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.assertEqual( - self.backup_node( - backup_dir, 'node', node, - options=["--log-level-console=off"]), - None - ) - self.backup_node(backup_dir, 'node', node) - self.assertIn("OK", self.show_pb(backup_dir, 'node', as_text=True)) - - # @unittest.skip("skip") - def test_corrupt_2(self): - """Status CORRUPT""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - backup_id = self.backup_node(backup_dir, 'node', node) - - # delete file which belong to backup - file = os.path.join( - backup_dir, "backups", "node", - backup_id, "database", "postgresql.conf") - os.remove(file) - - try: - self.validate_pb(backup_dir, 'node', backup_id) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because backup corrupted." - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd - ) - ) - except ProbackupException as e: - self.assertIn( - 'data files are corrupted', - e.message, - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(e.message), self.cmd) - ) - self.assertIn("CORRUPT", self.show_pb(backup_dir, as_text=True)) - - # @unittest.skip("skip") - def test_no_control_file(self): - """backup.control doesn't exist""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - backup_id = self.backup_node(backup_dir, 'node', node) - - # delete backup.control file - file = os.path.join( - backup_dir, "backups", "node", - backup_id, "backup.control") - os.remove(file) - - output = self.show_pb(backup_dir, 'node', as_text=True, as_json=False) - - self.assertIn( - 'Control file', - output) - - self.assertIn( - 'doesn\'t exist', - output) - - # @unittest.skip("skip") - def test_empty_control_file(self): - """backup.control is empty""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - backup_id = self.backup_node(backup_dir, 'node', node) - - # truncate backup.control file - file = os.path.join( - backup_dir, "backups", "node", - backup_id, "backup.control") - fd = open(file, 'w') - fd.close() - - output = self.show_pb(backup_dir, 'node', as_text=True, as_json=False) - - self.assertIn( - 'Control file', - output) - - self.assertIn( - 'is empty', - output) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_corrupt_control_file(self): - """backup.control contains invalid option""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - 
self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - backup_id = self.backup_node(backup_dir, 'node', node) - - # corrupt backup.control file - file = os.path.join( - backup_dir, "backups", "node", - backup_id, "backup.control") - fd = open(file, 'a') - fd.write("statuss = OK") - fd.close() - - self.assertIn( - 'WARNING: Invalid option "statuss" in file', - self.show_pb(backup_dir, 'node', as_json=False, as_text=True)) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_corrupt_correctness(self): - """backup.control contains invalid option""" - if not self.remote: - self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" - " for run this test") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=1) - - # FULL - backup_local_id = self.backup_node( - backup_dir, 'node', node, no_remote=True) - - output_local = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_local_id) - - backup_remote_id = self.backup_node(backup_dir, 'node', node) - - output_remote = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_remote_id) - - # check correctness - self.assertEqual( - output_local['data-bytes'], - output_remote['data-bytes']) - - self.assertEqual( - output_local['uncompressed-bytes'], - output_remote['uncompressed-bytes']) - - # DELTA - backup_local_id = self.backup_node( - backup_dir, 'node', node, - backup_type='delta', no_remote=True) - - output_local = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_local_id) - self.delete_pb(backup_dir, 'node', backup_local_id) - - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - output_remote = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_remote_id) - self.delete_pb(backup_dir, 'node', backup_remote_id) - - # check correctness - self.assertEqual( - output_local['data-bytes'], - output_remote['data-bytes']) - - self.assertEqual( - output_local['uncompressed-bytes'], - output_remote['uncompressed-bytes']) - - # PAGE - backup_local_id = self.backup_node( - backup_dir, 'node', node, - backup_type='page', no_remote=True) - - output_local = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_local_id) - self.delete_pb(backup_dir, 'node', backup_local_id) - - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - output_remote = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_remote_id) - self.delete_pb(backup_dir, 'node', backup_remote_id) - - # check correctness - self.assertEqual( - output_local['data-bytes'], - output_remote['data-bytes']) - - self.assertEqual( - output_local['uncompressed-bytes'], - output_remote['uncompressed-bytes']) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_corrupt_correctness_1(self): - """backup.control contains invalid option""" - if not self.remote: - self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" - " for run this test") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - 
initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=1) - - # FULL - backup_local_id = self.backup_node( - backup_dir, 'node', node, no_remote=True) - - output_local = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_local_id) - - backup_remote_id = self.backup_node(backup_dir, 'node', node) - - output_remote = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_remote_id) - - # check correctness - self.assertEqual( - output_local['data-bytes'], - output_remote['data-bytes']) - - self.assertEqual( - output_local['uncompressed-bytes'], - output_remote['uncompressed-bytes']) - - # change data - pgbench = node.pgbench(options=['-T', '10', '--no-vacuum']) - pgbench.wait() - - # DELTA - backup_local_id = self.backup_node( - backup_dir, 'node', node, - backup_type='delta', no_remote=True) - - output_local = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_local_id) - self.delete_pb(backup_dir, 'node', backup_local_id) - - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') - - output_remote = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_remote_id) - self.delete_pb(backup_dir, 'node', backup_remote_id) - - # check correctness - self.assertEqual( - output_local['data-bytes'], - output_remote['data-bytes']) - - self.assertEqual( - output_local['uncompressed-bytes'], - output_remote['uncompressed-bytes']) - - # PAGE - backup_local_id = self.backup_node( - backup_dir, 'node', node, - backup_type='page', no_remote=True) - - output_local = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_local_id) - self.delete_pb(backup_dir, 'node', backup_local_id) - - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - output_remote = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_remote_id) - self.delete_pb(backup_dir, 'node', backup_remote_id) - - # check correctness - self.assertEqual( - output_local['data-bytes'], - output_remote['data-bytes']) - - self.assertEqual( - output_local['uncompressed-bytes'], - output_remote['uncompressed-bytes']) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_corrupt_correctness_2(self): - """backup.control contains invalid option""" - if not self.remote: - self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" - " for run this test") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=1) - - # FULL - backup_local_id = self.backup_node( - backup_dir, 'node', node, - options=['--compress'], no_remote=True) - - output_local = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_local_id) - - if self.remote: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, options=['--compress']) - else: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, - options=['--remote-proto=ssh', '--remote-host=localhost', '--compress']) - - output_remote = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_remote_id) - - # check correctness - self.assertEqual( - 
output_local['data-bytes'], - output_remote['data-bytes']) - - self.assertEqual( - output_local['uncompressed-bytes'], - output_remote['uncompressed-bytes']) - - # change data - pgbench = node.pgbench(options=['-T', '10', '--no-vacuum']) - pgbench.wait() - - # DELTA - backup_local_id = self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['--compress'], no_remote=True) - - output_local = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_local_id) - self.delete_pb(backup_dir, 'node', backup_local_id) - - if self.remote: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta', options=['--compress']) - else: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--remote-proto=ssh', '--remote-host=localhost', '--compress']) - - output_remote = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_remote_id) - self.delete_pb(backup_dir, 'node', backup_remote_id) - - # check correctness - self.assertEqual( - output_local['data-bytes'], - output_remote['data-bytes']) - - self.assertEqual( - output_local['uncompressed-bytes'], - output_remote['uncompressed-bytes']) - - # PAGE - backup_local_id = self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=['--compress'], no_remote=True) - - output_local = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_local_id) - self.delete_pb(backup_dir, 'node', backup_local_id) - - if self.remote: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', options=['--compress']) - else: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', - options=['--remote-proto=ssh', '--remote-host=localhost', '--compress']) - - output_remote = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_remote_id) - self.delete_pb(backup_dir, 'node', backup_remote_id) - - # check correctness - self.assertEqual( - output_local['data-bytes'], - output_remote['data-bytes']) - - self.assertEqual( - output_local['uncompressed-bytes'], - output_remote['uncompressed-bytes']) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_color_with_no_terminal(self): - """backup.control contains invalid option""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], - pg_options={'autovacuum': 'off'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=1) - - # FULL - try: - self.backup_node( - backup_dir, 'node', node, options=['--archive-timeout=1s']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because archiving is disabled\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertNotIn( - '[0m', e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) diff --git a/tests/time_consuming_test.py b/tests/time_consuming_test.py deleted file mode 100644 index c0038c085..000000000 --- a/tests/time_consuming_test.py +++ /dev/null @@ -1,77 +0,0 @@ -import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest -import subprocess -from time import sleep - - -class TimeConsumingTests(ProbackupTest, unittest.TestCase): - def 
test_pbckp150(self): - """ - https://jira.postgrespro.ru/browse/PBCKP-150 - create a node filled with pgbench - create FULL backup followed by PTRACK backup - run pgbench, vacuum VERBOSE FULL and ptrack backups in parallel - """ - # init node - if self.pg_config_version < self.version_to_num('11.0'): - self.skipTest('You need PostgreSQL >= 11 for this test') - if not self.ptrack: - self.skipTest('Skipped because ptrack support is disabled') - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums'], - pg_options={ - 'max_connections': 100, - 'log_statement': 'none', - 'log_checkpoints': 'on', - 'autovacuum': 'off', - 'ptrack.map_size': 1}) - - if node.major_version >= 13: - self.set_auto_conf(node, {'wal_keep_size': '16000MB'}) - else: - self.set_auto_conf(node, {'wal_keep_segments': '1000'}) - - # init probackup and add an instance - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - - # run the node and init ptrack - node.slow_start() - node.safe_psql("postgres", "CREATE EXTENSION ptrack") - # populate it with pgbench - node.pgbench_init(scale=5) - - # FULL backup followed by PTRACK backup - self.backup_node(backup_dir, 'node', node, options=['--stream']) - self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) - - # run ordinary pgbench scenario to imitate some activity and another pgbench for vacuuming in parallel - nBenchDuration = 30 - pgbench = node.pgbench(options=['-c', '20', '-j', '8', '-T', str(nBenchDuration)]) - with open('/tmp/pbckp150vacuum.sql', 'w') as f: - f.write('VACUUM (FULL) pgbench_accounts, pgbench_tellers, pgbench_history; SELECT pg_sleep(1);\n') - pgbenchval = node.pgbench(options=['-c', '1', '-f', '/tmp/pbckp150vacuum.sql', '-T', str(nBenchDuration)]) - - # several PTRACK backups - for i in range(nBenchDuration): - print("[{}] backing up PTRACK diff...".format(i+1)) - self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream', '--log-level-console', 'VERBOSE']) - sleep(0.1) - # if the activity pgbench has finished, stop backing up - if pgbench.poll() is not None: - break - - pgbench.kill() - pgbenchval.kill() - pgbench.wait() - pgbenchval.wait() - - backups = self.show_pb(backup_dir, 'node') - for b in backups: - self.assertEqual("OK", b['status']) diff --git a/tests/time_stamp_test.py b/tests/time_stamp_test.py deleted file mode 100644 index 170c62cd4..000000000 --- a/tests/time_stamp_test.py +++ /dev/null @@ -1,236 +0,0 @@ -import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -import subprocess -from time import sleep - - -class TimeStamp(ProbackupTest, unittest.TestCase): - - def test_start_time_format(self): - """Test backup ID changing after start-time editing in backup.control. 
- We should convert local time in UTC format""" - # Create simple node - node = self.make_simple_node( - base_dir="{0}/{1}/node".format(self.module_name, self.fname), - set_replication=True, - initdb_params=['--data-checksums']) - - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.start() - - backup_id = self.backup_node(backup_dir, 'node', node, options=['--stream', '-j 2']) - show_backup = self.show_pb(backup_dir, 'node') - - i = 0 - while i < 2: - with open(os.path.join(backup_dir, "backups", "node", backup_id, "backup.control"), "r+") as f: - output = "" - for line in f: - if line.startswith('start-time') is True: - if i == 0: - output = output + str(line[:-5])+'+00\''+'\n' - else: - output = output + str(line[:-5]) + '\'' + '\n' - else: - output = output + str(line) - f.close() - - with open(os.path.join(backup_dir, "backups", "node", backup_id, "backup.control"), "w") as fw: - fw.write(output) - fw.flush() - show_backup = show_backup + self.show_pb(backup_dir, 'node') - i += 1 - - print(show_backup[1]['id']) - print(show_backup[2]['id']) - - self.assertTrue(show_backup[1]['id'] == show_backup[2]['id'], "ERROR: Localtime format using instead of UTC") - - output = self.show_pb(backup_dir, as_json=False, as_text=True) - self.assertNotIn("backup ID in control file", output) - - node.stop() - - def test_server_date_style(self): - """Issue #112""" - node = self.make_simple_node( - base_dir="{0}/{1}/node".format(self.module_name, self.fname), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={"datestyle": "GERMAN, DMY"}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.start() - - self.backup_node( - backup_dir, 'node', node, options=['--stream', '-j 2']) - - def test_handling_of_TZ_env_variable(self): - """Issue #284""" - node = self.make_simple_node( - base_dir="{0}/{1}/node".format(self.module_name, self.fname), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.start() - - my_env = os.environ.copy() - my_env["TZ"] = "America/Detroit" - - self.backup_node( - backup_dir, 'node', node, options=['--stream', '-j 2'], env=my_env) - - output = self.show_pb(backup_dir, 'node', as_json=False, as_text=True, env=my_env) - - self.assertNotIn("backup ID in control file", output) - - @unittest.skip("skip") - # @unittest.expectedFailure - def test_dst_timezone_handling(self): - """for manual testing""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - print(subprocess.Popen( - ['sudo', 'timedatectl', 'set-timezone', 'America/Detroit'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate()) - - subprocess.Popen( - ['sudo', 'timedatectl', 'set-ntp', 'false'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate() - - subprocess.Popen( - ['sudo', 'timedatectl', 'set-time', '2020-05-25 12:00:00'], - 
stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate() - - # FULL - output = self.backup_node(backup_dir, 'node', node, return_id=False) - self.assertNotIn("backup ID in control file", output) - - # move to dst - subprocess.Popen( - ['sudo', 'timedatectl', 'set-time', '2020-10-25 12:00:00'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate() - - # DELTA - output = self.backup_node( - backup_dir, 'node', node, backup_type='delta', return_id=False) - self.assertNotIn("backup ID in control file", output) - - subprocess.Popen( - ['sudo', 'timedatectl', 'set-time', '2020-12-01 12:00:00'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate() - - # DELTA - self.backup_node(backup_dir, 'node', node, backup_type='delta') - - output = self.show_pb(backup_dir, as_json=False, as_text=True) - self.assertNotIn("backup ID in control file", output) - - subprocess.Popen( - ['sudo', 'timedatectl', 'set-ntp', 'true'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate() - - sleep(10) - - self.backup_node(backup_dir, 'node', node, backup_type='delta') - - output = self.show_pb(backup_dir, as_json=False, as_text=True) - self.assertNotIn("backup ID in control file", output) - - subprocess.Popen( - ['sudo', 'timedatectl', 'set-timezone', 'US/Moscow'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate() - - @unittest.skip("skip") - def test_dst_timezone_handling_backward_compatibilty(self): - """for manual testing""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - subprocess.Popen( - ['sudo', 'timedatectl', 'set-timezone', 'America/Detroit'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate() - - subprocess.Popen( - ['sudo', 'timedatectl', 'set-ntp', 'false'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate() - - subprocess.Popen( - ['sudo', 'timedatectl', 'set-time', '2020-05-25 12:00:00'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate() - - # FULL - self.backup_node(backup_dir, 'node', node, old_binary=True, return_id=False) - - # move to dst - subprocess.Popen( - ['sudo', 'timedatectl', 'set-time', '2020-10-25 12:00:00'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate() - - # DELTA - output = self.backup_node( - backup_dir, 'node', node, backup_type='delta', old_binary=True, return_id=False) - - subprocess.Popen( - ['sudo', 'timedatectl', 'set-time', '2020-12-01 12:00:00'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate() - - # DELTA - self.backup_node(backup_dir, 'node', node, backup_type='delta') - - output = self.show_pb(backup_dir, as_json=False, as_text=True) - self.assertNotIn("backup ID in control file", output) - - subprocess.Popen( - ['sudo', 'timedatectl', 'set-ntp', 'true'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate() - - sleep(10) - - self.backup_node(backup_dir, 'node', node, backup_type='delta') - - output = self.show_pb(backup_dir, as_json=False, as_text=True) - self.assertNotIn("backup ID in control file", output) - - subprocess.Popen( - ['sudo', 'timedatectl', 'set-timezone', 'US/Moscow'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate() diff --git a/tests/validate_test.py b/tests/validate_test.py 
deleted file mode 100644 index 98a0fd13f..000000000 --- a/tests/validate_test.py +++ /dev/null @@ -1,4083 +0,0 @@ -import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from datetime import datetime, timedelta -from pathlib import Path -import subprocess -from sys import exit -import time -import hashlib - - -class ValidateTest(ProbackupTest, unittest.TestCase): - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_basic_validate_nullified_heap_page_backup(self): - """ - make node with nullified heap block - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=3) - - file_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('pgbench_accounts')").decode('utf-8').rstrip() - - node.safe_psql( - "postgres", - "CHECKPOINT") - - # Nullify some block in PostgreSQL - file = os.path.join(node.data_dir, file_path) - with open(file, 'r+b') as f: - f.seek(8192) - f.write(b"\x00"*8192) - f.flush() - f.close - - self.backup_node( - backup_dir, 'node', node, options=['--log-level-file=verbose']) - - pgdata = self.pgdata_content(node.data_dir) - - if not self.remote: - log_file_path = os.path.join(backup_dir, "log", "pg_probackup.log") - with open(log_file_path) as f: - log_content = f.read() - self.assertIn( - 'File: "{0}" blknum 1, empty page'.format(Path(file).as_posix()), - log_content, - 'Failed to detect nullified block') - - self.validate_pb(backup_dir, options=["-j", "4"]) - node.cleanup() - - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_validate_wal_unreal_values(self): - """ - make node with archiving, make archive backup - validate to both real and unreal values - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=3) - with node.connect("postgres") as con: - con.execute("CREATE TABLE tbl0005 (a text)") - con.commit() - - backup_id = self.backup_node(backup_dir, 'node', node) - - node.pgbench_init(scale=3) - - target_time = self.show_pb( - backup_dir, 'node', backup_id)['recovery-time'] - after_backup_time = datetime.now().replace(second=0, microsecond=0) - - # Validate to real time - self.assertIn( - "INFO: Backup validation completed successfully", - self.validate_pb( - backup_dir, 'node', - options=["--time={0}".format(target_time), "-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - # Validate to unreal time - unreal_time_1 = after_backup_time - timedelta(days=2) - try: - self.validate_pb( - backup_dir, 'node', options=["--time={0}".format( - unreal_time_1), "-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of validation to unreal time.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except 
ProbackupException as e: - self.assertIn( - 'ERROR: Backup satisfying target options is not found', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # Validate to unreal time #2 - unreal_time_2 = after_backup_time + timedelta(days=2) - try: - self.validate_pb( - backup_dir, 'node', - options=["--time={0}".format(unreal_time_2), "-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of validation to unreal time.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: Not enough WAL records to time' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # Validate to real xid - target_xid = None - with node.connect("postgres") as con: - res = con.execute( - "INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") - con.commit() - target_xid = res[0][0] - self.switch_wal_segment(node) - time.sleep(5) - - self.assertIn( - "INFO: Backup validation completed successfully", - self.validate_pb( - backup_dir, 'node', options=["--xid={0}".format(target_xid), - "-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - # Validate to unreal xid - unreal_xid = int(target_xid) + 1000 - try: - self.validate_pb( - backup_dir, 'node', options=["--xid={0}".format(unreal_xid), - "-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of validation to unreal xid.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: Not enough WAL records to xid' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # Validate with backup ID - output = self.validate_pb(backup_dir, 'node', backup_id, - options=["-j", "4"]) - self.assertIn( - "INFO: Validating backup {0}".format(backup_id), - output, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - self.assertIn( - "INFO: Backup {0} data files are valid".format(backup_id), - output, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - self.assertIn( - "INFO: Backup {0} WAL segments are valid".format(backup_id), - output, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - self.assertIn( - "INFO: Backup {0} is valid".format(backup_id), - output, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - self.assertIn( - "INFO: Validate of backup {0} completed".format(backup_id), - output, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - # @unittest.skip("skip") - def test_basic_validate_corrupted_intermediate_backup(self): - """ - make archive node, take FULL, PAGE1, PAGE2 backups, - corrupt file in PAGE1 backup, - run validate on PAGE1, expect PAGE1 to gain status CORRUPT - and PAGE2 gain status ORPHAN - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL - backup_id_1 = self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "create table t_heap as select i as id, 
md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - file_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - # PAGE1 - backup_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(10000,20000) i") - # PAGE2 - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Corrupt some file - file = os.path.join( - backup_dir, 'backups', 'node', - backup_id_2, 'database', file_path) - with open(file, "r+b", 0) as f: - f.seek(42) - f.write(b"blah") - f.flush() - f.close - - # Simple validate - try: - self.validate_pb( - backup_dir, 'node', backup_id=backup_id_2, options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data files corruption.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'INFO: Validating parents for backup {0}'.format( - backup_id_2) in e.message and - 'ERROR: Backup {0} is corrupt'.format( - backup_id_2) in e.message and - 'WARNING: Backup {0} data files are corrupted'.format( - backup_id_2) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'CORRUPT', - self.show_pb(backup_dir, 'node', backup_id_2)['status'], - 'Backup STATUS should be "CORRUPT"') - self.assertEqual( - 'ORPHAN', - self.show_pb(backup_dir, 'node', backup_id_3)['status'], - 'Backup STATUS should be "ORPHAN"') - - # @unittest.skip("skip") - def test_validate_corrupted_intermediate_backups(self): - """ - make archive node, take FULL, PAGE1, PAGE2 backups, - corrupt file in FULL and PAGE1 backups, run validate on PAGE1, - expect FULL and PAGE1 to gain status CORRUPT and - PAGE2 gain status ORPHAN - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - file_path_t_heap = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - # FULL - backup_id_1 = self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "create table t_heap_1 as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - file_path_t_heap_1 = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap_1')").decode('utf-8').rstrip() - # PAGE1 - backup_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(20000,30000) i") - # PAGE2 - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Corrupt some file in FULL backup - file_full = os.path.join( - backup_dir, 'backups', 'node', - backup_id_1, 'database', 
file_path_t_heap) - with open(file_full, "rb+", 0) as f: - f.seek(84) - f.write(b"blah") - f.flush() - f.close - - # Corrupt some file in PAGE1 backup - file_page1 = os.path.join( - backup_dir, 'backups', 'node', - backup_id_2, 'database', file_path_t_heap_1) - with open(file_page1, "rb+", 0) as f: - f.seek(42) - f.write(b"blah") - f.flush() - f.close - - # Validate PAGE1 - try: - self.validate_pb( - backup_dir, 'node', backup_id=backup_id_2, options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data files corruption.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'INFO: Validating parents for backup {0}'.format( - backup_id_2) in e.message, - '\n Unexpected Error Message: {0}\n ' - 'CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_1) in e.message and - 'WARNING: Invalid CRC of backup file' in e.message and - 'WARNING: Backup {0} data files are corrupted'.format( - backup_id_1) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Backup {0} is orphaned because his parent'.format( - backup_id_2) in e.message and - 'WARNING: Backup {0} is orphaned because his parent'.format( - backup_id_3) in e.message and - 'ERROR: Backup {0} is orphan.'.format( - backup_id_2) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'CORRUPT', - self.show_pb(backup_dir, 'node', backup_id_1)['status'], - 'Backup STATUS should be "CORRUPT"') - self.assertEqual( - 'ORPHAN', - self.show_pb(backup_dir, 'node', backup_id_2)['status'], - 'Backup STATUS should be "ORPHAN"') - self.assertEqual( - 'ORPHAN', - self.show_pb(backup_dir, 'node', backup_id_3)['status'], - 'Backup STATUS should be "ORPHAN"') - - # @unittest.skip("skip") - def test_validate_specific_error_intermediate_backups(self): - """ - make archive node, take FULL, PAGE1, PAGE2 backups, - change backup status of FULL and PAGE1 to ERROR, - run validate on PAGE1 - purpose of this test is to be sure that not only - CORRUPT backup descendants can be orphanized - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL - backup_id_1 = self.backup_node(backup_dir, 'node', node) - - # PAGE1 - backup_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGE2 - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Change FULL backup status to ERROR - control_path = os.path.join( - backup_dir, 'backups', 'node', backup_id_1, 'backup.control') - - with open(control_path, 'r') as f: - actual_control = f.read() - - new_control_file = '' - for line in actual_control.splitlines(): - new_control_file += line.replace( - 'status = OK', 'status = ERROR') - new_control_file += '\n' - - with open(control_path, 'wt') as f: - f.write(new_control_file) - f.flush() - f.close() - - # Validate PAGE1 - try: - self.validate_pb( - backup_dir, 'node', backup_id=backup_id_2, options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because backup has status ERROR.\n " - "Output: {0} \n CMD: 
{1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'WARNING: Backup {0} is orphaned because ' - 'his parent {1} has status: ERROR'.format( - backup_id_2, backup_id_1) in e.message and - 'INFO: Validating parents for backup {0}'.format( - backup_id_2) in e.message and - 'WARNING: Backup {0} has status ERROR. Skip validation.'.format( - backup_id_1) and - 'ERROR: Backup {0} is orphan.'.format(backup_id_2) in e.message, - '\n Unexpected Error Message: {0}\n ' - 'CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'ERROR', - self.show_pb(backup_dir, 'node', backup_id_1)['status'], - 'Backup STATUS should be "ERROR"') - self.assertEqual( - 'ORPHAN', - self.show_pb(backup_dir, 'node', backup_id_2)['status'], - 'Backup STATUS should be "ORPHAN"') - self.assertEqual( - 'ORPHAN', - self.show_pb(backup_dir, 'node', backup_id_3)['status'], - 'Backup STATUS should be "ORPHAN"') - - # @unittest.skip("skip") - def test_validate_error_intermediate_backups(self): - """ - make archive node, take FULL, PAGE1, PAGE2 backups, - change backup status of FULL and PAGE1 to ERROR, - run validate on instance - purpose of this test is to be sure that not only - CORRUPT backup descendants can be orphanized - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL - backup_id_1 = self.backup_node(backup_dir, 'node', node) - - # PAGE1 - backup_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGE2 - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Change FULL backup status to ERROR - control_path = os.path.join( - backup_dir, 'backups', 'node', backup_id_1, 'backup.control') - - with open(control_path, 'r') as f: - actual_control = f.read() - - new_control_file = '' - for line in actual_control.splitlines(): - new_control_file += line.replace( - 'status = OK', 'status = ERROR') - new_control_file += '\n' - - with open(control_path, 'wt') as f: - f.write(new_control_file) - f.flush() - f.close() - - # Validate instance - try: - self.validate_pb(backup_dir, options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because backup has status ERROR.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "WARNING: Backup {0} is orphaned because " - "his parent {1} has status: ERROR".format( - backup_id_2, backup_id_1) in e.message and - 'WARNING: Backup {0} has status ERROR. 
Skip validation'.format( - backup_id_1) in e.message and - "WARNING: Some backups are not valid" in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'ERROR', - self.show_pb(backup_dir, 'node', backup_id_1)['status'], - 'Backup STATUS should be "ERROR"') - self.assertEqual( - 'ORPHAN', - self.show_pb(backup_dir, 'node', backup_id_2)['status'], - 'Backup STATUS should be "ORPHAN"') - self.assertEqual( - 'ORPHAN', - self.show_pb(backup_dir, 'node', backup_id_3)['status'], - 'Backup STATUS should be "ORPHAN"') - - # @unittest.skip("skip") - def test_validate_corrupted_intermediate_backups_1(self): - """ - make archive node, FULL1, PAGE1, PAGE2, PAGE3, PAGE4, PAGE5, FULL2, - corrupt file in PAGE1 and PAGE4, run validate on PAGE3, - expect PAGE1 to gain status CORRUPT, PAGE2, PAGE3, PAGE4 and PAGE5 - to gain status ORPHAN - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL1 - backup_id_1 = self.backup_node(backup_dir, 'node', node) - - # PAGE1 - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - backup_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGE2 - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - file_page_2 = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGE3 - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(10000,20000) i") - backup_id_4 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGE4 - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(20000,30000) i") - backup_id_5 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGE5 - node.safe_psql( - "postgres", - "create table t_heap1 as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - file_page_5 = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap1')").decode('utf-8').rstrip() - backup_id_6 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGE6 - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(30000,40000) i") - backup_id_7 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # FULL2 - backup_id_8 = self.backup_node(backup_dir, 'node', node) - - # Corrupt some file in PAGE2 and PAGE5 backups - file_page1 = os.path.join( - backup_dir, 'backups', 'node', backup_id_3, 'database', file_page_2) - with open(file_page1, "rb+", 0) as f: - f.seek(84) - f.write(b"blah") - f.flush() - 
f.close - - file_page4 = os.path.join( - backup_dir, 'backups', 'node', backup_id_6, 'database', file_page_5) - with open(file_page4, "rb+", 0) as f: - f.seek(42) - f.write(b"blah") - f.flush() - f.close - - # Validate PAGE3 - try: - self.validate_pb( - backup_dir, 'node', - backup_id=backup_id_4, options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data files corruption.\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'INFO: Validating parents for backup {0}'.format( - backup_id_4) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_1) in e.message and - 'INFO: Backup {0} data files are valid'.format( - backup_id_1) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_2) in e.message and - 'INFO: Backup {0} data files are valid'.format( - backup_id_2) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_3) in e.message and - 'WARNING: Invalid CRC of backup file' in e.message and - 'WARNING: Backup {0} data files are corrupted'.format( - backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Backup {0} is orphaned because ' - 'his parent {1} has status: CORRUPT'.format( - backup_id_4, backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Backup {0} is orphaned because ' - 'his parent {1} has status: CORRUPT'.format( - backup_id_5, backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Backup {0} is orphaned because ' - 'his parent {1} has status: CORRUPT'.format( - backup_id_6, backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Backup {0} is orphaned because ' - 'his parent {1} has status: CORRUPT'.format( - backup_id_7, backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'ERROR: Backup {0} is orphan'.format(backup_id_4) in e.message, - '\n Unexpected Error Message: {0}\n ' - 'CMD: {1}'.format(repr(e.message), self.cmd)) - - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_1)['status'], - 'Backup STATUS should be "OK"') - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_2)['status'], - 'Backup STATUS should be "OK"') - self.assertEqual( - 'CORRUPT', self.show_pb(backup_dir, 'node', backup_id_3)['status'], - 'Backup STATUS should be "CORRUPT"') - self.assertEqual( - 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_4)['status'], - 'Backup STATUS should be "ORPHAN"') - self.assertEqual( - 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_5)['status'], - 'Backup STATUS should be "ORPHAN"') - self.assertEqual( - 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_6)['status'], - 'Backup STATUS should be "ORPHAN"') - self.assertEqual( - 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_7)['status'], - 'Backup STATUS should be "ORPHAN"') - self.assertEqual( - 
'OK', self.show_pb(backup_dir, 'node', backup_id_8)['status'], - 'Backup STATUS should be "OK"') - - # @unittest.skip("skip") - def test_validate_specific_target_corrupted_intermediate_backups(self): - """ - make archive node, take FULL1, PAGE1, PAGE2, PAGE3, PAGE4, PAGE5, FULL2 - corrupt file in PAGE1 and PAGE4, run validate on PAGE3 to specific xid, - expect PAGE1 to gain status CORRUPT, PAGE2, PAGE3, PAGE4 and PAGE5 to - gain status ORPHAN - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL1 - backup_id_1 = self.backup_node(backup_dir, 'node', node) - - # PAGE1 - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - backup_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGE2 - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - file_page_2 = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGE3 - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(10000,20000) i") - backup_id_4 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGE4 - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(20000,30000) i") - - target_xid = node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(30001, 30001) i RETURNING (xmin)").decode('utf-8').rstrip() - - backup_id_5 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGE5 - node.safe_psql( - "postgres", - "create table t_heap1 as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - file_page_5 = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap1')").decode('utf-8').rstrip() - backup_id_6 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGE6 - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(30000,40000) i") - backup_id_7 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # FULL2 - backup_id_8 = self.backup_node(backup_dir, 'node', node) - - # Corrupt some file in PAGE2 and PAGE5 backups - file_page1 = os.path.join( - backup_dir, 'backups', 'node', - backup_id_3, 'database', file_page_2) - with open(file_page1, "rb+", 0) as f: - f.seek(84) - f.write(b"blah") - f.flush() - f.close - - file_page4 = os.path.join( - backup_dir, 'backups', 'node', - backup_id_6, 'database', file_page_5) - with open(file_page4, "rb+", 0) as f: - f.seek(42) - f.write(b"blah") - f.flush() - f.close - - 
# Validate PAGE3 - try: - self.validate_pb( - backup_dir, 'node', - options=[ - '-i', backup_id_4, '--xid={0}'.format(target_xid), "-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data files corruption.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'INFO: Validating parents for backup {0}'.format( - backup_id_4) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_1) in e.message and - 'INFO: Backup {0} data files are valid'.format( - backup_id_1) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_2) in e.message and - 'INFO: Backup {0} data files are valid'.format( - backup_id_2) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_3) in e.message and - 'WARNING: Invalid CRC of backup file' in e.message and - 'WARNING: Backup {0} data files are corrupted'.format( - backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Backup {0} is orphaned because his ' - 'parent {1} has status: CORRUPT'.format( - backup_id_4, backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Backup {0} is orphaned because his ' - 'parent {1} has status: CORRUPT'.format( - backup_id_5, backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Backup {0} is orphaned because his ' - 'parent {1} has status: CORRUPT'.format( - backup_id_6, backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Backup {0} is orphaned because his ' - 'parent {1} has status: CORRUPT'.format( - backup_id_7, backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'ERROR: Backup {0} is orphan'.format( - backup_id_4) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_1)['status'], 'Backup STATUS should be "OK"') - self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_2)['status'], 'Backup STATUS should be "OK"') - self.assertEqual('CORRUPT', self.show_pb(backup_dir, 'node', backup_id_3)['status'], 'Backup STATUS should be "CORRUPT"') - self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_4)['status'], 'Backup STATUS should be "ORPHAN"') - self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_5)['status'], 'Backup STATUS should be "ORPHAN"') - self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_6)['status'], 'Backup STATUS should be "ORPHAN"') - self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_7)['status'], 'Backup STATUS should be "ORPHAN"') - self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_8)['status'], 'Backup STATUS should be "OK"') - - # @unittest.skip("skip") - def test_validate_instance_with_several_corrupt_backups(self): - """ - make archive node, 
take FULL1, PAGE1_1, FULL2, PAGE2_1 backups, FULL3 - corrupt file in FULL and FULL2 and run validate on instance, - expect FULL1 to gain status CORRUPT, PAGE1_1 to gain status ORPHAN - FULL2 to gain status CORRUPT, PAGE2_1 to gain status ORPHAN - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "create table t_heap as select generate_series(0,1) i") - # FULL1 - backup_id_1 = self.backup_node( - backup_dir, 'node', node, options=['--no-validate']) - - # FULL2 - backup_id_2 = self.backup_node(backup_dir, 'node', node) - rel_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - - node.safe_psql( - "postgres", - "insert into t_heap values(2)") - - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # FULL3 - backup_id_4 = self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "insert into t_heap values(3)") - - backup_id_5 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # FULL4 - backup_id_6 = self.backup_node( - backup_dir, 'node', node, options=['--no-validate']) - - # Corrupt some files in FULL2 and FULL3 backup - os.remove(os.path.join( - backup_dir, 'backups', 'node', backup_id_2, - 'database', rel_path)) - os.remove(os.path.join( - backup_dir, 'backups', 'node', backup_id_4, - 'database', rel_path)) - - # Validate Instance - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4", "--log-level-file=LOG"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data files corruption.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "INFO: Validate backups of the instance 'node'" in e.message, - "\n Unexpected Error Message: {0}\n " - "CMD: {1}".format(repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Some backups are not valid' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_1)['status'], - 'Backup STATUS should be "OK"') - self.assertEqual( - 'CORRUPT', self.show_pb(backup_dir, 'node', backup_id_2)['status'], - 'Backup STATUS should be "CORRUPT"') - self.assertEqual( - 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_3)['status'], - 'Backup STATUS should be "ORPHAN"') - self.assertEqual( - 'CORRUPT', self.show_pb(backup_dir, 'node', backup_id_4)['status'], - 'Backup STATUS should be "CORRUPT"') - self.assertEqual( - 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_5)['status'], - 'Backup STATUS should be "ORPHAN"') - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_6)['status'], - 'Backup STATUS should be "OK"') - - # @unittest.skip("skip") - def test_validate_instance_with_several_corrupt_backups_interrupt(self): - """ - check that interrupt during validation is handled correctly - """ - self._check_gdb_flag_or_skip_test() - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - 
self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "create table t_heap as select generate_series(0,1) i") - # FULL1 - backup_id_1 = self.backup_node( - backup_dir, 'node', node, options=['--no-validate']) - - # FULL2 - backup_id_2 = self.backup_node(backup_dir, 'node', node) - rel_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - - node.safe_psql( - "postgres", - "insert into t_heap values(2)") - - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # FULL3 - backup_id_4 = self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "insert into t_heap values(3)") - - backup_id_5 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # FULL4 - backup_id_6 = self.backup_node( - backup_dir, 'node', node, options=['--no-validate']) - - # Corrupt some files in FULL2 and FULL3 backup - os.remove(os.path.join( - backup_dir, 'backups', 'node', backup_id_1, - 'database', rel_path)) - os.remove(os.path.join( - backup_dir, 'backups', 'node', backup_id_3, - 'database', rel_path)) - - # Validate Instance - gdb = self.validate_pb( - backup_dir, 'node', options=["-j", "4", "--log-level-file=LOG"], gdb=True) - - gdb.set_breakpoint('validate_file_pages') - gdb.run_until_break() - gdb.continue_execution_until_break() - gdb.remove_all_breakpoints() - gdb._execute('signal SIGINT') - gdb.continue_execution_until_error() - - self.assertEqual( - 'DONE', self.show_pb(backup_dir, 'node', backup_id_1)['status'], - 'Backup STATUS should be "OK"') - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_2)['status'], - 'Backup STATUS should be "OK"') - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_3)['status'], - 'Backup STATUS should be "CORRUPT"') - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'], - 'Backup STATUS should be "ORPHAN"') - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'], - 'Backup STATUS should be "OK"') - self.assertEqual( - 'DONE', self.show_pb(backup_dir, 'node', backup_id_6)['status'], - 'Backup STATUS should be "OK"') - - log_file = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(log_file, 'r') as f: - log_content = f.read() - self.assertNotIn( - 'Interrupted while locking backup', log_content) - - # @unittest.skip("skip") - def test_validate_instance_with_corrupted_page(self): - """ - make archive node, take FULL, PAGE1, PAGE2, FULL2, PAGE3 backups, - corrupt file in PAGE1 backup and run validate on instance, - expect PAGE1 to gain status CORRUPT, PAGE2 to gain status ORPHAN - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - # FULL1 - backup_id_1 = self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "create table t_heap1 as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from 
generate_series(0,10000) i") - file_path_t_heap1 = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap1')").decode('utf-8').rstrip() - # PAGE1 - backup_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(20000,30000) i") - # PAGE2 - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - # FULL1 - backup_id_4 = self.backup_node( - backup_dir, 'node', node) - # PAGE3 - backup_id_5 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # Corrupt some file in FULL backup - file_full = os.path.join( - backup_dir, 'backups', 'node', backup_id_2, - 'database', file_path_t_heap1) - with open(file_full, "rb+", 0) as f: - f.seek(84) - f.write(b"blah") - f.flush() - f.close - - # Validate Instance - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data files corruption.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "INFO: Validate backups of the instance 'node'" in e.message, - "\n Unexpected Error Message: {0}\n " - "CMD: {1}".format(repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_5) in e.message and - 'INFO: Backup {0} data files are valid'.format( - backup_id_5) in e.message and - 'INFO: Backup {0} WAL segments are valid'.format( - backup_id_5) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_4) in e.message and - 'INFO: Backup {0} data files are valid'.format( - backup_id_4) in e.message and - 'INFO: Backup {0} WAL segments are valid'.format( - backup_id_4) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_3) in e.message and - 'INFO: Backup {0} data files are valid'.format( - backup_id_3) in e.message and - 'INFO: Backup {0} WAL segments are valid'.format( - backup_id_3) in e.message and - 'WARNING: Backup {0} is orphaned because ' - 'his parent {1} has status: CORRUPT'.format( - backup_id_3, backup_id_2) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_2) in e.message and - 'WARNING: Invalid CRC of backup file' in e.message and - 'WARNING: Backup {0} data files are corrupted'.format( - backup_id_2) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_1) in e.message and - 'INFO: Backup {0} data files are valid'.format( - backup_id_1) in e.message and - 'INFO: Backup {0} WAL segments are valid'.format( - backup_id_1) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Some backups are not valid' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_1)['status'], - 'Backup STATUS should be "OK"') - self.assertEqual( - 'CORRUPT', self.show_pb(backup_dir, 'node', 
backup_id_2)['status'], - 'Backup STATUS should be "CORRUPT"') - self.assertEqual( - 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_3)['status'], - 'Backup STATUS should be "ORPHAN"') - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'], - 'Backup STATUS should be "OK"') - self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'], - 'Backup STATUS should be "OK"') - - # @unittest.skip("skip") - def test_validate_instance_with_corrupted_full_and_try_restore(self): - """make archive node, take FULL, PAGE1, PAGE2, FULL2, PAGE3 backups, - corrupt file in FULL backup and run validate on instance, - expect FULL to gain status CORRUPT, PAGE1 and PAGE2 to gain status ORPHAN, - try to restore backup with --no-validation option""" - node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - file_path_t_heap = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - # FULL1 - backup_id_1 = self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - # PAGE1 - backup_id_2 = self.backup_node(backup_dir, 'node', node, backup_type='page') - - # PAGE2 - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(20000,30000) i") - backup_id_3 = self.backup_node(backup_dir, 'node', node, backup_type='page') - - # FULL1 - backup_id_4 = self.backup_node(backup_dir, 'node', node) - - # PAGE3 - node.safe_psql( - "postgres", - "insert into t_heap select i as id, " - "md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(30000,40000) i") - backup_id_5 = self.backup_node(backup_dir, 'node', node, backup_type='page') - - # Corrupt some file in FULL backup - file_full = os.path.join( - backup_dir, 'backups', 'node', - backup_id_1, 'database', file_path_t_heap) - with open(file_full, "rb+", 0) as f: - f.seek(84) - f.write(b"blah") - f.flush() - f.close - - # Validate Instance - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4"]) - self.assertEqual(1, 0, "Expecting Error because of data files corruption.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'INFO: Validating backup {0}'.format(backup_id_1) in e.message - and "INFO: Validate backups of the instance 'node'" in e.message - and 'WARNING: Invalid CRC of backup file' in e.message - and 'WARNING: Backup {0} data files are corrupted'.format(backup_id_1) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - self.assertEqual('CORRUPT', self.show_pb(backup_dir, 'node', backup_id_1)['status'], 'Backup STATUS should be "CORRUPT"') - self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_2)['status'], 'Backup STATUS should be "ORPHAN"') - 
self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') - self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'], 'Backup STATUS should be "OK"') - self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'], 'Backup STATUS should be "OK"') - - node.cleanup() - restore_out = self.restore_node( - backup_dir, 'node', node, - options=["--no-validate"]) - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id_5), - restore_out, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - - # @unittest.skip("skip") - def test_validate_instance_with_corrupted_full(self): - """make archive node, take FULL, PAGE1, PAGE2, FULL2, PAGE3 backups, - corrupt file in FULL backup and run validate on instance, - expect FULL to gain status CORRUPT, PAGE1 and PAGE2 to gain status ORPHAN""" - node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.safe_psql( - "postgres", - "create table t_heap as select i as id, " - "md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - - file_path_t_heap = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - # FULL1 - backup_id_1 = self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - - # PAGE1 - backup_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # PAGE2 - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(20000,30000) i") - - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - # FULL1 - backup_id_4 = self.backup_node( - backup_dir, 'node', node) - - # PAGE3 - node.safe_psql( - "postgres", - "insert into t_heap select i as id, " - "md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(30000,40000) i") - backup_id_5 = self.backup_node(backup_dir, 'node', node, backup_type='page') - - # Corrupt some file in FULL backup - file_full = os.path.join( - backup_dir, 'backups', 'node', - backup_id_1, 'database', file_path_t_heap) - with open(file_full, "rb+", 0) as f: - f.seek(84) - f.write(b"blah") - f.flush() - f.close - - # Validate Instance - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data files corruption.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'INFO: Validating backup {0}'.format(backup_id_1) in e.message - and "INFO: Validate backups of the instance 'node'" in e.message - and 'WARNING: Invalid CRC of backup file' in e.message - and 'WARNING: Backup {0} data files are corrupted'.format(backup_id_1) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - self.assertEqual('CORRUPT', self.show_pb(backup_dir, 'node', 
backup_id_1)['status'], 'Backup STATUS should be "CORRUPT"') - self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_2)['status'], 'Backup STATUS should be "ORPHAN"') - self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') - self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'], 'Backup STATUS should be "OK"') - self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'], 'Backup STATUS should be "OK"') - - # @unittest.skip("skip") - def test_validate_corrupt_wal_1(self): - """make archive node, take FULL1, PAGE1,PAGE2,FULL2,PAGE3,PAGE4 backups, corrupt all wal files, run validate, expect errors""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - backup_id_1 = self.backup_node(backup_dir, 'node', node) - - with node.connect("postgres") as con: - con.execute("CREATE TABLE tbl0005 (a text)") - con.commit() - - backup_id_2 = self.backup_node(backup_dir, 'node', node) - - # Corrupt WAL - wals_dir = os.path.join(backup_dir, 'wal', 'node') - wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')] - wals.sort() - for wal in wals: - with open(os.path.join(wals_dir, wal), "rb+", 0) as f: - f.seek(42) - f.write(b"blablablaadssaaaaaaaaaaaaaaa") - f.flush() - f.close - - # Simple validate - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of wal segments corruption.\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'WARNING: Backup' in e.message and - 'WAL segments are corrupted' in e.message and - "WARNING: There are not enough WAL " - "records to consistenly restore backup" in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'CORRUPT', - self.show_pb(backup_dir, 'node', backup_id_1)['status'], - 'Backup STATUS should be "CORRUPT"') - self.assertEqual( - 'CORRUPT', - self.show_pb(backup_dir, 'node', backup_id_2)['status'], - 'Backup STATUS should be "CORRUPT"') - - # @unittest.skip("skip") - def test_validate_corrupt_wal_2(self): - """make archive node, make full backup, corrupt all wal files, run validate to real xid, expect errors""" - node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - with node.connect("postgres") as con: - con.execute("CREATE TABLE tbl0005 (a text)") - con.commit() - - backup_id = self.backup_node(backup_dir, 'node', node) - target_xid = None - with node.connect("postgres") as con: - res = con.execute( - "INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") - con.commit() - target_xid = res[0][0] - - # Corrupt WAL - wals_dir = os.path.join(backup_dir, 'wal', 'node') - wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not 
f.endswith('.backup')] - wals.sort() - for wal in wals: - with open(os.path.join(wals_dir, wal), "rb+", 0) as f: - f.seek(128) - f.write(b"blablablaadssaaaaaaaaaaaaaaa") - f.flush() - f.close - - # Validate to xid - try: - self.validate_pb( - backup_dir, - 'node', - backup_id, - options=[ - "--xid={0}".format(target_xid), "-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of wal segments corruption.\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'WARNING: Backup' in e.message and - 'WAL segments are corrupted' in e.message and - "WARNING: There are not enough WAL " - "records to consistenly restore backup" in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'CORRUPT', - self.show_pb(backup_dir, 'node', backup_id)['status'], - 'Backup STATUS should be "CORRUPT"') - - # @unittest.skip("skip") - def test_validate_wal_lost_segment_1(self): - """make archive node, make archive full backup, - delete from archive wal segment which belong to previous backup - run validate, expecting error because of missing wal segment - make sure that backup status is 'CORRUPT' - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - node.pgbench_init(scale=3) - - backup_id = self.backup_node(backup_dir, 'node', node) - - # Delete wal segment - wals_dir = os.path.join(backup_dir, 'wal', 'node') - wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')] - wals.sort() - file = os.path.join(backup_dir, 'wal', 'node', wals[-1]) - os.remove(file) - - # cut out '.gz' - if self.archive_compress: - file = file[:-3] - - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of wal segment disappearance.\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "is absent" in e.message and - "WARNING: There are not enough WAL records to consistenly " - "restore backup {0}".format(backup_id) in e.message and - "WARNING: Backup {0} WAL segments are corrupted".format( - backup_id) in e.message and - "WARNING: Some backups are not valid" in e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'CORRUPT', - self.show_pb(backup_dir, 'node', backup_id)['status'], - 'Backup {0} should have STATUS "CORRUPT"') - - # Run validate again - try: - self.validate_pb(backup_dir, 'node', backup_id, options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of backup corruption.\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'INFO: Revalidating backup {0}'.format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'ERROR: Backup {0} is corrupt.'.format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_validate_corrupt_wal_between_backups(self): - """ - 
make archive node, make full backup, corrupt all wal files, - run validate to real xid, expect errors - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - backup_id = self.backup_node(backup_dir, 'node', node) - - # make some wals - node.pgbench_init(scale=3) - - with node.connect("postgres") as con: - con.execute("CREATE TABLE tbl0005 (a text)") - con.commit() - - with node.connect("postgres") as con: - res = con.execute( - "INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") - con.commit() - target_xid = res[0][0] - - if self.get_version(node) < self.version_to_num('10.0'): - walfile = node.safe_psql( - 'postgres', - 'select pg_xlogfile_name(pg_current_xlog_location())').decode('utf-8').rstrip() - else: - walfile = node.safe_psql( - 'postgres', - 'select pg_walfile_name(pg_current_wal_lsn())').decode('utf-8').rstrip() - - if self.archive_compress: - walfile = walfile + '.gz' - self.switch_wal_segment(node) - - # generate some wals - node.pgbench_init(scale=3) - - self.backup_node(backup_dir, 'node', node) - - # Corrupt WAL - wals_dir = os.path.join(backup_dir, 'wal', 'node') - with open(os.path.join(wals_dir, walfile), "rb+", 0) as f: - f.seek(9000) - f.write(b"b") - f.flush() - f.close - - # Validate to xid - try: - self.validate_pb( - backup_dir, - 'node', - backup_id, - options=[ - "--xid={0}".format(target_xid), "-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of wal segments corruption.\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: Not enough WAL records to xid' in e.message and - 'WARNING: Recovery can be done up to time' in e.message and - "ERROR: Not enough WAL records to xid {0}\n".format( - target_xid), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'OK', - self.show_pb(backup_dir, 'node')[0]['status'], - 'Backup STATUS should be "OK"') - - self.assertEqual( - 'OK', - self.show_pb(backup_dir, 'node')[1]['status'], - 'Backup STATUS should be "OK"') - - # @unittest.skip("skip") - def test_pgpro702_688(self): - """ - make node without archiving, make stream backup, - get Recovery Time, validate to Recovery Time - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - backup_id = self.backup_node( - backup_dir, 'node', node, options=["--stream"]) - recovery_time = self.show_pb( - backup_dir, 'node', backup_id=backup_id)['recovery-time'] - - try: - self.validate_pb( - backup_dir, 'node', - options=["--time={0}".format(recovery_time), "-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of wal segment disappearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WAL archive is empty. 
You cannot restore backup to a ' - 'recovery target without WAL archive', e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_pgpro688(self): - """ - make node with archiving, make backup, get Recovery Time, - validate to Recovery Time. Waiting PGPRO-688. RESOLVED - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - backup_id = self.backup_node(backup_dir, 'node', node) - recovery_time = self.show_pb( - backup_dir, 'node', backup_id)['recovery-time'] - - self.validate_pb( - backup_dir, 'node', options=["--time={0}".format(recovery_time), - "-j", "4"]) - - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_pgpro561(self): - """ - make node with archiving, make stream backup, - restore it to node1, check that archiving is not successful on node1 - """ - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node1', node1) - self.set_archiving(backup_dir, 'node1', node1) - node1.slow_start() - - backup_id = self.backup_node( - backup_dir, 'node1', node1, options=["--stream"]) - - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2')) - node2.cleanup() - - node1.psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,256) i") - - self.backup_node( - backup_dir, 'node1', node1, - backup_type='page', options=["--stream"]) - self.restore_node(backup_dir, 'node1', data_dir=node2.data_dir) - - self.set_auto_conf( - node2, {'port': node2.port, 'archive_mode': 'off'}) - - node2.slow_start() - - self.set_auto_conf( - node2, {'archive_mode': 'on'}) - - node2.stop() - node2.slow_start() - - timeline_node1 = node1.get_control_data()["Latest checkpoint's TimeLineID"] - timeline_node2 = node2.get_control_data()["Latest checkpoint's TimeLineID"] - self.assertEqual( - timeline_node1, timeline_node2, - "Timelines on Master and Node1 should be equal. " - "This is unexpected") - - archive_command_node1 = node1.safe_psql( - "postgres", "show archive_command") - archive_command_node2 = node2.safe_psql( - "postgres", "show archive_command") - self.assertEqual( - archive_command_node1, archive_command_node2, - "Archive command on Master and Node should be equal. 
" - "This is unexpected") - - # result = node2.safe_psql("postgres", "select last_failed_wal from pg_stat_get_archiver() where last_failed_wal is not NULL") - ## self.assertEqual(res, six.b(""), 'Restored Node1 failed to archive segment {0} due to having the same archive command as Master'.format(res.rstrip())) - # if result == "": - # self.assertEqual(1, 0, 'Error is expected due to Master and Node1 having the common archive and archive_command') - - node1.psql( - "postgres", - "create table t_heap_1 as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10) i") - - self.switch_wal_segment(node1) - -# wals_dir = os.path.join(backup_dir, 'wal', 'node1') -# wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join( -# wals_dir, f)) and not f.endswith('.backup') and not f.endswith('.part')] -# wals = map(str, wals) -# print(wals) - - self.switch_wal_segment(node2) - -# wals_dir = os.path.join(backup_dir, 'wal', 'node1') -# wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join( -# wals_dir, f)) and not f.endswith('.backup') and not f.endswith('.part')] -# wals = map(str, wals) -# print(wals) - - time.sleep(5) - - log_file = os.path.join(node2.logs_dir, 'postgresql.log') - with open(log_file, 'r') as f: - log_content = f.read() - self.assertTrue( - 'LOG: archive command failed with exit code 1' in log_content and - 'DETAIL: The failed archive command was:' in log_content and - 'WAL file already exists in archive with different checksum' in log_content, - 'Expecting error messages about failed archive_command' - ) - self.assertFalse( - 'pg_probackup archive-push completed successfully' in log_content) - - # @unittest.skip("skip") - def test_validate_corrupted_full(self): - """ - make node with archiving, take full backup, and three page backups, - take another full backup and three page backups - corrupt second full backup, run validate, check that - second full backup became CORRUPT and his page backups are ORPHANs - remove corruption and run valudate again, check that - second full backup and his page backups are OK - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - backup_id = self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - node.safe_psql( - "postgres", - "alter system set archive_command = 'false'") - node.reload() - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=['--archive-timeout=1s']) - self.assertEqual( - 1, 0, - "Expecting Error because of data file dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - pass - - self.assertTrue( - self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR') - self.set_archiving(backup_dir, 'node', node) - node.reload() - self.backup_node(backup_dir, 'node', node, backup_type='page') - 
- file = os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', 'postgresql.auto.conf') - - file_new = os.path.join(backup_dir, 'postgresql.auto.conf') - os.rename(file, file_new) - - try: - self.validate_pb(backup_dir, options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data file dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'Validating backup {0}'.format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} data files are corrupted'.format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Some backups are not valid'.format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue( - self.show_pb(backup_dir, 'node')[3]['status'] == 'CORRUPT') - self.assertTrue( - self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') - self.assertTrue( - self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') - self.assertTrue( - self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR') - self.assertTrue( - self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - - os.rename(file_new, file) - try: - self.validate_pb(backup_dir, options=["-j", "4"]) - except ProbackupException as e: - self.assertIn( - 'WARNING: Some backups are not valid'.format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') - self.assertTrue( - self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') - - # @unittest.skip("skip") - def test_validate_corrupted_full_1(self): - """ - make node with archiving, take full backup, and three page backups, - take another full backup and four page backups - corrupt second full backup, run validate, check that - second full backup became CORRUPT and his page backups are ORPHANs - remove corruption from full backup and corrupt his second page backup - run valudate again, check that - second full backup and his firts page backups are OK, - second page should be CORRUPT - third page should be ORPHAN - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 
'node', node, backup_type='page') - - backup_id = self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - backup_id_page = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - file = os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', 'postgresql.auto.conf') - - file_new = os.path.join(backup_dir, 'postgresql.auto.conf') - os.rename(file, file_new) - - try: - self.validate_pb(backup_dir, options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data file dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'Validating backup {0}'.format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} data files are corrupted'.format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Some backups are not valid'.format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'CORRUPT') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - os.rename(file_new, file) - - file = os.path.join( - backup_dir, 'backups', 'node', - backup_id_page, 'database', 'backup_label') - - file_new = os.path.join(backup_dir, 'backup_label') - os.rename(file, file_new) - - try: - self.validate_pb(backup_dir, options=["-j", "4"]) - except ProbackupException as e: - self.assertIn( - 'WARNING: Some backups are not valid'.format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'CORRUPT') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - - # @unittest.skip("skip") - def test_validate_corrupted_full_2(self): - """ - PAGE2_2b - PAGE2_2a - PAGE2_4 - PAGE2_4 <- validate - PAGE2_3 - PAGE2_2 <- CORRUPT - PAGE2_1 - FULL2 - PAGE1_1 - FULL1 - corrupt second page backup, run validate on PAGE2_3, check that - PAGE2_2 became CORRUPT and his descendants are ORPHANs, - take two more PAGE backups, which now trace their origin - to PAGE2_1 - latest OK backup, - run validate on PAGE2_3, check that PAGE2_2a and PAGE2_2b are OK, - - remove corruption from PAGE2_2 and run validate on PAGE2_4 - """ - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = 
os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - corrupt_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - validate_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - file = os.path.join( - backup_dir, 'backups', 'node', - corrupt_id, 'database', 'backup_label') - - file_new = os.path.join(backup_dir, 'backup_label') - os.rename(file, file_new) - - try: - self.validate_pb(backup_dir, 'node', validate_id, - options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data file dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'INFO: Validating parents for backup {0}'.format(validate_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'INFO: Validating backup {0}'.format( - self.show_pb(backup_dir, 'node')[2]['id']), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'INFO: Validating backup {0}'.format( - self.show_pb(backup_dir, 'node')[3]['id']), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'INFO: Validating backup {0}'.format( - corrupt_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} data files are corrupted'.format( - corrupt_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'CORRUPT') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - # THIS IS GOLD!!!! 
- self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data file dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'Backup {0} data files are valid'.format( - self.show_pb(backup_dir, 'node')[9]['id']), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'Backup {0} data files are valid'.format( - self.show_pb(backup_dir, 'node')[8]['id']), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[7]['id'], corrupt_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[6]['id'], corrupt_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[5]['id'], corrupt_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'INFO: Revalidating backup {0}'.format( - corrupt_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Some backups are not valid', e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'CORRUPT') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - # revalidate again - - try: - self.validate_pb(backup_dir, 'node', validate_id, - options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data file dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} has status: ORPHAN'.format(validate_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[7]['id'], corrupt_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[6]['id'], corrupt_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - 
self.assertIn( - 'Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[5]['id'], corrupt_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'INFO: Validating parents for backup {0}'.format( - validate_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'INFO: Validating backup {0}'.format( - self.show_pb(backup_dir, 'node')[2]['id']), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'INFO: Validating backup {0}'.format( - self.show_pb(backup_dir, 'node')[3]['id']), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'INFO: Revalidating backup {0}'.format( - corrupt_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Backup {0} data files are corrupted'.format( - corrupt_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'ERROR: Backup {0} is orphan.'.format( - validate_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # Fix CORRUPT - os.rename(file_new, file) - - output = self.validate_pb(backup_dir, 'node', validate_id, - options=["-j", "4"]) - - self.assertIn( - 'WARNING: Backup {0} has status: ORPHAN'.format(validate_id), - output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[7]['id'], corrupt_id), - output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[6]['id'], corrupt_id), - output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[5]['id'], corrupt_id), - output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'INFO: Validating parents for backup {0}'.format( - validate_id), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'INFO: Validating backup {0}'.format( - self.show_pb(backup_dir, 'node')[2]['id']), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'INFO: Validating backup {0}'.format( - self.show_pb(backup_dir, 'node')[3]['id']), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'INFO: Revalidating backup {0}'.format( - corrupt_id), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'Backup {0} data files are valid'.format( - corrupt_id), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'INFO: Revalidating backup {0}'.format( - self.show_pb(backup_dir, 'node')[5]['id']), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'Backup {0} data files are valid'.format( - self.show_pb(backup_dir, 'node')[5]['id']), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'INFO: Revalidating backup {0}'.format( - validate_id), output, - '\n 
Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'Backup {0} data files are valid'.format( - validate_id), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'INFO: Backup {0} WAL segments are valid'.format( - validate_id), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'INFO: Backup {0} is valid.'.format( - validate_id), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'INFO: Validate of backup {0} completed.'.format( - validate_id), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - # Now we have two perfectly valid backup chains based on FULL2 - - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - # @unittest.skip("skip") - def test_validate_corrupted_full_missing(self): - """ - make node with archiving, take full backup, and three page backups, - take another full backup and four page backups - corrupt second full backup, run validate, check that - second full backup became CORRUPT and his page backups are ORPHANs - remove corruption from full backup and remove his second page backup - run valudate again, check that - second full backup and his firts page backups are OK, - third page should be ORPHAN - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - backup_id = self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - backup_id_page = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - file = os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', 'postgresql.auto.conf') - - file_new = os.path.join(backup_dir, 'postgresql.auto.conf') - os.rename(file, file_new) - - try: - self.validate_pb(backup_dir, options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data file dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'Validating backup {0}'.format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - 
repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} data files are corrupted'.format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} has status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[5]['id'], backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'CORRUPT') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - # Full backup is fixed - os.rename(file_new, file) - - # break PAGE - old_directory = os.path.join( - backup_dir, 'backups', 'node', backup_id_page) - new_directory = os.path.join(backup_dir, backup_id_page) - os.rename(old_directory, new_directory) - - try: - self.validate_pb(backup_dir, options=["-j", "4"]) - except ProbackupException as e: - self.assertIn( - 'WARNING: Some backups are not valid', e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[7]['id'], - backup_id_page), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[6]['id'], - backup_id_page), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[5]['id'], backup_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - # missing backup is here - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - # validate should be idempotent - user running validate - # second time must be provided with ID of missing backup - - try: - self.validate_pb(backup_dir, options=["-j", "4"]) - except ProbackupException as e: - self.assertIn( - 'WARNING: Some backups are not valid', e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[7]['id'], - backup_id_page), e.message, - '\n Unexpected Error Message: {0}\n CMD: 
{1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[6]['id'], - backup_id_page), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - # missing backup is here - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - # fix missing PAGE backup - os.rename(new_directory, old_directory) - # exit(1) - - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - output = self.validate_pb(backup_dir, options=["-j", "4"]) - - self.assertIn( - 'INFO: All backups are valid', - output, - '\n Unexpected Error Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'WARNING: Backup {0} has parent {1} with status: ORPHAN'.format( - self.show_pb(backup_dir, 'node')[8]['id'], - self.show_pb(backup_dir, 'node')[6]['id']), - output, - '\n Unexpected Error Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'WARNING: Backup {0} has parent {1} with status: ORPHAN'.format( - self.show_pb(backup_dir, 'node')[7]['id'], - self.show_pb(backup_dir, 'node')[6]['id']), - output, - '\n Unexpected Error Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'Revalidating backup {0}'.format( - self.show_pb(backup_dir, 'node')[6]['id']), - output, - '\n Unexpected Error Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'Revalidating backup {0}'.format( - self.show_pb(backup_dir, 'node')[7]['id']), - output, - '\n Unexpected Error Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'Revalidating backup {0}'.format( - self.show_pb(backup_dir, 'node')[8]['id']), - output, - '\n Unexpected Error Message: {0}\n'.format( - repr(output))) - - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - def 
test_file_size_corruption_no_validate(self): - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - # initdb_params=['--data-checksums'], - ) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - - node.slow_start() - - node.safe_psql( - "postgres", - "create table t_heap as select 1 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1000) i") - node.safe_psql( - "postgres", - "CHECKPOINT;") - - heap_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - heap_size = node.safe_psql( - "postgres", - "select pg_relation_size('t_heap')") - - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=["-j", "4"], asynchronous=False, gdb=False) - - node.stop() - node.cleanup() - - # Let`s do file corruption - with open( - os.path.join( - backup_dir, "backups", 'node', backup_id, - "database", heap_path), "rb+", 0) as f: - f.truncate(int(heap_size) - 4096) - f.flush() - f.close - - node.cleanup() - - try: - self.restore_node( - backup_dir, 'node', node, - options=["--no-validate"]) - except ProbackupException as e: - self.assertTrue( - "ERROR: Backup files restoring failed" in e.message, - repr(e.message)) - - # @unittest.skip("skip") - def test_validate_specific_backup_with_missing_backup(self): - """ - PAGE3_2 - PAGE3_1 - FULL3 - PAGE2_5 - PAGE2_4 <- validate - PAGE2_3 - PAGE2_2 <- missing - PAGE2_1 - FULL2 - PAGE1_2 - PAGE1_1 - FULL1 - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # CHAIN1 - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - # CHAIN2 - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - missing_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - validate_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - # CHAIN3 - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - old_directory = os.path.join(backup_dir, 'backups', 'node', missing_id) - new_directory = os.path.join(backup_dir, missing_id) - - os.rename(old_directory, new_directory) - - try: - self.validate_pb(backup_dir, 'node', validate_id, - options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of backup dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[7]['id'], missing_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), 
self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[6]['id'], missing_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[5]['id'], missing_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[10]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') - # missing backup - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - try: - self.validate_pb(backup_dir, 'node', validate_id, - options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of backup dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[7]['id'], missing_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[6]['id'], missing_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[5]['id'], missing_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - os.rename(new_directory, old_directory) - - # Revalidate backup chain - self.validate_pb(backup_dir, 'node', validate_id, options=["-j", "4"]) - - self.assertTrue(self.show_pb(backup_dir, 'node')[11]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[10]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - # @unittest.skip("skip") - def test_validate_specific_backup_with_missing_backup_1(self): - """ - PAGE3_2 - PAGE3_1 - FULL3 - PAGE2_5 - PAGE2_4 <- validate - PAGE2_3 - PAGE2_2 <- missing - PAGE2_1 - FULL2 <- missing 
- PAGE1_2 - PAGE1_1 - FULL1 - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # CHAIN1 - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - # CHAIN2 - missing_full_id = self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - missing_page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - validate_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - # CHAIN3 - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - page_old_directory = os.path.join( - backup_dir, 'backups', 'node', missing_page_id) - page_new_directory = os.path.join(backup_dir, missing_page_id) - os.rename(page_old_directory, page_new_directory) - - full_old_directory = os.path.join( - backup_dir, 'backups', 'node', missing_full_id) - full_new_directory = os.path.join(backup_dir, missing_full_id) - os.rename(full_old_directory, full_new_directory) - - try: - self.validate_pb(backup_dir, 'node', validate_id, - options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of backup dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[6]['id'], missing_page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[5]['id'], missing_page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[4]['id'], missing_page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') - # PAGE2_1 - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') # <- SHit - # FULL2 - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - os.rename(page_new_directory, page_old_directory) - os.rename(full_new_directory, 
full_old_directory) - - # Revalidate backup chain - self.validate_pb(backup_dir, 'node', validate_id, options=["-j", "4"]) - - self.assertTrue(self.show_pb(backup_dir, 'node')[11]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[10]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'ORPHAN') # <- Fail - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - # @unittest.skip("skip") - def test_validate_with_missing_backup_1(self): - """ - PAGE3_2 - PAGE3_1 - FULL3 - PAGE2_5 - PAGE2_4 <- validate - PAGE2_3 - PAGE2_2 <- missing - PAGE2_1 - FULL2 <- missing - PAGE1_2 - PAGE1_1 - FULL1 - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # CHAIN1 - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - # CHAIN2 - missing_full_id = self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - missing_page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - validate_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - # CHAIN3 - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - # Break PAGE - page_old_directory = os.path.join( - backup_dir, 'backups', 'node', missing_page_id) - page_new_directory = os.path.join(backup_dir, missing_page_id) - os.rename(page_old_directory, page_new_directory) - - # Break FULL - full_old_directory = os.path.join( - backup_dir, 'backups', 'node', missing_full_id) - full_new_directory = os.path.join(backup_dir, missing_full_id) - os.rename(full_old_directory, full_new_directory) - - try: - self.validate_pb(backup_dir, 'node', validate_id, - options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of backup dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[6]['id'], missing_page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[5]['id'], 
missing_page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[4]['id'], missing_page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') - # PAGE2_2 is missing - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - # FULL1 - is missing - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - os.rename(page_new_directory, page_old_directory) - - # Revalidate backup chain - try: - self.validate_pb(backup_dir, 'node', validate_id, - options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of backup dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} has status: ORPHAN'.format( - validate_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[7]['id'], - missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[6]['id'], - missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[5]['id'], - missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[4]['id'], - missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[3]['id'], - missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[10]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'ORPHAN') - # FULL1 - is missing - 
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - os.rename(full_new_directory, full_old_directory) - - # Revalidate chain - self.validate_pb(backup_dir, 'node', validate_id, options=["-j", "4"]) - - self.assertTrue(self.show_pb(backup_dir, 'node')[11]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[10]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - # @unittest.skip("skip") - def test_validate_with_missing_backup_2(self): - """ - PAGE3_2 - PAGE3_1 - FULL3 - PAGE2_5 - PAGE2_4 - PAGE2_3 - PAGE2_2 <- missing - PAGE2_1 - FULL2 <- missing - PAGE1_2 - PAGE1_1 - FULL1 - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # CHAIN1 - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - # CHAIN2 - missing_full_id = self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - missing_page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - # CHAIN3 - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - page_old_directory = os.path.join(backup_dir, 'backups', 'node', missing_page_id) - page_new_directory = os.path.join(backup_dir, missing_page_id) - os.rename(page_old_directory, page_new_directory) - - full_old_directory = os.path.join(backup_dir, 'backups', 'node', missing_full_id) - full_new_directory = os.path.join(backup_dir, missing_full_id) - os.rename(full_old_directory, full_new_directory) - - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of backup dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[6]['id'], missing_page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - 
repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[5]['id'], missing_page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[4]['id'], missing_page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[3]['id'], missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') - # PAGE2_2 is missing - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'ORPHAN') - # FULL1 - is missing - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - os.rename(page_new_directory, page_old_directory) - - # Revalidate backup chain - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of backup dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[7]['id'], missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[6]['id'], missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[5]['id'], missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[4]['id'], missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[3]['id'], missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[10]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 
'node')[5]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'ORPHAN') - # FULL1 - is missing - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - # @unittest.skip("skip") - def test_corrupt_pg_control_via_resetxlog(self): - """ PGPRO-2096 """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - backup_id = self.backup_node(backup_dir, 'node', node) - - if self.get_version(node) < 100000: - pg_resetxlog_path = self.get_bin_path('pg_resetxlog') - wal_dir = 'pg_xlog' - else: - pg_resetxlog_path = self.get_bin_path('pg_resetwal') - wal_dir = 'pg_wal' - - os.mkdir( - os.path.join( - backup_dir, 'backups', 'node', backup_id, 'database', wal_dir, 'archive_status')) - - pg_control_path = os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', 'global', 'pg_control') - - md5_before = hashlib.md5( - open(pg_control_path, 'rb').read()).hexdigest() - - self.run_binary( - [ - pg_resetxlog_path, - '-D', - os.path.join(backup_dir, 'backups', 'node', backup_id, 'database'), - '-o 42', - '-f' - ], - asynchronous=False) - - md5_after = hashlib.md5( - open(pg_control_path, 'rb').read()).hexdigest() - - if self.verbose: - print('\n MD5 BEFORE resetxlog: {0}\n MD5 AFTER resetxlog: {1}'.format( - md5_before, md5_after)) - - # Validate backup - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of pg_control change.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'data files are corrupted', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_validation_after_backup(self): - """""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True, options=['--stream']) - - gdb.set_breakpoint('pgBackupValidate') - gdb.run_until_break() - - backup_id = self.show_pb(backup_dir, 'node')[0]['id'] - - file = os.path.join( - backup_dir, "backups", "node", backup_id, - "database", "postgresql.conf") - os.remove(file) - - gdb.continue_execution_until_exit() - - self.assertEqual( - 'CORRUPT', - self.show_pb(backup_dir, 'node', backup_id)['status'], - 'Backup STATUS should be "ERROR"') - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_validate_corrupt_tablespace_map(self): - """ - Check that corruption in tablespace_map is detected - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - 
base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.create_tblspace_in_node(node, 'external_dir') - - node.safe_psql( - 'postgres', - 'CREATE TABLE t_heap(a int) TABLESPACE "external_dir"') - - # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - tablespace_map = os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', 'tablespace_map') - - # Corrupt tablespace_map file in FULL backup - with open(tablespace_map, "rb+", 0) as f: - f.seek(84) - f.write(b"blah") - f.flush() - f.close - - try: - self.validate_pb(backup_dir, 'node', backup_id=backup_id) - self.assertEqual( - 1, 0, - "Expecting Error because tablespace_map is corrupted.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Invalid CRC of backup file', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - #TODO fix the test - @unittest.expectedFailure - # @unittest.skip("skip") - def test_validate_target_lsn(self): - """ - Check validation to specific LSN - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "create table t_heap as select 1 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - - self.restore_node(backup_dir, 'node', node_restored) - - self.set_auto_conf( - node_restored, {'port': node_restored.port}) - - node_restored.slow_start() - - self.switch_wal_segment(node) - - backup_id = self.backup_node( - backup_dir, 'node', node_restored, - data_dir=node_restored.data_dir) - - target_lsn = self.show_pb(backup_dir, 'node')[1]['stop-lsn'] - - self.delete_pb(backup_dir, 'node', backup_id) - - self.validate_pb( - backup_dir, 'node', - options=[ - '--recovery-target-timeline=2', - '--recovery-target-lsn={0}'.format(target_lsn)]) - - @unittest.skip("skip") - def test_partial_validate_empty_and_mangled_database_map(self): - """ - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - - node.slow_start() - - # create databases - for i in range(1, 10, 1): - node.safe_psql( - 'postgres', - 'CREATE database db{0}'.format(i)) - - # FULL backup with database_map - backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - pgdata = self.pgdata_content(node.data_dir) - - # truncate database_map - path = os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', 'database_map') - with open(path, "w") as f: - f.close() - - try: - self.validate_pb( - 
backup_dir, 'node', - options=["--db-include=db1"]) - self.assertEqual( - 1, 0, - "Expecting Error because database_map is empty.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "WARNING: Backup {0} data files are corrupted".format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # mangle database_map - with open(path, "w") as f: - f.write("42") - f.close() - - try: - self.validate_pb( - backup_dir, 'node', - options=["--db-include=db1"]) - self.assertEqual( - 1, 0, - "Expecting Error because database_map is empty.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "WARNING: Backup {0} data files are corrupted".format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - @unittest.skip("skip") - def test_partial_validate_exclude(self): - """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - for i in range(1, 10, 1): - node.safe_psql( - 'postgres', - 'CREATE database db{0}'.format(i)) - - # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) - - try: - self.validate_pb( - backup_dir, 'node', - options=[ - "--db-include=db1", - "--db-exclude=db2"]) - self.assertEqual( - 1, 0, - "Expecting Error because of 'db-exclude' and 'db-include'.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: You cannot specify '--db-include' " - "and '--db-exclude' together", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.validate_pb( - backup_dir, 'node', - options=[ - "--db-exclude=db1", - "--db-exclude=db5", - "--log-level-console=verbose"]) - self.assertEqual( - 1, 0, - "Expecting Error because of missing backup ID.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: You must specify parameter (-i, --backup-id) for partial validation", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - output = self.validate_pb( - backup_dir, 'node', backup_id, - options=[ - "--db-exclude=db1", - "--db-exclude=db5", - "--log-level-console=verbose"]) - - self.assertIn( - "VERBOSE: Skip file validation due to partial restore", output) - - @unittest.skip("skip") - def test_partial_validate_include(self): - """ - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - for i in range(1, 10, 1): - node.safe_psql( - 'postgres', - 'CREATE database db{0}'.format(i)) - - # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) - - try: - self.validate_pb( - backup_dir, 'node', - options=[ - "--db-include=db1", - "--db-exclude=db2"]) - self.assertEqual( - 1, 0, - 
"Expecting Error because of 'db-exclude' and 'db-include'.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: You cannot specify '--db-include' " - "and '--db-exclude' together", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - output = self.validate_pb( - backup_dir, 'node', backup_id, - options=[ - "--db-include=db1", - "--db-include=db5", - "--db-include=postgres", - "--log-level-console=verbose"]) - - self.assertIn( - "VERBOSE: Skip file validation due to partial restore", output) - - output = self.validate_pb( - backup_dir, 'node', backup_id, - options=["--log-level-console=verbose"]) - - self.assertNotIn( - "VERBOSE: Skip file validation due to partial restore", output) - - # @unittest.skip("skip") - def test_not_validate_diffenent_pg_version(self): - """Do not validate backup, if binary is compiled with different PG version""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - backup_id = self.backup_node(backup_dir, 'node', node) - - control_file = os.path.join( - backup_dir, "backups", "node", backup_id, - "backup.control") - - pg_version = node.major_version - - if pg_version.is_integer(): - pg_version = int(pg_version) - - fake_new_pg_version = pg_version + 1 - - with open(control_file, 'r') as f: - data = f.read(); - - data = data.replace( - "server-version = {0}".format(str(pg_version)), - "server-version = {0}".format(str(fake_new_pg_version))) - - with open(control_file, 'w') as f: - f.write(data); - - try: - self.validate_pb(backup_dir) - self.assertEqual( - 1, 0, - "Expecting Error because validation is forbidden if server version of backup " - "is different from the server version of pg_probackup.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup {0} has server version".format(backup_id), - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_validate_corrupt_page_header_map(self): - """ - Check that corruption in page_header_map is detected - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - ok_1 = self.backup_node(backup_dir, 'node', node, options=['--stream']) - - # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - ok_2 = self.backup_node(backup_dir, 'node', node, options=['--stream']) - - page_header_map = os.path.join( - backup_dir, 'backups', 'node', backup_id, 'page_header_map') - - # Corrupt tablespace_map file in FULL backup - with open(page_header_map, "rb+", 0) as f: - f.seek(42) - f.write(b"blah") - f.flush() - f.close - - try: - self.validate_pb(backup_dir, 'node', backup_id=backup_id) - self.assertEqual( - 1, 0, - "Expecting Error because page_header is corrupted.\n " - "Output: {0} \n CMD: 
{1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - r'WARNING: An error occured during metadata decompression for file "[\w/]+": (data|buffer) error', - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn("Backup {0} is corrupt".format(backup_id), e.message) - - try: - self.validate_pb(backup_dir) - self.assertEqual( - 1, 0, - "Expecting Error because page_header is corrupted.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'WARNING: An error occured during metadata decompression' in e.message and - 'data error' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn("INFO: Backup {0} data files are valid".format(ok_1), e.message) - self.assertIn("WARNING: Backup {0} data files are corrupted".format(backup_id), e.message) - self.assertIn("INFO: Backup {0} data files are valid".format(ok_2), e.message) - - self.assertIn("WARNING: Some backups are not valid", e.message) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_validate_truncated_page_header_map(self): - """ - Check that corruption in page_header_map is detected - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - ok_1 = self.backup_node(backup_dir, 'node', node, options=['--stream']) - - # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - ok_2 = self.backup_node(backup_dir, 'node', node, options=['--stream']) - - page_header_map = os.path.join( - backup_dir, 'backups', 'node', backup_id, 'page_header_map') - - # truncate page_header_map file - with open(page_header_map, "rb+", 0) as f: - f.truncate(121) - f.flush() - f.close - - try: - self.validate_pb(backup_dir, 'node', backup_id=backup_id) - self.assertEqual( - 1, 0, - "Expecting Error because page_header is corrupted.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Backup {0} is corrupt'.format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.validate_pb(backup_dir) - self.assertEqual( - 1, 0, - "Expecting Error because page_header is corrupted.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn("INFO: Backup {0} data files are valid".format(ok_1), e.message) - self.assertIn("WARNING: Backup {0} data files are corrupted".format(backup_id), e.message) - self.assertIn("INFO: Backup {0} data files are valid".format(ok_2), e.message) - self.assertIn("WARNING: Some backups are not valid", e.message) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_validate_missing_page_header_map(self): - """ - Check that corruption in page_header_map is detected - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', 
node) - node.slow_start() - - ok_1 = self.backup_node(backup_dir, 'node', node, options=['--stream']) - - # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - ok_2 = self.backup_node(backup_dir, 'node', node, options=['--stream']) - - page_header_map = os.path.join( - backup_dir, 'backups', 'node', backup_id, 'page_header_map') - - # unlink page_header_map file - os.remove(page_header_map) - - try: - self.validate_pb(backup_dir, 'node', backup_id=backup_id) - self.assertEqual( - 1, 0, - "Expecting Error because page_header is corrupted.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Backup {0} is corrupt'.format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.validate_pb(backup_dir) - self.assertEqual( - 1, 0, - "Expecting Error because page_header is corrupted.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn("INFO: Backup {0} data files are valid".format(ok_1), e.message) - self.assertIn("WARNING: Backup {0} data files are corrupted".format(backup_id), e.message) - self.assertIn("INFO: Backup {0} data files are valid".format(ok_2), e.message) - self.assertIn("WARNING: Some backups are not valid", e.message) - - # @unittest.expectedFailure - # @unittest.skip("skip") - def test_no_validate_tablespace_map(self): - """ - Check that --no-validate is propagated to tablespace_map - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.create_tblspace_in_node(node, 'external_dir') - - node.safe_psql( - 'postgres', - 'CREATE TABLE t_heap(a int) TABLESPACE "external_dir"') - - tblspace_new = self.get_tblspace_path(node, 'external_dir_new') - - oid = node.safe_psql( - 'postgres', - "select oid from pg_tablespace where spcname = 'external_dir'").decode('utf-8').rstrip() - - # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - pgdata = self.pgdata_content(node.data_dir) - - tablespace_map = os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', 'tablespace_map') - - # overwrite tablespace_map file - with open(tablespace_map, "w") as f: - f.write("{0} {1}".format(oid, tblspace_new)) - f.close - - node.cleanup() - - self.restore_node(backup_dir, 'node', node, options=['--no-validate']) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # check that tablespace restore as symlink - tablespace_link = os.path.join(node.data_dir, 'pg_tblspc', oid) - self.assertTrue( - os.path.islink(tablespace_link), - "'%s' is not a symlink" % tablespace_link) - - self.assertEqual( - os.readlink(tablespace_link), - tblspace_new, - "Symlink '{0}' do not points to '{1}'".format(tablespace_link, tblspace_new)) - -# validate empty backup list -# page from future during validate -# page from future during backup - -# corrupt block, so file become unaligned: -# 712 Assert(header.compressed_size <= BLCKSZ); -# 713 -# 714 read_len = fread(compressed_page.data, 1, -# 715 MAXALIGN(header.compressed_size), in); -# 716 if (read_len != 
MAXALIGN(header.compressed_size)) -# -> 717 elog(ERROR, "cannot read block %u of \"%s\" read %lu of %d", -# 718 blknum, file->path, read_len, header.compressed_size); From 08557855e2df28e07584725114aade6993ab004f Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 24 Nov 2022 12:29:44 +0300 Subject: [PATCH 395/525] fix error During refactoring base36enc it were mistakenly changed from current_backup->start_time to current_backup->parent_backup --- src/validate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/validate.c b/src/validate.c index b89b67b84..9372b082c 100644 --- a/src/validate.c +++ b/src/validate.c @@ -508,7 +508,7 @@ do_validate_instance(InstanceState *instanceState) /* determine missing backup ID */ parent_backup_id = base36enc(tmp_backup->parent_backup); - current_backup_id = base36enc(current_backup->parent_backup); + current_backup_id = backup_id_of(current_backup); corrupted_backup_found = true; /* orphanize current_backup */ From 8488b289ba220440c53bd7dfd8bc682be8df953e Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 24 Nov 2022 14:23:45 +0300 Subject: [PATCH 396/525] fix test_backup_via_unprivileged_user with ptrack enabled It is garbage remained from old ptrack version support. --- tests/auth_test.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/auth_test.py b/tests/auth_test.py index 7e0b6fcfb..525dee258 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -148,8 +148,6 @@ def test_backup_via_unprivileged_user(self): node.safe_psql( "test1", "create table t1 as select generate_series(0,100)") - if self.ptrack: - self.set_auto_conf(node, {'ptrack_enable': 'on'}) node.stop() node.slow_start() From e674202eac14e3ef65a63c4f15ba6ad0969bef04 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 24 Nov 2022 14:42:12 +0300 Subject: [PATCH 397/525] test_backup_via_unprivileged_user - test with ptrack correctly --- tests/auth_test.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tests/auth_test.py b/tests/auth_test.py index 525dee258..d0be9f344 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -33,6 +33,7 @@ def test_backup_via_unprivileged_user(self): node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, + ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') @@ -41,6 +42,11 @@ def test_backup_via_unprivileged_user(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() + if self.ptrack: + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + node.safe_psql("postgres", "CREATE ROLE backup with LOGIN") try: @@ -160,9 +166,10 @@ def test_backup_via_unprivileged_user(self): backup_dir, 'node', node, options=['-U', 'backup']) # PTRACK -# self.backup_node( -# backup_dir, 'node', node, -# backup_type='ptrack', options=['-U', 'backup']) + if self.ptrack: + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=['-U', 'backup']) class AuthTest(unittest.TestCase): From 73cce507c2aeafc20437b2b0c69328120f8a5d15 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 25 Nov 2022 03:37:19 +0300 Subject: [PATCH 398/525] [PBCKP-358] fix CatchupTest.test_unclean_(delta|ptrack)_catchup and BugTest.test_minrecpoint_on_replica as well Tests were broken with introduction of "startness" handling in 9924ab014 [PBCKP-304] extended testgres.PosgresNode to ... since tests uses os.kill directly. 
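Editor's illustration (not part of the patch): killing the postmaster with a raw os.kill leaves the test node wrapper believing the server is still running, so later stop()/cleanup() calls act on an already-dead process. Routing the signal through the node object keeps that "startness" flag consistent. A minimal sketch, assuming a testgres-style wrapper with `pid` and `is_started` attributes; the real helper added in the diff below also supports signalling auxiliary processes and falls back to SIGBREAK on Windows:

```python
import os
import signal

class NodeSketch:
    """Toy stand-in for the testgres node wrapper used by these tests."""
    def __init__(self, pid):
        self.pid = pid          # postmaster pid
        self.is_started = True  # the "startness" flag the broken tests tripped over

    def kill(self, sig=signal.SIGKILL):
        # Send the signal *and* update the wrapper's bookkeeping, unlike a bare
        # os.kill(node.pid, signal.SIGKILL), which leaves is_started set to True.
        if self.is_started:
            os.kill(self.pid, sig)
            self.is_started = False
```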
--- tests/catchup.py | 4 ++-- tests/helpers/ptrack_helpers.py | 9 +++++++++ tests/pgpro2068.py | 3 +-- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/tests/catchup.py b/tests/catchup.py index 88170c807..c94a5300d 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -972,7 +972,7 @@ def test_unclean_delta_catchup(self): self.set_auto_conf(dst_pg, dst_options) dst_pg.slow_start() self.assertNotEqual(dst_pg.pid, 0, "Cannot detect pid of running postgres") - os.kill(dst_pg.pid, signal.SIGKILL) + dst_pg.kill() # preparation 3: make changes on master (source) src_pg.pgbench_init(scale = 10) @@ -1061,7 +1061,7 @@ def test_unclean_ptrack_catchup(self): self.set_auto_conf(dst_pg, dst_options) dst_pg.slow_start() self.assertNotEqual(dst_pg.pid, 0, "Cannot detect pid of running postgres") - os.kill(dst_pg.pid, signal.SIGKILL) + dst_pg.kill() # preparation 3: make changes on master (source) src_pg.pgbench_init(scale = 10) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index e35f57bce..ab6bdda68 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -3,6 +3,7 @@ import gc import unittest from sys import exit, argv, version_info +import signal import subprocess import shutil import six @@ -190,6 +191,14 @@ def stop(self, *args, **kwargs): self.is_started = False return result + def kill(self, someone = None): + if self.is_started: + sig = signal.SIGKILL if os.name != 'nt' else signal.SIGBREAK + if someone == None: + os.kill(self.pid, sig) + else: + os.kill(self.auxiliary_pids[someone][0], sig) + self.is_started = False class ProbackupTest(object): # Class attributes diff --git a/tests/pgpro2068.py b/tests/pgpro2068.py index 434ce2800..da76a8815 100644 --- a/tests/pgpro2068.py +++ b/tests/pgpro2068.py @@ -85,7 +85,6 @@ def test_minrecpoint_on_replica(self): # get pids of replica background workers startup_pid = replica.auxiliary_pids[ProcessType.Startup][0] checkpointer_pid = replica.auxiliary_pids[ProcessType.Checkpointer][0] - bgwriter_pid = replica.auxiliary_pids[ProcessType.BackgroundWriter][0] # break checkpointer on UpdateLastRemovedPtr gdb_checkpointer = self.gdb_attach(checkpointer_pid) @@ -108,7 +107,7 @@ def test_minrecpoint_on_replica(self): pgbench.stdout.close() # kill someone, we need a crash - os.kill(int(bgwriter_pid), 9) + replica.kill(someone=ProcessType.BackgroundWriter) gdb_recovery._execute('detach') gdb_checkpointer._execute('detach') From b7a183081ecee1622cc6f66f59d65abded73abae Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 25 Nov 2022 06:40:07 +0300 Subject: [PATCH 399/525] [PBCKP-357] fix UnicodeDecodeError Got in some tests: ``` Traceback (most recent call last): File "pg_probackup/tests/replica.py", line 625, in test_replica_stop_lsn_null_offset gdb_checkpointer = self.gdb_attach(bgwriter_pid) File "pg_probackup/tests/helpers/ptrack_helpers.py", line 1984, in gdb_attach return GDBobj([str(pid)], self, attach=True) File "pg_probackup/tests/helpers/ptrack_helpers.py", line 2054, in __init__ line = self.get_line() File "pg_probackup/tests/helpers/ptrack_helpers.py", line 2065, in get_line line = self.proc.stdout.readline() File "/usr/lib/python3.10/codecs.py", line 322, in decode (result, consumed) = self._buffer_decode(data, self.errors, final) UnicodeDecodeError: 'utf-8' codec can't decode byte 0xd0 in position 189: invalid continuation byte ``` Fixed with `errors='replace'` --- tests/helpers/ptrack_helpers.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index ab6bdda68..a141d3f44 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -2045,7 +2045,8 @@ def __init__(self, cmd, env, attach=False): stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=0, - universal_newlines=True + text=True, + errors='replace', ) self.gdb_pid = self.proc.pid From 7eab6e346353349fedfed3891babd29590fbc647 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 25 Nov 2022 07:56:12 +0300 Subject: [PATCH 400/525] [PBCKP-358] sort directories to not skip 'base/1' because of 'base/12699' Looks like os.walk may yield dirs unsorted. This way "check if directory already here as part of larger directory" man mistakenly skip short 'base/1' as "part of" 'base/12699' --- tests/helpers/ptrack_helpers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index a141d3f44..4484d3167 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1799,7 +1799,7 @@ def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): ) for root, dirs, files in os.walk(pgdata, topdown=False, followlinks=True): - for directory in dirs: + for directory in sorted(dirs): directory_path = os.path.join(root, directory) directory_relpath = os.path.relpath(directory_path, pgdata) From 19a7c5b01f678cc2ef458e4fd3cd017378de79bf Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 25 Nov 2022 07:59:14 +0300 Subject: [PATCH 401/525] tests: do not read whole file at once for digest. Iterate instead. --- tests/helpers/ptrack_helpers.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 4484d3167..b0b997616 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1774,9 +1774,9 @@ def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): file_relpath = os.path.relpath(file_fullpath, pgdata) directory_dict['files'][file_relpath] = {'is_datafile': False} with open(file_fullpath, 'rb') as f: - content = f.read() # truncate cfm's content's zero tail if file_relpath.endswith('.cfm'): + content = f.read() zero64 = b"\x00"*64 l = len(content) while l > 64: @@ -1785,9 +1785,14 @@ def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): break l = s content = content[:l] - directory_dict['files'][file_relpath]['md5'] = hashlib.md5(content).hexdigest() -# directory_dict['files'][file_relpath]['md5'] = hashlib.md5( -# f = open(file_fullpath, 'rb').read()).hexdigest() + digest = hashlib.md5(content) + else: + digest = hashlib.md5() + while True: + b = f.read(64*1024) + if not b: break + digest.update(b) + directory_dict['files'][file_relpath]['md5'] = digest.hexdigest() # crappy algorithm if file.isdigit(): From 20667e959478ec0108afecec65b077fce708a7f0 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 25 Nov 2022 08:30:45 +0300 Subject: [PATCH 402/525] tests: better directory collection in pgdata_content There is no need to walk pgdata twice. We could delete parent directories instead of skipping them. 
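Editor's note (not part of the patch): the idea is to end up with only leaf directories in the collected dict by dropping a parent entry as soon as one of its children is recorded, instead of rescanning the already-collected paths for every new directory. Below is a minimal standalone sketch of that trick, written with a default top-down os.walk so parents are always seen before their children; the actual change folds the same deletion into the existing pgdata_content() walk:

```python
import os

def collect_leaf_dirs(root_path):
    """Return relative paths of directories that have no subdirectories."""
    leaf_dirs = {}
    for root, dirs, _files in os.walk(root_path):  # top-down: parents first
        for d in dirs:
            rel = os.path.relpath(os.path.join(root, d), root_path)
            parent = os.path.dirname(rel)
            if parent in leaf_dirs:
                del leaf_dirs[parent]  # parent is covered by this child
            leaf_dirs[rel] = {}
    return leaf_dirs
```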
--- tests/helpers/ptrack_helpers.py | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index b0b997616..dafa1e0bb 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1803,27 +1803,13 @@ def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): file_fullpath, size_in_pages ) - for root, dirs, files in os.walk(pgdata, topdown=False, followlinks=True): - for directory in sorted(dirs): + for directory in dirs: directory_path = os.path.join(root, directory) directory_relpath = os.path.relpath(directory_path, pgdata) - - found = False - for d in dirs_to_ignore: - if d in directory_relpath: - found = True - break - - # check if directory already here as part of larger directory - if not found: - for d in directory_dict['dirs']: - # print("OLD dir {0}".format(d)) - if directory_relpath in d: - found = True - break - - if not found: - directory_dict['dirs'][directory_relpath] = {} + parent = os.path.dirname(directory_relpath) + if parent in directory_dict['dirs']: + del directory_dict['dirs'][parent] + directory_dict['dirs'][directory_relpath] = {} # get permissions for every file and directory for file in directory_dict['dirs']: From 1617eb34ecccd8a5f408b7a4712611bb0f7681d1 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 25 Nov 2022 10:54:41 +0300 Subject: [PATCH 403/525] tests: prettify pgdata_content and compare_pgdata --- tests/helpers/ptrack_helpers.py | 234 ++++++++++++++++---------------- 1 file changed, 116 insertions(+), 118 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index dafa1e0bb..fc193fba4 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1772,7 +1772,8 @@ def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): file_fullpath = os.path.join(root, file) file_relpath = os.path.relpath(file_fullpath, pgdata) - directory_dict['files'][file_relpath] = {'is_datafile': False} + cfile = ContentFile(file.isdigit()) + directory_dict['files'][file_relpath] = cfile with open(file_fullpath, 'rb') as f: # truncate cfm's content's zero tail if file_relpath.endswith('.cfm'): @@ -1792,14 +1793,12 @@ def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): b = f.read(64*1024) if not b: break digest.update(b) - directory_dict['files'][file_relpath]['md5'] = digest.hexdigest() + cfile.md5 = digest.hexdigest() # crappy algorithm - if file.isdigit(): - directory_dict['files'][file_relpath]['is_datafile'] = True + if cfile.is_datafile: size_in_pages = os.path.getsize(file_fullpath)/8192 - directory_dict['files'][file_relpath][ - 'md5_per_page'] = self.get_md5_per_page_for_fork( + cfile.md5_per_page = self.get_md5_per_page_for_fork( file_fullpath, size_in_pages ) @@ -1809,18 +1808,16 @@ def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): parent = os.path.dirname(directory_relpath) if parent in directory_dict['dirs']: del directory_dict['dirs'][parent] - directory_dict['dirs'][directory_relpath] = {} + directory_dict['dirs'][directory_relpath] = ContentDir() # get permissions for every file and directory - for file in directory_dict['dirs']: + for file, cfile in directory_dict['dirs'].items(): full_path = os.path.join(pgdata, file) - directory_dict['dirs'][file]['mode'] = os.stat( - full_path).st_mode + cfile.mode = os.stat(full_path).st_mode - for file in directory_dict['files']: + for file, cdir in 
directory_dict['files'].items(): full_path = os.path.join(pgdata, file) - directory_dict['files'][file]['mode'] = os.stat( - full_path).st_mode + cdir.mode = os.stat(full_path).st_mode return directory_dict @@ -1852,123 +1849,117 @@ def compare_pgdata(self, original_pgdata, restored_pgdata, exclusion_dict = dict error_message = 'Restored PGDATA is not equal to original!\n' # Compare directories - for directory in restored_pgdata['dirs']: - if directory not in original_pgdata['dirs']: + restored_dirs = set(restored_pgdata['dirs']) + original_dirs = set(restored_pgdata['dirs']) + + for directory in sorted(restored_dirs - original_dirs): + fail = True + error_message += '\nDirectory was not present' + error_message += ' in original PGDATA: {0}\n'.format( + os.path.join(restored_pgdata['pgdata'], directory)) + + for directory in sorted(original_dirs - restored_dirs): + fail = True + error_message += '\nDirectory dissappeared' + error_message += ' in restored PGDATA: {0}\n'.format( + os.path.join(restored_pgdata['pgdata'], directory)) + + for directory in sorted(original_dirs & restored_dirs): + original = original_pgdata['dirs'][directory] + restored = restored_pgdata['dirs'][directory] + if original.mode != restored.mode: fail = True - error_message += '\nDirectory was not present' - error_message += ' in original PGDATA: {0}\n'.format( - os.path.join(restored_pgdata['pgdata'], directory)) - else: - if ( - restored_pgdata['dirs'][directory]['mode'] != - original_pgdata['dirs'][directory]['mode'] - ): - fail = True - error_message += '\nDir permissions mismatch:\n' - error_message += ' Dir old: {0} Permissions: {1}\n'.format( - os.path.join(original_pgdata['pgdata'], directory), - original_pgdata['dirs'][directory]['mode']) - error_message += ' Dir new: {0} Permissions: {1}\n'.format( - os.path.join(restored_pgdata['pgdata'], directory), - restored_pgdata['dirs'][directory]['mode']) - - for directory in original_pgdata['dirs']: - if directory not in restored_pgdata['dirs']: - fail = True - error_message += '\nDirectory dissappeared' - error_message += ' in restored PGDATA: {0}\n'.format( - os.path.join(restored_pgdata['pgdata'], directory)) - - for file in restored_pgdata['files']: + error_message += '\nDir permissions mismatch:\n' + error_message += ' Dir old: {0} Permissions: {1}\n'.format( + os.path.join(original_pgdata['pgdata'], directory), + original.mode) + error_message += ' Dir new: {0} Permissions: {1}\n'.format( + os.path.join(restored_pgdata['pgdata'], directory), + restored.mode) + + restored_files = set(restored_pgdata['files']) + original_files = set(restored_pgdata['files']) + + for file in sorted(restored_files - original_files): # File is present in RESTORED PGDATA # but not present in ORIGINAL # only backup_label is allowed - if file not in original_pgdata['files']: - fail = True - error_message += '\nFile is not present' - error_message += ' in original PGDATA: {0}\n'.format( - os.path.join(restored_pgdata['pgdata'], file)) - - for file in original_pgdata['files']: - if file in restored_pgdata['files']: + fail = True + error_message += '\nFile is not present' + error_message += ' in original PGDATA: {0}\n'.format( + os.path.join(restored_pgdata['pgdata'], file)) + + for file in sorted(original_files - restored_files): + error_message += ( + '\nFile disappearance.\n ' + 'File: {0}\n').format( + os.path.join(restored_pgdata['pgdata'], file) + ) + fail = True - if ( - restored_pgdata['files'][file]['mode'] != - original_pgdata['files'][file]['mode'] - ): + for file in 
sorted(original_files & restored_files): + original = original_pgdata['files'][file] + restored = restored_pgdata['files'][file] + if restored.mode != original.mode: + fail = True + error_message += '\nFile permissions mismatch:\n' + error_message += ' File_old: {0} Permissions: {1:o}\n'.format( + os.path.join(original_pgdata['pgdata'], file), + original.mode) + error_message += ' File_new: {0} Permissions: {1:o}\n'.format( + os.path.join(restored_pgdata['pgdata'], file), + restored.mode) + + if original.md5 != restored.md5: + if file not in exclusion_dict: fail = True - error_message += '\nFile permissions mismatch:\n' - error_message += ' File_old: {0} Permissions: {1:o}\n'.format( + error_message += ( + '\nFile Checksum mismatch.\n' + 'File_old: {0}\nChecksum_old: {1}\n' + 'File_new: {2}\nChecksum_new: {3}\n').format( os.path.join(original_pgdata['pgdata'], file), - original_pgdata['files'][file]['mode']) - error_message += ' File_new: {0} Permissions: {1:o}\n'.format( + original.md5, os.path.join(restored_pgdata['pgdata'], file), - restored_pgdata['files'][file]['mode']) + restored.md5 + ) - if ( - original_pgdata['files'][file]['md5'] != - restored_pgdata['files'][file]['md5'] - ): - if file not in exclusion_dict: - fail = True - error_message += ( - '\nFile Checksum mismatch.\n' - 'File_old: {0}\nChecksum_old: {1}\n' - 'File_new: {2}\nChecksum_new: {3}\n').format( - os.path.join(original_pgdata['pgdata'], file), - original_pgdata['files'][file]['md5'], - os.path.join(restored_pgdata['pgdata'], file), - restored_pgdata['files'][file]['md5'] - ) + if not original.is_datafile: + continue - if original_pgdata['files'][file]['is_datafile']: - for page in original_pgdata['files'][file]['md5_per_page']: - if page not in restored_pgdata['files'][file]['md5_per_page']: - error_message += ( - '\n Page {0} dissappeared.\n ' - 'File: {1}\n').format( - page, - os.path.join( - restored_pgdata['pgdata'], - file - ) - ) - continue - - if not (file in exclusion_dict and page in exclusion_dict[file]): - if ( - original_pgdata['files'][file]['md5_per_page'][page] != - restored_pgdata['files'][file]['md5_per_page'][page] - ): - fail = True - error_message += ( - '\n Page checksum mismatch: {0}\n ' - ' PAGE Checksum_old: {1}\n ' - ' PAGE Checksum_new: {2}\n ' - ' File: {3}\n' - ).format( - page, - original_pgdata['files'][file][ - 'md5_per_page'][page], - restored_pgdata['files'][file][ - 'md5_per_page'][page], - os.path.join( - restored_pgdata['pgdata'], file) - ) - for page in restored_pgdata['files'][file]['md5_per_page']: - if page not in original_pgdata['files'][file]['md5_per_page']: - error_message += '\n Extra page {0}\n File: {1}\n'.format( - page, - os.path.join( - restored_pgdata['pgdata'], file)) + original_pages = set(original.md5_per_page) + restored_pages = set(restored.md5_per_page) - else: - error_message += ( - '\nFile disappearance.\n ' - 'File: {0}\n').format( - os.path.join(restored_pgdata['pgdata'], file) + for page in sorted(original_pages - restored_pages): + error_message += '\n Page {0} dissappeared.\n File: {1}\n'.format( + page, + os.path.join(restored_pgdata['pgdata'], file) ) - fail = True + + + for page in sorted(restored_pages - original_pages): + error_message += '\n Extra page {0}\n File: {1}\n'.format( + page, + os.path.join(restored_pgdata['pgdata'], file)) + + for page in sorted(original_pages & restored_pages): + if file in exclusion_dict and page in exclusion_dict[file]: + continue + + if original.md5_per_page[page] != restored.md5_per_page[page]: + fail = True + 
error_message += ( + '\n Page checksum mismatch: {0}\n ' + ' PAGE Checksum_old: {1}\n ' + ' PAGE Checksum_new: {2}\n ' + ' File: {3}\n' + ).format( + page, + original.md5_per_page[page], + restored.md5_per_page[page], + os.path.join( + restored_pgdata['pgdata'], file) + ) + self.assertFalse(fail, error_message) def gdb_attach(self, pid): @@ -2221,3 +2212,10 @@ def _execute(self, cmd, running=True): # if running and line.startswith('*running'): break return output +class ContentFile(object): + __slots__ = ('is_datafile', 'mode', 'md5', 'md5_per_page') + def __init__(self, is_datafile: bool): + self.is_datafile = is_datafile + +class ContentDir(object): + __slots__ = ('mode') \ No newline at end of file From 440441dc6fdb0562775c400e6109912ff371cd70 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 25 Nov 2022 11:26:54 +0300 Subject: [PATCH 404/525] ... fix names --- tests/helpers/ptrack_helpers.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index fc193fba4..ab164855a 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1811,13 +1811,13 @@ def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): directory_dict['dirs'][directory_relpath] = ContentDir() # get permissions for every file and directory - for file, cfile in directory_dict['dirs'].items(): - full_path = os.path.join(pgdata, file) - cfile.mode = os.stat(full_path).st_mode + for dir, cdir in directory_dict['dirs'].items(): + full_path = os.path.join(pgdata, dir) + cdir.mode = os.stat(full_path).st_mode - for file, cdir in directory_dict['files'].items(): + for file, cfile in directory_dict['files'].items(): full_path = os.path.join(pgdata, file) - cdir.mode = os.stat(full_path).st_mode + cfile.mode = os.stat(full_path).st_mode return directory_dict From d0494662875b9d324e15c6fa346da8e29858239b Mon Sep 17 00:00:00 2001 From: Viktoria Shepard Date: Fri, 25 Nov 2022 12:57:43 +0300 Subject: [PATCH 405/525] PBCKP-306 add '_test' to test files --- ...CVE_2018_1058.py => CVE_2018_1058_test.py} | 0 tests/__init__.py | 86 +++++++++---------- tests/{archive.py => archive_test.py} | 0 tests/{backup.py => backup_test.py} | 0 tests/{catchup.py => catchup_test.py} | 0 tests/{cfs_backup.py => cfs_backup_test.py} | 0 tests/{cfs_catchup.py => cfs_catchup_test.py} | 0 tests/{cfs_restore.py => cfs_restore_test.py} | 0 ..._backup.py => cfs_validate_backup_test.py} | 0 tests/{checkdb.py => checkdb_test.py} | 0 ...compatibility.py => compatibility_test.py} | 0 tests/{compression.py => compression_test.py} | 0 tests/{config.py => config_test.py} | 0 tests/{delete.py => delete_test.py} | 0 tests/{delta.py => delta_test.py} | 0 tests/{exclude.py => exclude_test.py} | 0 tests/{external.py => external_test.py} | 0 ...lse_positive.py => false_positive_test.py} | 0 .../{incr_restore.py => incr_restore_test.py} | 0 tests/{init.py => init_test.py} | 0 tests/{locking.py => locking_test.py} | 0 tests/{logging.py => logging_test.py} | 0 tests/{merge.py => merge_test.py} | 0 tests/{option.py => option_test.py} | 0 tests/{page.py => page_test.py} | 0 tests/{pgpro2068.py => pgpro2068_test.py} | 0 tests/{pgpro560.py => pgpro560_test.py} | 0 tests/{pgpro589.py => pgpro589_test.py} | 0 tests/{ptrack.py => ptrack_test.py} | 0 tests/{remote.py => remote_test.py} | 0 tests/{replica.py => replica_test.py} | 0 tests/{restore.py => restore_test.py} | 0 tests/{retention.py => retention_test.py} | 0 tests/{set_backup.py => 
set_backup_test.py} | 0 tests/{show.py => show_test.py} | 0 ...me_consuming.py => time_consuming_test.py} | 0 tests/{time_stamp.py => time_stamp_test.py} | 0 tests/{validate.py => validate_test.py} | 0 38 files changed, 43 insertions(+), 43 deletions(-) rename tests/{CVE_2018_1058.py => CVE_2018_1058_test.py} (100%) rename tests/{archive.py => archive_test.py} (100%) rename tests/{backup.py => backup_test.py} (100%) rename tests/{catchup.py => catchup_test.py} (100%) rename tests/{cfs_backup.py => cfs_backup_test.py} (100%) rename tests/{cfs_catchup.py => cfs_catchup_test.py} (100%) rename tests/{cfs_restore.py => cfs_restore_test.py} (100%) rename tests/{cfs_validate_backup.py => cfs_validate_backup_test.py} (100%) rename tests/{checkdb.py => checkdb_test.py} (100%) rename tests/{compatibility.py => compatibility_test.py} (100%) rename tests/{compression.py => compression_test.py} (100%) rename tests/{config.py => config_test.py} (100%) rename tests/{delete.py => delete_test.py} (100%) rename tests/{delta.py => delta_test.py} (100%) rename tests/{exclude.py => exclude_test.py} (100%) rename tests/{external.py => external_test.py} (100%) rename tests/{false_positive.py => false_positive_test.py} (100%) rename tests/{incr_restore.py => incr_restore_test.py} (100%) rename tests/{init.py => init_test.py} (100%) rename tests/{locking.py => locking_test.py} (100%) rename tests/{logging.py => logging_test.py} (100%) rename tests/{merge.py => merge_test.py} (100%) rename tests/{option.py => option_test.py} (100%) rename tests/{page.py => page_test.py} (100%) rename tests/{pgpro2068.py => pgpro2068_test.py} (100%) rename tests/{pgpro560.py => pgpro560_test.py} (100%) rename tests/{pgpro589.py => pgpro589_test.py} (100%) rename tests/{ptrack.py => ptrack_test.py} (100%) rename tests/{remote.py => remote_test.py} (100%) rename tests/{replica.py => replica_test.py} (100%) rename tests/{restore.py => restore_test.py} (100%) rename tests/{retention.py => retention_test.py} (100%) rename tests/{set_backup.py => set_backup_test.py} (100%) rename tests/{show.py => show_test.py} (100%) rename tests/{time_consuming.py => time_consuming_test.py} (100%) rename tests/{time_stamp.py => time_stamp_test.py} (100%) rename tests/{validate.py => validate_test.py} (100%) diff --git a/tests/CVE_2018_1058.py b/tests/CVE_2018_1058_test.py similarity index 100% rename from tests/CVE_2018_1058.py rename to tests/CVE_2018_1058_test.py diff --git a/tests/__init__.py b/tests/__init__.py index 40d5faf65..c8d2c70c3 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,13 +1,13 @@ import unittest import os -from . import init, merge, option, show, compatibility, \ - backup, delete, delta, restore, validate, \ - retention, pgpro560, pgpro589, pgpro2068, false_positive, replica, \ - compression, page, ptrack, archive, exclude, cfs_backup, cfs_restore, \ - cfs_validate_backup, auth_test, time_stamp, logging, \ - locking, remote, external, config, checkdb, set_backup, incr_restore, \ - catchup, CVE_2018_1058, time_consuming +from . 
import init_test, merge_test, option_test, show_test, compatibility_test, \ + backup_test, delete_test, delta_test, restore_test, validate_test, \ + retention_test, pgpro560_test, pgpro589_test, pgpro2068_test, false_positive_test, replica_test, \ + compression_test, page_test, ptrack_test, archive_test, exclude_test, cfs_backup_test, cfs_restore_test, \ + cfs_validate_backup_test, auth_test, time_stamp_test, logging_test, \ + locking_test, remote_test, external_test, config_test, checkdb_test, set_backup_test, incr_restore_test, \ + catchup_test, CVE_2018_1058_test, time_consuming_test def load_tests(loader, tests, pattern): @@ -19,50 +19,50 @@ def load_tests(loader, tests, pattern): if 'PG_PROBACKUP_PTRACK' in os.environ: if os.environ['PG_PROBACKUP_PTRACK'] == 'ON': - suite.addTests(loader.loadTestsFromModule(ptrack)) + suite.addTests(loader.loadTestsFromModule(ptrack_test)) # PG_PROBACKUP_LONG section for tests that are long # by design e.g. they contain loops, sleeps and so on if 'PG_PROBACKUP_LONG' in os.environ: if os.environ['PG_PROBACKUP_LONG'] == 'ON': - suite.addTests(loader.loadTestsFromModule(time_consuming)) + suite.addTests(loader.loadTestsFromModule(time_consuming_test)) suite.addTests(loader.loadTestsFromModule(auth_test)) - suite.addTests(loader.loadTestsFromModule(archive)) - suite.addTests(loader.loadTestsFromModule(backup)) - suite.addTests(loader.loadTestsFromModule(catchup)) + suite.addTests(loader.loadTestsFromModule(archive_test)) + suite.addTests(loader.loadTestsFromModule(backup_test)) + suite.addTests(loader.loadTestsFromModule(catchup_test)) if 'PGPROBACKUPBIN_OLD' in os.environ and os.environ['PGPROBACKUPBIN_OLD']: - suite.addTests(loader.loadTestsFromModule(compatibility)) - suite.addTests(loader.loadTestsFromModule(checkdb)) - suite.addTests(loader.loadTestsFromModule(config)) - suite.addTests(loader.loadTestsFromModule(cfs_backup)) - suite.addTests(loader.loadTestsFromModule(cfs_restore)) - suite.addTests(loader.loadTestsFromModule(cfs_validate_backup)) - suite.addTests(loader.loadTestsFromModule(compression)) - suite.addTests(loader.loadTestsFromModule(delete)) - suite.addTests(loader.loadTestsFromModule(delta)) - suite.addTests(loader.loadTestsFromModule(exclude)) - suite.addTests(loader.loadTestsFromModule(external)) - suite.addTests(loader.loadTestsFromModule(false_positive)) - suite.addTests(loader.loadTestsFromModule(init)) - suite.addTests(loader.loadTestsFromModule(incr_restore)) - suite.addTests(loader.loadTestsFromModule(locking)) - suite.addTests(loader.loadTestsFromModule(logging)) - suite.addTests(loader.loadTestsFromModule(merge)) - suite.addTests(loader.loadTestsFromModule(option)) - suite.addTests(loader.loadTestsFromModule(page)) - suite.addTests(loader.loadTestsFromModule(pgpro560)) - suite.addTests(loader.loadTestsFromModule(pgpro589)) - suite.addTests(loader.loadTestsFromModule(pgpro2068)) - suite.addTests(loader.loadTestsFromModule(remote)) - suite.addTests(loader.loadTestsFromModule(replica)) - suite.addTests(loader.loadTestsFromModule(restore)) - suite.addTests(loader.loadTestsFromModule(retention)) - suite.addTests(loader.loadTestsFromModule(set_backup)) - suite.addTests(loader.loadTestsFromModule(show)) - suite.addTests(loader.loadTestsFromModule(time_stamp)) - suite.addTests(loader.loadTestsFromModule(validate)) - suite.addTests(loader.loadTestsFromModule(CVE_2018_1058)) + suite.addTests(loader.loadTestsFromModule(compatibility_test)) + suite.addTests(loader.loadTestsFromModule(checkdb_test)) + 
suite.addTests(loader.loadTestsFromModule(config_test)) + suite.addTests(loader.loadTestsFromModule(cfs_backup_test)) + suite.addTests(loader.loadTestsFromModule(cfs_restore_test)) + suite.addTests(loader.loadTestsFromModule(cfs_validate_backup_test)) + suite.addTests(loader.loadTestsFromModule(compression_test)) + suite.addTests(loader.loadTestsFromModule(delete_test)) + suite.addTests(loader.loadTestsFromModule(delta_test)) + suite.addTests(loader.loadTestsFromModule(exclude_test)) + suite.addTests(loader.loadTestsFromModule(external_test)) + suite.addTests(loader.loadTestsFromModule(false_positive_test)) + suite.addTests(loader.loadTestsFromModule(init_test)) + suite.addTests(loader.loadTestsFromModule(incr_restore_test)) + suite.addTests(loader.loadTestsFromModule(locking_test)) + suite.addTests(loader.loadTestsFromModule(logging_test)) + suite.addTests(loader.loadTestsFromModule(merge_test)) + suite.addTests(loader.loadTestsFromModule(option_test)) + suite.addTests(loader.loadTestsFromModule(page_test)) + suite.addTests(loader.loadTestsFromModule(pgpro560_test)) + suite.addTests(loader.loadTestsFromModule(pgpro589_test)) + suite.addTests(loader.loadTestsFromModule(pgpro2068_test)) + suite.addTests(loader.loadTestsFromModule(remote_test)) + suite.addTests(loader.loadTestsFromModule(replica_test)) + suite.addTests(loader.loadTestsFromModule(restore_test)) + suite.addTests(loader.loadTestsFromModule(retention_test)) + suite.addTests(loader.loadTestsFromModule(set_backup_test)) + suite.addTests(loader.loadTestsFromModule(show_test)) + suite.addTests(loader.loadTestsFromModule(time_stamp_test)) + suite.addTests(loader.loadTestsFromModule(validate_test)) + suite.addTests(loader.loadTestsFromModule(CVE_2018_1058_test)) return suite diff --git a/tests/archive.py b/tests/archive_test.py similarity index 100% rename from tests/archive.py rename to tests/archive_test.py diff --git a/tests/backup.py b/tests/backup_test.py similarity index 100% rename from tests/backup.py rename to tests/backup_test.py diff --git a/tests/catchup.py b/tests/catchup_test.py similarity index 100% rename from tests/catchup.py rename to tests/catchup_test.py diff --git a/tests/cfs_backup.py b/tests/cfs_backup_test.py similarity index 100% rename from tests/cfs_backup.py rename to tests/cfs_backup_test.py diff --git a/tests/cfs_catchup.py b/tests/cfs_catchup_test.py similarity index 100% rename from tests/cfs_catchup.py rename to tests/cfs_catchup_test.py diff --git a/tests/cfs_restore.py b/tests/cfs_restore_test.py similarity index 100% rename from tests/cfs_restore.py rename to tests/cfs_restore_test.py diff --git a/tests/cfs_validate_backup.py b/tests/cfs_validate_backup_test.py similarity index 100% rename from tests/cfs_validate_backup.py rename to tests/cfs_validate_backup_test.py diff --git a/tests/checkdb.py b/tests/checkdb_test.py similarity index 100% rename from tests/checkdb.py rename to tests/checkdb_test.py diff --git a/tests/compatibility.py b/tests/compatibility_test.py similarity index 100% rename from tests/compatibility.py rename to tests/compatibility_test.py diff --git a/tests/compression.py b/tests/compression_test.py similarity index 100% rename from tests/compression.py rename to tests/compression_test.py diff --git a/tests/config.py b/tests/config_test.py similarity index 100% rename from tests/config.py rename to tests/config_test.py diff --git a/tests/delete.py b/tests/delete_test.py similarity index 100% rename from tests/delete.py rename to tests/delete_test.py diff --git a/tests/delta.py 
b/tests/delta_test.py similarity index 100% rename from tests/delta.py rename to tests/delta_test.py diff --git a/tests/exclude.py b/tests/exclude_test.py similarity index 100% rename from tests/exclude.py rename to tests/exclude_test.py diff --git a/tests/external.py b/tests/external_test.py similarity index 100% rename from tests/external.py rename to tests/external_test.py diff --git a/tests/false_positive.py b/tests/false_positive_test.py similarity index 100% rename from tests/false_positive.py rename to tests/false_positive_test.py diff --git a/tests/incr_restore.py b/tests/incr_restore_test.py similarity index 100% rename from tests/incr_restore.py rename to tests/incr_restore_test.py diff --git a/tests/init.py b/tests/init_test.py similarity index 100% rename from tests/init.py rename to tests/init_test.py diff --git a/tests/locking.py b/tests/locking_test.py similarity index 100% rename from tests/locking.py rename to tests/locking_test.py diff --git a/tests/logging.py b/tests/logging_test.py similarity index 100% rename from tests/logging.py rename to tests/logging_test.py diff --git a/tests/merge.py b/tests/merge_test.py similarity index 100% rename from tests/merge.py rename to tests/merge_test.py diff --git a/tests/option.py b/tests/option_test.py similarity index 100% rename from tests/option.py rename to tests/option_test.py diff --git a/tests/page.py b/tests/page_test.py similarity index 100% rename from tests/page.py rename to tests/page_test.py diff --git a/tests/pgpro2068.py b/tests/pgpro2068_test.py similarity index 100% rename from tests/pgpro2068.py rename to tests/pgpro2068_test.py diff --git a/tests/pgpro560.py b/tests/pgpro560_test.py similarity index 100% rename from tests/pgpro560.py rename to tests/pgpro560_test.py diff --git a/tests/pgpro589.py b/tests/pgpro589_test.py similarity index 100% rename from tests/pgpro589.py rename to tests/pgpro589_test.py diff --git a/tests/ptrack.py b/tests/ptrack_test.py similarity index 100% rename from tests/ptrack.py rename to tests/ptrack_test.py diff --git a/tests/remote.py b/tests/remote_test.py similarity index 100% rename from tests/remote.py rename to tests/remote_test.py diff --git a/tests/replica.py b/tests/replica_test.py similarity index 100% rename from tests/replica.py rename to tests/replica_test.py diff --git a/tests/restore.py b/tests/restore_test.py similarity index 100% rename from tests/restore.py rename to tests/restore_test.py diff --git a/tests/retention.py b/tests/retention_test.py similarity index 100% rename from tests/retention.py rename to tests/retention_test.py diff --git a/tests/set_backup.py b/tests/set_backup_test.py similarity index 100% rename from tests/set_backup.py rename to tests/set_backup_test.py diff --git a/tests/show.py b/tests/show_test.py similarity index 100% rename from tests/show.py rename to tests/show_test.py diff --git a/tests/time_consuming.py b/tests/time_consuming_test.py similarity index 100% rename from tests/time_consuming.py rename to tests/time_consuming_test.py diff --git a/tests/time_stamp.py b/tests/time_stamp_test.py similarity index 100% rename from tests/time_stamp.py rename to tests/time_stamp_test.py diff --git a/tests/validate.py b/tests/validate_test.py similarity index 100% rename from tests/validate.py rename to tests/validate_test.py From be949fd91e9ef37d8e68b2cec8210348939b0a29 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 25 Nov 2022 16:23:56 +0300 Subject: [PATCH 406/525] fix memory leak in config_get_opt --- src/utils/configuration.c | 3 +++ 
src/utils/pgut.c | 6 ++++++ src/utils/pgut.h | 1 + 3 files changed, 10 insertions(+) diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 93f29c488..193d1c680 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -538,6 +538,9 @@ config_get_opt(int argc, char **argv, ConfigOption cmd_options[], assign_option(opt, optarg, SOURCE_CMD); } + pgut_free(optstring); + pgut_free(longopts); + return optind; } diff --git a/src/utils/pgut.c b/src/utils/pgut.c index 2cf0ccbe7..6123c18d8 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -993,6 +993,12 @@ pgut_str_strip_trailing_filename(const char *filepath, const char *filename) return pgut_strndup(filepath, fp_len); } +void +pgut_free(void *p) +{ + free(p); +} + FILE * pgut_fopen(const char *path, const char *mode, bool missing_ok) { diff --git a/src/utils/pgut.h b/src/utils/pgut.h index 116ee41c0..f8554f9d0 100644 --- a/src/utils/pgut.h +++ b/src/utils/pgut.h @@ -64,6 +64,7 @@ extern void *pgut_realloc(void *p, size_t size); extern char *pgut_strdup(const char *str); extern char *pgut_strndup(const char *str, size_t n); extern char *pgut_str_strip_trailing_filename(const char *filepath, const char *filename); +extern void pgut_free(void *p); #define pgut_new(type) ((type *) pgut_malloc(sizeof(type))) #define pgut_new0(type) ((type *) pgut_malloc0(sizeof(type))) From 15a5c5dad7fcb441883046e4caa1f01089f35005 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 29 Nov 2022 11:12:20 +0300 Subject: [PATCH 407/525] [PBCKP-354] Pg15: continue reading if error "missing contrecord" is met. Pg15 now reports when it does not find the expected contrecord. The absence of this message was a long-standing bug in previous Postgres versions. This situation can happen if a WAL segment was rewritten after a restart. It causes "tests.validate.ValidateTest.test_validate_wal_unreal_values" to hang, but (it seems) for a different reason: the test tries to read "in future". We should probably stop reading logs here, but since we have always continued here, let's continue as well. --- src/parsexlog.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/parsexlog.c b/src/parsexlog.c index f12aae904..bcdd814d6 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -1443,7 +1443,14 @@ XLogThreadWorker(void *arg) * Usually SimpleXLogPageRead() does it by itself. But here we need * to do it manually to support threads. */ - if (reader_data->need_switch && errormsg == NULL) + if (reader_data->need_switch && ( + errormsg == NULL || + /* + * Pg15 now informs if "contrecord" is missing. + * TODO: probably we should abort reading logs at this moment. + * But we continue as we did with bug present in Pg < 15. + */ + strncmp(errormsg, "missing contrecord", 18) == 0)) { if (SwitchThreadToNextWal(xlogreader, thread_arg)) continue; From 0a5fc87dbd704e0ee5e430b0406bb0328c553055 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 29 Nov 2022 15:12:32 +0300 Subject: [PATCH 408/525] [PBCKP-360] fix exception check in test_validate_corrupt_page_header_map zlib decompression can report an error either as "data error" or as "buffer error". One of the checks accounted for this, the other did not. Make them the same. (A short illustrative sketch of the unified check follows.) 
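A minimal, self-contained sketch of the unified check, assuming nothing beyond the standard library: DecompressError and _validate() are stand-ins for ProbackupException and self.validate_pb() from the pg_probackup test suite, and the file path in the message is made up; only the regex accepting both zlib wordings mirrors the actual change.

import unittest

class UnifiedZlibErrorCheck(unittest.TestCase):
    # DecompressError and _validate() are illustrative stand-ins for
    # ProbackupException and self.validate_pb() used in the real test.
    class DecompressError(Exception):
        def __init__(self, message):
            super().__init__(message)
            self.message = message

    def _validate(self):
        raise self.DecompressError(
            'WARNING: An error occured during metadata decompression '
            'for file "base/1/1234": buffer error')

    def test_unified_check(self):
        # assertRaises fails the test when nothing is raised and exposes the
        # exception for follow-up assertions; the regex accepts both zlib wordings.
        with self.assertRaises(self.DecompressError) as cm:
            self._validate()
        self.assertRegex(cm.exception.message, r'(data|buffer) error')

if __name__ == '__main__':
    unittest.main()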
And use `assertRaises` for good (requires python 3.2 at least) --- tests/validate.py | 51 +++++++++++++++++++---------------------------- 1 file changed, 21 insertions(+), 30 deletions(-) diff --git a/tests/validate.py b/tests/validate.py index 966ad81a8..a6776d571 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -4007,43 +4007,34 @@ def test_validate_corrupt_page_header_map(self): f.seek(42) f.write(b"blah") f.flush() - f.close - try: + with self.assertRaises(ProbackupException) as cm: self.validate_pb(backup_dir, 'node', backup_id=backup_id) - self.assertEqual( - 1, 0, - "Expecting Error because page_header is corrupted.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - r'WARNING: An error occured during metadata decompression for file "[\w/]+": (data|buffer) error', - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn("Backup {0} is corrupt".format(backup_id), e.message) + e = cm.exception + self.assertRegex( + cm.exception.message, + r'WARNING: An error occured during metadata decompression for file "[\w/]+": (data|buffer) error', + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) - try: + self.assertIn("Backup {0} is corrupt".format(backup_id), e.message) + + with self.assertRaises(ProbackupException) as cm: self.validate_pb(backup_dir) - self.assertEqual( - 1, 0, - "Expecting Error because page_header is corrupted.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'WARNING: An error occured during metadata decompression' in e.message and - 'data error' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn("INFO: Backup {0} data files are valid".format(ok_1), e.message) - self.assertIn("WARNING: Backup {0} data files are corrupted".format(backup_id), e.message) - self.assertIn("INFO: Backup {0} data files are valid".format(ok_2), e.message) + e = cm.exception + self.assertRegex( + e.message, + r'WARNING: An error occured during metadata decompression for file "[\w/]+": (data|buffer) error', + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) - self.assertIn("WARNING: Some backups are not valid", e.message) + self.assertIn("INFO: Backup {0} data files are valid".format(ok_1), e.message) + self.assertIn("WARNING: Backup {0} data files are corrupted".format(backup_id), e.message) + self.assertIn("INFO: Backup {0} data files are valid".format(ok_2), e.message) + + self.assertIn("WARNING: Some backups are not valid", e.message) # Clean after yourself self.del_test_dir(module_name, fname) From 3b72dd66af0bae871184cbf969c18e70b1b0b4b8 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 29 Nov 2022 16:24:49 +0300 Subject: [PATCH 409/525] fix github tests.init_test --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ab1a5888d..6f99d0f27 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -87,7 +87,7 @@ jobs: If (!$Env:MODE -Or $Env:MODE -Eq "basic") { $Env:PG_PROBACKUP_TEST_BASIC = "ON" python -m unittest -v tests - python -m unittest -v tests.init + python -m unittest -v tests.init_test } else { python -m unittest -v tests.$Env:MODE } From 3b2efe63a23363fe3b6fea9c7c441aede39a6581 Mon Sep 17 00:00:00 2001 From: Yura 
Sokolov Date: Wed, 30 Nov 2022 14:58:36 +0300 Subject: [PATCH 410/525] and again try fix travis tests.init_test --- travis/run_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 1823b05de..84d7aa173 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -111,7 +111,7 @@ if [ "$MODE" = "basic" ]; then export PG_PROBACKUP_TEST_BASIC=ON echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} python3 -m unittest -v tests - python3 -m unittest -v tests.init + python3 -m unittest -v tests.init_test else echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} python3 -m unittest -v tests.$MODE From 7e59a19df1f71796d0e154e259732f7ac8c4a4c3 Mon Sep 17 00:00:00 2001 From: Elena Indrupskaya Date: Wed, 30 Nov 2022 15:56:41 +0300 Subject: [PATCH 411/525] [DOC] {PBCKP-320] Remove duplicate descriptions of backup/catchup modes [skip-travis] --- doc/pgprobackup.xml | 121 +++++++++----------------------------------- 1 file changed, 23 insertions(+), 98 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 6babf00f7..7c8610681 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -312,7 +312,7 @@ doc/src/sgml/pgprobackup.sgml - + FULL backups contain all the data files required to restore the database cluster. @@ -328,7 +328,7 @@ doc/src/sgml/pgprobackup.sgml - + DELTA backup. In this mode, pg_probackup reads all data files in the data directory and copies only those pages that have changed since the previous backup. This @@ -337,7 +337,7 @@ doc/src/sgml/pgprobackup.sgml - + PAGE backup. In this mode, pg_probackup scans all WAL files in the archive from the moment the previous full or incremental backup was taken. Newly created backups @@ -352,7 +352,7 @@ doc/src/sgml/pgprobackup.sgml - + PTRACK backup. In this mode, PostgreSQL tracks page changes on the fly. Continuous archiving is not necessary for it to operate. Each time a relation page is updated, @@ -443,7 +443,7 @@ doc/src/sgml/pgprobackup.sgml parameters and have the same major release number. Depending on cluster configuration, PostgreSQL itself may apply additional restrictions, such as CPU architecture - or libc/libicu versions. + or libc/icu versions. @@ -1274,36 +1274,11 @@ pg_probackup backup -B backup_dir --instance Where backup_mode can take one of the following values: + FULL, + DELTA, + PAGE, and + PTRACK. - - - - FULL — creates a full backup that contains all the data - files of the cluster to be restored. - - - - - DELTA — reads all data files in the data directory and - creates an incremental backup for pages that have changed - since the previous backup. - - - - - PAGE — creates an incremental backup based on the WAL - files that have been generated since the previous full or - incremental backup was taken. Only changed blocks are read - from data files. - - - - - PTRACK — creates an incremental backup tracking page - changes on the fly. - - - When restoring a cluster from an incremental backup, pg_probackup relies on the parent full backup and all the @@ -3532,25 +3507,25 @@ pg_probackup catchup -b catchup_mode --source-pgdata= Where catchup_mode can take one of the - following values: FULL, DELTA, or PTRACK. + following values: - - FULL — creates a full copy of the PostgreSQL instance. + + FULL — creates a full copy of the PostgreSQL instance. The data directory of the destination instance must be empty for this mode. 
- - DELTA — reads all data files in the data directory and + + DELTA — reads all data files in the data directory and creates an incremental copy for pages that have changed since the destination instance was shut down. - - PTRACK — tracking page changes on the fly, + + PTRACK — tracking page changes on the fly, only reads and copies pages that have changed since the point of divergence of the source and destination instances. @@ -3817,35 +3792,10 @@ pg_probackup backup -B backup_dir -b bac Specifies the backup mode to use. Possible values are: - - - - - FULL — creates a full backup that contains all the data - files of the cluster to be restored. - - - - - DELTA — reads all data files in the data directory and - creates an incremental backup for pages that have changed - since the previous backup. - - - - - PAGE — creates an incremental PAGE backup based on the WAL - files that have changed since the previous full or - incremental backup was taken. - - - - - PTRACK — creates an incremental PTRACK backup tracking - page changes on the fly. - - - + FULL, + DELTA, + PAGE, and + PTRACK. @@ -4540,34 +4490,9 @@ pg_probackup catchup -b catchup_mode Specifies the catchup mode to use. Possible values are: - - - - - FULL — creates a full copy of the PostgreSQL instance. - - - - - DELTA — reads all data files in the data directory and - creates an incremental copy for pages that have changed - since the destination instance was shut down. - - - - - PTRACK — tracking page changes on the fly, - only reads and copies pages that have changed since the point of divergence - of the source and destination instances. - - - PTRACK catchup mode requires PTRACK - not earlier than 2.0 and hence, PostgreSQL not earlier than 11. - - - - - + FULL, + DELTA, and + PTRACK. From 8fa063f688c8de69e785ad0188feae36586b7a30 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 30 Nov 2022 16:34:00 +0300 Subject: [PATCH 412/525] travis: and backup_test --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 8315f7842..17e6d2579 100644 --- a/.travis.yml +++ b/.travis.yml @@ -35,8 +35,8 @@ env: - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE - - PG_VERSION=15 PG_BRANCH=REL_15_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=backup.BackupTest.test_full_backup - - PG_VERSION=15 PG_BRANCH=REL_15_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=backup.BackupTest.test_full_backup_stream + - PG_VERSION=15 PG_BRANCH=REL_15_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=backup_test.BackupTest.test_full_backup + - PG_VERSION=15 PG_BRANCH=REL_15_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=backup_test.BackupTest.test_full_backup_stream # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=checkdb From 619816012a8c71ab7d6362d60947f97066b9a98d Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 30 Nov 2022 21:50:20 +0300 Subject: [PATCH 413/525] fix ArchiveTest.test_pgpro434_4 for Pg15 --- tests/archive_test.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/archive_test.py b/tests/archive_test.py index 5e59dd268..f6cd50a9f 100644 --- a/tests/archive_test.py +++ b/tests/archive_test.py @@ -317,7 +317,10 @@ def test_pgpro434_4(self): os.environ["PGAPPNAME"] = "pg_probackup" postgres_gdb = 
self.gdb_attach(pid) - postgres_gdb.set_breakpoint('do_pg_stop_backup') + if self.get_version(node) < 150000: + postgres_gdb.set_breakpoint('do_pg_stop_backup') + else: + postgres_gdb.set_breakpoint('do_pg_backup_stop') postgres_gdb.continue_execution_until_running() gdb.continue_execution_until_exit() From 9bcefb2569c33e58e623bdcb9c8a991d1721126f Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 2 Dec 2022 15:10:22 +0300 Subject: [PATCH 414/525] [PBCKP-327] test_ptrack_multiple_segments: try to avoid memory consumption --- tests/helpers/ptrack_helpers.py | 28 ++++++++++++++++++++++++++++ tests/ptrack_test.py | 10 ++++++---- 2 files changed, 34 insertions(+), 4 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index ab164855a..2a4d4c271 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -15,6 +15,8 @@ from time import sleep import re import json +from hashlib import md5 +import random idx_ptrack = { 't_heap': { @@ -200,6 +202,32 @@ def kill(self, someone = None): os.kill(self.auxiliary_pids[someone][0], sig) self.is_started = False + def table_checksum(self, table, sort, dbname="postgres"): + curname = "cur_"+str(random.randint(0,2**48)) + + sum = md5(b"\x01") + + con = self.connect(dbname=dbname) + + con.execute(f""" + DECLARE {curname} NO SCROLL CURSOR FOR + SELECT t::text FROM {table} as t ORDER BY {sort}; + """) + + while True: + rows = con.execute(f"FETCH FORWARD 10000 FROM {curname}") + if not rows: + break + for row in rows: + sum.update(row[0].encode('utf8')) + sum.update(b'\x00') + + con.execute(f"CLOSE {curname}; ROLLBACK;") + + con.close() + sum.update(b'\x02') + return sum.hexdigest() + class ProbackupTest(object): # Class attributes enterprise = is_enterprise() diff --git a/tests/ptrack_test.py b/tests/ptrack_test.py index 6e5786f8c..ed4498a61 100644 --- a/tests/ptrack_test.py +++ b/tests/ptrack_test.py @@ -2039,6 +2039,8 @@ def test_ptrack_multiple_segments(self): # CREATE TABLE node.pgbench_init(scale=100, options=['--tablespace=somedata']) + result = node.table_checksum("pgbench_accounts", "aid", + dbname="postgres") # FULL BACKUP self.backup_node(backup_dir, 'node', node, options=['--stream']) @@ -2075,7 +2077,8 @@ def test_ptrack_multiple_segments(self): # GET LOGICAL CONTENT FROM NODE # it`s stupid, because hint`s are ignored by ptrack - result = node.safe_psql("postgres", "select * from pgbench_accounts") + result = node.table_checksum("pgbench_accounts", "aid", + dbname="postgres") # FIRTS PTRACK BACKUP self.backup_node( backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) @@ -2108,9 +2111,8 @@ def test_ptrack_multiple_segments(self): restored_node, {'port': restored_node.port}) restored_node.slow_start() - result_new = restored_node.safe_psql( - "postgres", - "select * from pgbench_accounts") + result_new = restored_node.table_checksum("pgbench_accounts", "aid", + dbname="postgres") # COMPARE RESTORED FILES self.assertEqual(result, result_new, 'data is lost') From 8d8a92c1d14e7ddeb98a671fbbe1ce2e0e590cf9 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 2 Dec 2022 16:55:38 +0300 Subject: [PATCH 415/525] tests: more usages for table_checksum and reduce batch twice for sanity --- tests/archive_test.py | 8 ++------ tests/cfs_backup_test.py | 28 ++++++++++++---------------- tests/helpers/ptrack_helpers.py | 2 +- tests/page_test.py | 6 ++---- tests/ptrack_test.py | 6 ++---- tests/replica_test.py | 4 ++-- 6 files changed, 21 insertions(+), 33 deletions(-) diff --git 
a/tests/archive_test.py b/tests/archive_test.py index f6cd50a9f..1bf8a1d45 100644 --- a/tests/archive_test.py +++ b/tests/archive_test.py @@ -2215,9 +2215,7 @@ def test_multi_timeline_recovery_prefetching(self): node.slow_start() node.pgbench_init(scale=20) - result = node.safe_psql( - 'postgres', - 'select * from pgbench_accounts') + result = node.table_checksum("pgbench_accounts", "aid") node.stop() node.cleanup() @@ -2242,9 +2240,7 @@ def test_multi_timeline_recovery_prefetching(self): node.slow_start() - result_new = node.safe_psql( - 'postgres', - 'select * from pgbench_accounts') + result_new = node.table_checksum("pgbench_accounts", "aid") self.assertEqual(result, result_new) diff --git a/tests/cfs_backup_test.py b/tests/cfs_backup_test.py index 28ef275df..adfcaef19 100644 --- a/tests/cfs_backup_test.py +++ b/tests/cfs_backup_test.py @@ -761,7 +761,7 @@ def test_multiple_segments(self): 't_heap', tblspace_name) ) - full_result = self.node.safe_psql("postgres", "SELECT * FROM t_heap") + full_result = self.node.table_checksum("t_heap", "id") try: backup_id_full = self.backup_node( @@ -783,7 +783,7 @@ def test_multiple_segments(self): 't_heap') ) - page_result = self.node.safe_psql("postgres", "SELECT * FROM t_heap") + page_result = self.node.table_checksum("t_heap", "id") try: backup_id_page = self.backup_node( @@ -824,7 +824,7 @@ def test_multiple_segments(self): self.node.slow_start() self.assertEqual( full_result, - self.node.safe_psql("postgres", "SELECT * FROM t_heap"), + self.node.table_checksum("t_heap", "id"), 'Lost data after restore') # CHECK PAGE BACKUP @@ -843,7 +843,7 @@ def test_multiple_segments(self): self.node.slow_start() self.assertEqual( page_result, - self.node.safe_psql("postgres", "SELECT * FROM t_heap"), + self.node.table_checksum("t_heap", "id"), 'Lost data after restore') # @unittest.expectedFailure @@ -877,10 +877,8 @@ def test_multiple_segments_in_multiple_tablespaces(self): "FROM generate_series(0,1005000) i".format( 't_heap_2', tblspace_name_2)) - full_result_1 = self.node.safe_psql( - "postgres", "SELECT * FROM t_heap_1") - full_result_2 = self.node.safe_psql( - "postgres", "SELECT * FROM t_heap_2") + full_result_1 = self.node.table_checksum("t_heap_1", "id") + full_result_2 = self.node.table_checksum("t_heap_2", "id") try: backup_id_full = self.backup_node( @@ -911,10 +909,8 @@ def test_multiple_segments_in_multiple_tablespaces(self): 't_heap_2') ) - page_result_1 = self.node.safe_psql( - "postgres", "SELECT * FROM t_heap_1") - page_result_2 = self.node.safe_psql( - "postgres", "SELECT * FROM t_heap_2") + page_result_1 = self.node.table_checksum("t_heap_1", "id") + page_result_2 = self.node.table_checksum("t_heap_2", "id") try: backup_id_page = self.backup_node( @@ -955,11 +951,11 @@ def test_multiple_segments_in_multiple_tablespaces(self): self.assertEqual( full_result_1, - self.node.safe_psql("postgres", "SELECT * FROM t_heap_1"), + self.node.table_checksum("t_heap_1", "id"), 'Lost data after restore') self.assertEqual( full_result_2, - self.node.safe_psql("postgres", "SELECT * FROM t_heap_2"), + self.node.table_checksum("t_heap_2", "id"), 'Lost data after restore') # CHECK PAGE BACKUP @@ -976,11 +972,11 @@ def test_multiple_segments_in_multiple_tablespaces(self): self.assertEqual( page_result_1, - self.node.safe_psql("postgres", "SELECT * FROM t_heap_1"), + self.node.table_checksum("t_heap_1", "id"), 'Lost data after restore') self.assertEqual( page_result_2, - self.node.safe_psql("postgres", "SELECT * FROM t_heap_2"), + 
self.node.table_checksum("t_heap_2", "id"), 'Lost data after restore') # @unittest.expectedFailure diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 2a4d4c271..6fe3d6333 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -215,7 +215,7 @@ def table_checksum(self, table, sort, dbname="postgres"): """) while True: - rows = con.execute(f"FETCH FORWARD 10000 FROM {curname}") + rows = con.execute(f"FETCH FORWARD 5000 FROM {curname}") if not rows: break for row in rows: diff --git a/tests/page_test.py b/tests/page_test.py index e77e5c827..4c5ba7f87 100644 --- a/tests/page_test.py +++ b/tests/page_test.py @@ -1191,8 +1191,7 @@ def test_multi_timeline_page(self): pgdata = self.pgdata_content(node.data_dir) - result = node.safe_psql( - "postgres", "select * from pgbench_accounts") + result = node.table_checksum("pgbench_accounts", "aid") node_restored = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) @@ -1204,8 +1203,7 @@ def test_multi_timeline_page(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - result_new = node_restored.safe_psql( - "postgres", "select * from pgbench_accounts") + result_new = node_restored.table_checksum("pgbench_accounts", "aid") self.assertEqual(result, result_new) diff --git a/tests/ptrack_test.py b/tests/ptrack_test.py index ed4498a61..14688fc11 100644 --- a/tests/ptrack_test.py +++ b/tests/ptrack_test.py @@ -375,7 +375,7 @@ def test_ptrack_eat_my_data(self): self.switch_wal_segment(node) - result = node.safe_psql("postgres", "SELECT * FROM pgbench_accounts") + result = node.table_checksum("pgbench_accounts", "aid") node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -396,9 +396,7 @@ def test_ptrack_eat_my_data(self): # Logical comparison self.assertEqual( result, - node_restored.safe_psql( - 'postgres', - 'SELECT * FROM pgbench_accounts'), + node.table_checksum("pgbench_accounts", "aid"), 'Data loss') # @unittest.skip("skip") diff --git a/tests/replica_test.py b/tests/replica_test.py index 9c68de366..ecc92e19f 100644 --- a/tests/replica_test.py +++ b/tests/replica_test.py @@ -326,7 +326,7 @@ def test_replica_archive_page_backup(self): self.switch_wal_segment(master) - before = master.safe_psql("postgres", "SELECT * FROM pgbench_accounts") + before = master.table_checksum("pgbench_accounts", "aid") self.validate_pb(backup_dir, 'replica') self.assertEqual( @@ -342,7 +342,7 @@ def test_replica_archive_page_backup(self): node.slow_start() # CHECK DATA CORRECTNESS - after = node.safe_psql("postgres", "SELECT * FROM pgbench_accounts") + after = master.table_checksum("pgbench_accounts", "aid") self.assertEqual( before, after, 'Restored data is not equal to original') From 02e3fb0477f44f58b4e05115aa41709b2ec7ad8d Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 4 Dec 2022 02:26:54 +0300 Subject: [PATCH 416/525] tests: table_checksum needs no sorting in fact since we are compare table content exactly --- tests/archive_test.py | 4 ++-- tests/cfs_backup_test.py | 24 ++++++++++++------------ tests/helpers/ptrack_helpers.py | 23 ++++++++++------------- tests/page_test.py | 4 ++-- tests/ptrack_test.py | 13 +++++-------- tests/replica_test.py | 4 ++-- 6 files changed, 33 insertions(+), 39 deletions(-) diff --git a/tests/archive_test.py b/tests/archive_test.py index 1bf8a1d45..fb2600c4a 100644 --- a/tests/archive_test.py +++ b/tests/archive_test.py @@ -2215,7 +2215,7 @@ def 
test_multi_timeline_recovery_prefetching(self): node.slow_start() node.pgbench_init(scale=20) - result = node.table_checksum("pgbench_accounts", "aid") + result = node.table_checksum("pgbench_accounts") node.stop() node.cleanup() @@ -2240,7 +2240,7 @@ def test_multi_timeline_recovery_prefetching(self): node.slow_start() - result_new = node.table_checksum("pgbench_accounts", "aid") + result_new = node.table_checksum("pgbench_accounts") self.assertEqual(result, result_new) diff --git a/tests/cfs_backup_test.py b/tests/cfs_backup_test.py index adfcaef19..cd2826d21 100644 --- a/tests/cfs_backup_test.py +++ b/tests/cfs_backup_test.py @@ -761,7 +761,7 @@ def test_multiple_segments(self): 't_heap', tblspace_name) ) - full_result = self.node.table_checksum("t_heap", "id") + full_result = self.node.table_checksum("t_heap") try: backup_id_full = self.backup_node( @@ -783,7 +783,7 @@ def test_multiple_segments(self): 't_heap') ) - page_result = self.node.table_checksum("t_heap", "id") + page_result = self.node.table_checksum("t_heap") try: backup_id_page = self.backup_node( @@ -824,7 +824,7 @@ def test_multiple_segments(self): self.node.slow_start() self.assertEqual( full_result, - self.node.table_checksum("t_heap", "id"), + self.node.table_checksum("t_heap"), 'Lost data after restore') # CHECK PAGE BACKUP @@ -843,7 +843,7 @@ def test_multiple_segments(self): self.node.slow_start() self.assertEqual( page_result, - self.node.table_checksum("t_heap", "id"), + self.node.table_checksum("t_heap"), 'Lost data after restore') # @unittest.expectedFailure @@ -877,8 +877,8 @@ def test_multiple_segments_in_multiple_tablespaces(self): "FROM generate_series(0,1005000) i".format( 't_heap_2', tblspace_name_2)) - full_result_1 = self.node.table_checksum("t_heap_1", "id") - full_result_2 = self.node.table_checksum("t_heap_2", "id") + full_result_1 = self.node.table_checksum("t_heap_1") + full_result_2 = self.node.table_checksum("t_heap_2") try: backup_id_full = self.backup_node( @@ -909,8 +909,8 @@ def test_multiple_segments_in_multiple_tablespaces(self): 't_heap_2') ) - page_result_1 = self.node.table_checksum("t_heap_1", "id") - page_result_2 = self.node.table_checksum("t_heap_2", "id") + page_result_1 = self.node.table_checksum("t_heap_1") + page_result_2 = self.node.table_checksum("t_heap_2") try: backup_id_page = self.backup_node( @@ -951,11 +951,11 @@ def test_multiple_segments_in_multiple_tablespaces(self): self.assertEqual( full_result_1, - self.node.table_checksum("t_heap_1", "id"), + self.node.table_checksum("t_heap_1"), 'Lost data after restore') self.assertEqual( full_result_2, - self.node.table_checksum("t_heap_2", "id"), + self.node.table_checksum("t_heap_2"), 'Lost data after restore') # CHECK PAGE BACKUP @@ -972,11 +972,11 @@ def test_multiple_segments_in_multiple_tablespaces(self): self.assertEqual( page_result_1, - self.node.table_checksum("t_heap_1", "id"), + self.node.table_checksum("t_heap_1"), 'Lost data after restore') self.assertEqual( page_result_2, - self.node.table_checksum("t_heap_2", "id"), + self.node.table_checksum("t_heap_2"), 'Lost data after restore') # @unittest.expectedFailure diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 6fe3d6333..555c0a73e 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -15,7 +15,6 @@ from time import sleep import re import json -from hashlib import md5 import random idx_ptrack = { @@ -202,30 +201,28 @@ def kill(self, someone = None): os.kill(self.auxiliary_pids[someone][0], sig) 
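Because the table_checksum hunks below are easier to follow as a whole, here is a consolidated sketch of the technique they converge on: stream the table through a server-side cursor in fixed-size batches and fold each row into an incremental digest, so the whole result set never has to sit in the test process memory (the motivation stated in PBCKP-327). The sketch assumes a testgres-style connection object whose execute() returns the fetched rows, as in ptrack_helpers; it is an illustration, not the exact final helper.

import hashlib
import random

def table_checksum(con, table):
    # `con` is assumed to be an open testgres-style connection (execute()
    # returns the fetched rows inside an open transaction); `table` comes
    # from the test itself, so plain string formatting is acceptable here.
    curname = "cur_" + str(random.randint(0, 2 ** 48))
    con.execute("DECLARE %s NO SCROLL CURSOR FOR SELECT t::text FROM %s as t"
                % (curname, table))

    digest = hashlib.md5()
    while True:
        rows = con.execute("FETCH FORWARD 5000 FROM %s" % curname)
        if not rows:
            break
        for row in rows:
            # Fold each row's text representation into the running digest.
            digest.update(row[0].encode('utf8'))

    con.execute("CLOSE %s; ROLLBACK;" % curname)
    con.close()
    return digest.hexdigest()

Note the digest is order-sensitive: dropping the ORDER BY, as the later hunk does, appears to rely on the original and restored clusters returning rows in the same physical order, which holds for the physical copy scenarios these tests compare.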
self.is_started = False - def table_checksum(self, table, sort, dbname="postgres"): - curname = "cur_"+str(random.randint(0,2**48)) - - sum = md5(b"\x01") - + def table_checksum(self, table, dbname="postgres"): con = self.connect(dbname=dbname) - con.execute(f""" - DECLARE {curname} NO SCROLL CURSOR FOR - SELECT t::text FROM {table} as t ORDER BY {sort}; - """) + curname = "cur_"+str(random.randint(0,2**48)) + + con.execute(""" + DECLARE %s NO SCROLL CURSOR FOR + SELECT t::text FROM %s as t + """ % (curname, table)) + sum = hashlib.md5() while True: - rows = con.execute(f"FETCH FORWARD 5000 FROM {curname}") + rows = con.execute("FETCH FORWARD 5000 FROM %s" % curname) if not rows: break for row in rows: + # hash uses SipHash since Python3.4, therefore it is good enough sum.update(row[0].encode('utf8')) - sum.update(b'\x00') con.execute(f"CLOSE {curname}; ROLLBACK;") con.close() - sum.update(b'\x02') return sum.hexdigest() class ProbackupTest(object): diff --git a/tests/page_test.py b/tests/page_test.py index 4c5ba7f87..be6116bbe 100644 --- a/tests/page_test.py +++ b/tests/page_test.py @@ -1191,7 +1191,7 @@ def test_multi_timeline_page(self): pgdata = self.pgdata_content(node.data_dir) - result = node.table_checksum("pgbench_accounts", "aid") + result = node.table_checksum("pgbench_accounts") node_restored = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) @@ -1203,7 +1203,7 @@ def test_multi_timeline_page(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - result_new = node_restored.table_checksum("pgbench_accounts", "aid") + result_new = node_restored.table_checksum("pgbench_accounts") self.assertEqual(result, result_new) diff --git a/tests/ptrack_test.py b/tests/ptrack_test.py index 14688fc11..b8a4065b0 100644 --- a/tests/ptrack_test.py +++ b/tests/ptrack_test.py @@ -375,7 +375,7 @@ def test_ptrack_eat_my_data(self): self.switch_wal_segment(node) - result = node.table_checksum("pgbench_accounts", "aid") + result = node.table_checksum("pgbench_accounts") node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -396,7 +396,7 @@ def test_ptrack_eat_my_data(self): # Logical comparison self.assertEqual( result, - node.table_checksum("pgbench_accounts", "aid"), + node.table_checksum("pgbench_accounts"), 'Data loss') # @unittest.skip("skip") @@ -2037,8 +2037,7 @@ def test_ptrack_multiple_segments(self): # CREATE TABLE node.pgbench_init(scale=100, options=['--tablespace=somedata']) - result = node.table_checksum("pgbench_accounts", "aid", - dbname="postgres") + result = node.table_checksum("pgbench_accounts") # FULL BACKUP self.backup_node(backup_dir, 'node', node, options=['--stream']) @@ -2075,8 +2074,7 @@ def test_ptrack_multiple_segments(self): # GET LOGICAL CONTENT FROM NODE # it`s stupid, because hint`s are ignored by ptrack - result = node.table_checksum("pgbench_accounts", "aid", - dbname="postgres") + result = node.table_checksum("pgbench_accounts") # FIRTS PTRACK BACKUP self.backup_node( backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) @@ -2109,8 +2107,7 @@ def test_ptrack_multiple_segments(self): restored_node, {'port': restored_node.port}) restored_node.slow_start() - result_new = restored_node.table_checksum("pgbench_accounts", "aid", - dbname="postgres") + result_new = restored_node.table_checksum("pgbench_accounts") # COMPARE RESTORED FILES self.assertEqual(result, result_new, 'data is lost') diff --git a/tests/replica_test.py 
b/tests/replica_test.py index ecc92e19f..577dcd3a5 100644 --- a/tests/replica_test.py +++ b/tests/replica_test.py @@ -326,7 +326,7 @@ def test_replica_archive_page_backup(self): self.switch_wal_segment(master) - before = master.table_checksum("pgbench_accounts", "aid") + before = master.table_checksum("pgbench_accounts") self.validate_pb(backup_dir, 'replica') self.assertEqual( @@ -342,7 +342,7 @@ def test_replica_archive_page_backup(self): node.slow_start() # CHECK DATA CORRECTNESS - after = master.table_checksum("pgbench_accounts", "aid") + after = master.table_checksum("pgbench_accounts") self.assertEqual( before, after, 'Restored data is not equal to original') From fc50cf0ddfc116460da742e658ad50a08775bf3f Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 4 Dec 2022 04:24:55 +0300 Subject: [PATCH 417/525] tests: fix travis uses old image with python3.5 --- tests/helpers/ptrack_helpers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 555c0a73e..706506432 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -220,7 +220,7 @@ def table_checksum(self, table, dbname="postgres"): # hash uses SipHash since Python3.4, therefore it is good enough sum.update(row[0].encode('utf8')) - con.execute(f"CLOSE {curname}; ROLLBACK;") + con.execute("CLOSE %s; ROLLBACK;" % curname) con.close() return sum.hexdigest() From f2f47f77345d62e127019323f0cfbd8f06e2379b Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 4 Dec 2022 05:33:52 +0300 Subject: [PATCH 418/525] get rid of plpython usage --- tests/pgpro2068_test.py | 72 ++++++++++++++++------------------------- 1 file changed, 28 insertions(+), 44 deletions(-) diff --git a/tests/pgpro2068_test.py b/tests/pgpro2068_test.py index da76a8815..04f0eb6fa 100644 --- a/tests/pgpro2068_test.py +++ b/tests/pgpro2068_test.py @@ -53,11 +53,6 @@ def test_minrecpoint_on_replica(self): replica, {'port': replica.port, 'restart_after_crash': 'off'}) - # we need those later - node.safe_psql( - "postgres", - "CREATE EXTENSION plpython3u") - node.safe_psql( "postgres", "CREATE EXTENSION pageinspect") @@ -131,48 +126,37 @@ def test_minrecpoint_on_replica(self): recovery_config, "recovery_target_action = 'pause'") replica.slow_start(replica=True) + current_xlog_lsn_query = 'SELECT pg_last_wal_replay_lsn() INTO current_xlog_lsn' if self.get_version(node) < 100000: - script = ''' -DO -$$ -relations = plpy.execute("select class.oid from pg_class class WHERE class.relkind IN ('r', 'i', 't', 'm') and class.relpersistence = 'p'") -current_xlog_lsn = plpy.execute("SELECT min_recovery_end_location as lsn FROM pg_control_recovery()")[0]['lsn'] -plpy.notice('CURRENT LSN: {0}'.format(current_xlog_lsn)) -found_corruption = False -for relation in relations: - pages_from_future = plpy.execute("with number_of_blocks as (select blknum from generate_series(0, pg_relation_size({0}) / 8192 -1) as blknum) select blknum, lsn, checksum, flags, lower, upper, special, pagesize, version, prune_xid from number_of_blocks, page_header(get_raw_page('{0}'::oid::regclass::text, number_of_blocks.blknum::int)) where lsn > '{1}'::pg_lsn".format(relation['oid'], current_xlog_lsn)) - - if pages_from_future.nrows() == 0: - continue - - for page in pages_from_future: - plpy.notice('Found page from future. 
OID: {0}, BLKNUM: {1}, LSN: {2}'.format(relation['oid'], page['blknum'], page['lsn'])) - found_corruption = True -if found_corruption: - plpy.error('Found Corruption') -$$ LANGUAGE plpython3u; -''' - else: - script = ''' + current_xlog_lsn_query = 'SELECT min_recovery_end_location INTO current_xlog_lsn FROM pg_control_recovery()' + + script = f''' DO $$ -relations = plpy.execute("select class.oid from pg_class class WHERE class.relkind IN ('r', 'i', 't', 'm') and class.relpersistence = 'p'") -current_xlog_lsn = plpy.execute("select pg_last_wal_replay_lsn() as lsn")[0]['lsn'] -plpy.notice('CURRENT LSN: {0}'.format(current_xlog_lsn)) -found_corruption = False -for relation in relations: - pages_from_future = plpy.execute("with number_of_blocks as (select blknum from generate_series(0, pg_relation_size({0}) / 8192 -1) as blknum) select blknum, lsn, checksum, flags, lower, upper, special, pagesize, version, prune_xid from number_of_blocks, page_header(get_raw_page('{0}'::oid::regclass::text, number_of_blocks.blknum::int)) where lsn > '{1}'::pg_lsn".format(relation['oid'], current_xlog_lsn)) - - if pages_from_future.nrows() == 0: - continue - - for page in pages_from_future: - plpy.notice('Found page from future. OID: {0}, BLKNUM: {1}, LSN: {2}'.format(relation['oid'], page['blknum'], page['lsn'])) - found_corruption = True -if found_corruption: - plpy.error('Found Corruption') -$$ LANGUAGE plpython3u; -''' +DECLARE + roid oid; + current_xlog_lsn pg_lsn; + pages_from_future RECORD; + found_corruption bool := false; +BEGIN + {current_xlog_lsn_query}; + RAISE NOTICE 'CURRENT LSN: %', current_xlog_lsn; + FOR roid IN select oid from pg_class class where relkind IN ('r', 'i', 't', 'm') and relpersistence = 'p' LOOP + FOR pages_from_future IN + with number_of_blocks as (select blknum from generate_series(0, pg_relation_size(roid) / 8192 -1) as blknum ) + select blknum, lsn, checksum, flags, lower, upper, special, pagesize, version, prune_xid + from number_of_blocks, page_header(get_raw_page(roid::regclass::text, number_of_blocks.blknum::int)) + where lsn > current_xlog_lsn LOOP + RAISE NOTICE 'Found page from future. 
OID: %, BLKNUM: %, LSN: %', roid, pages_from_future.blknum, pages_from_future.lsn; + found_corruption := true; + END LOOP; + END LOOP; + IF found_corruption THEN + RAISE 'Found Corruption'; + END IF; +END; +$$ LANGUAGE plpgsql; +'''.format(current_xlog_lsn_query=current_xlog_lsn_query) # Find blocks from future replica.safe_psql( From 0f7e01b7f392d12a22d385e1d6241bce754b2cd8 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 4 Dec 2022 09:08:26 +0300 Subject: [PATCH 419/525] try travis without docker --- .travis.yml | 55 +++++++++++++--- travis/Dockerfile.in | 30 --------- travis/backup_restore.sh | 66 ------------------- travis/before-install.sh | 6 ++ travis/before-script-user.sh | 7 ++ travis/before-script.sh | 19 ++++++ travis/docker-compose.yml | 17 ----- travis/install.sh | 66 +++++++++++++++++++ travis/make_dockerfile.sh | 37 ----------- travis/run_tests.sh | 124 ----------------------------------- travis/script.sh | 41 ++++++++++++ 11 files changed, 185 insertions(+), 283 deletions(-) delete mode 100644 travis/Dockerfile.in delete mode 100644 travis/backup_restore.sh create mode 100755 travis/before-install.sh create mode 100755 travis/before-script-user.sh create mode 100755 travis/before-script.sh delete mode 100644 travis/docker-compose.yml create mode 100755 travis/install.sh delete mode 100755 travis/make_dockerfile.sh delete mode 100755 travis/run_tests.sh create mode 100755 travis/script.sh diff --git a/.travis.yml b/.travis.yml index 17e6d2579..074ae3d02 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,23 +1,60 @@ os: linux -dist: bionic +dist: jammy language: c -services: - - docker +cache: ccache + +addons: + apt: + packages: + - sudo + - libc-dev + - bison + - flex + - libreadline-dev + - zlib1g-dev + - libzstd-dev + - libssl-dev + - perl + - libperl-dev + - libdbi-perl + - cpanminus + - locales + - python3 + - python3-dev + - python3-pip + - libicu-dev + - libgss-dev + - libkrb5-dev + - libxml2-dev + - libxslt1-dev + - libldap2-dev + - tcl-dev + - diffutils + - gdb + - gettext + - lcov + - openssh-client + - openssh-server + - libipc-run-perl + - libtime-hires-perl + - libtimedate-perl + - libdbd-pg-perl before_install: - - cp travis/* . + - sudo travis/before-install.sh install: - - ./make_dockerfile.sh - - docker-compose build + - travis/install.sh + +before_script: + - sudo travis/before-script.sh + - travis/before-script-user.sh script: - - docker-compose run tests - # - docker-compose run $(bash <(curl -s https://codecov.io/env)) tests - # - docker run -v $(pwd):/tests --rm centos:7 /tests/travis/backup_restore.sh + - travis/script.sh notifications: email: diff --git a/travis/Dockerfile.in b/travis/Dockerfile.in deleted file mode 100644 index a67663d3b..000000000 --- a/travis/Dockerfile.in +++ /dev/null @@ -1,30 +0,0 @@ -FROM ololobus/postgres-dev:stretch - -USER root -RUN apt-get update -RUN apt-get -yq install python3 python3-pip - -# RUN curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py -# RUN python2 get-pip.py -RUN python3 -m pip install virtualenv - -# Environment -ENV PG_MAJOR=${PG_VERSION} PG_BRANCH=${PG_BRANCH} -ENV PTRACK_PATCH_PG_BRANCH=${PTRACK_PATCH_PG_BRANCH} -ENV PGPROBACKUP_GDB=${PGPROBACKUP_GDB} -ENV LANG=C.UTF-8 PGHOME=/pg/testdir/pgbin - -# Make directories -RUN mkdir -p /pg/testdir - -COPY run_tests.sh /run.sh -RUN chmod 755 /run.sh - -COPY . 
/pg/testdir -WORKDIR /pg/testdir - -# Grant privileges -RUN chown -R postgres:postgres /pg/testdir - -USER postgres -ENTRYPOINT MODE=${MODE} /run.sh diff --git a/travis/backup_restore.sh b/travis/backup_restore.sh deleted file mode 100644 index b3c9df1ed..000000000 --- a/travis/backup_restore.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/bin/sh -ex - -# vars -export PGVERSION=9.5.4 -export PATH=$PATH:/usr/pgsql-9.5/bin -export PGUSER=pgbench -export PGDATABASE=pgbench -export PGDATA=/var/lib/pgsql/9.5/data -export BACKUP_PATH=/backups -export ARCLOG_PATH=$BACKUP_PATH/backup/pg_xlog -export PGDATA2=/var/lib/pgsql/9.5/data2 -export PGBENCH_SCALE=100 -export PGBENCH_TIME=60 - -# prepare directory -cp -a /tests /build -pushd /build - -# download postgresql -yum install -y wget -wget -k https://ftp.postgresql.org/pub/source/v$PGVERSION/postgresql-$PGVERSION.tar.gz -O postgresql.tar.gz -tar xf postgresql.tar.gz - -# install pg_probackup -yum install -y https://download.postgresql.org/pub/repos/yum/9.5/redhat/rhel-7-x86_64/pgdg-centos95-9.5-2.noarch.rpm -yum install -y postgresql95-devel make gcc readline-devel openssl-devel pam-devel libxml2-devel libxslt-devel -make top_srcdir=postgresql-$PGVERSION -make install top_srcdir=postgresql-$PGVERSION - -# initialize cluster and database -yum install -y postgresql95-server -su postgres -c "/usr/pgsql-9.5/bin/initdb -D $PGDATA -k" -cat < $PGDATA/pg_hba.conf -local all all trust -host all all 127.0.0.1/32 trust -local replication pgbench trust -host replication pgbench 127.0.0.1/32 trust -EOF -cat < $PGDATA/postgresql.auto.conf -max_wal_senders = 2 -wal_level = logical -wal_log_hints = on -EOF -su postgres -c "/usr/pgsql-9.5/bin/pg_ctl start -w -D $PGDATA" -su postgres -c "createdb -U postgres $PGUSER" -su postgres -c "createuser -U postgres -a -d -E $PGUSER" -pgbench -i -s $PGBENCH_SCALE - -# Count current -COUNT=$(psql -Atc "select count(*) from pgbench_accounts") -pgbench -s $PGBENCH_SCALE -T $PGBENCH_TIME -j 2 -c 10 & - -# create backup -pg_probackup init -pg_probackup backup -b full --disable-ptrack-clear --stream -v -pg_probackup show -sleep $PGBENCH_TIME - -# restore from backup -chown -R postgres:postgres $BACKUP_PATH -su postgres -c "pg_probackup restore -D $PGDATA2" - -# start backup server -su postgres -c "/usr/pgsql-9.5/bin/pg_ctl stop -w -D $PGDATA" -su postgres -c "/usr/pgsql-9.5/bin/pg_ctl start -w -D $PGDATA2" -( psql -Atc "select count(*) from pgbench_accounts" | grep $COUNT ) || (cat $PGDATA2/pg_log/*.log ; exit 1) diff --git a/travis/before-install.sh b/travis/before-install.sh new file mode 100755 index 000000000..376de5e6e --- /dev/null +++ b/travis/before-install.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -xe + +mkdir /pg +chown travis /pg \ No newline at end of file diff --git a/travis/before-script-user.sh b/travis/before-script-user.sh new file mode 100755 index 000000000..d9c07f1e4 --- /dev/null +++ b/travis/before-script-user.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +set -xe + +ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N "" +cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys +ssh-keyscan -H localhost >> ~/.ssh/known_hosts diff --git a/travis/before-script.sh b/travis/before-script.sh new file mode 100755 index 000000000..ca59bcf23 --- /dev/null +++ b/travis/before-script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -xe + +/etc/init.d/ssh start + +# Show pg_config path (just in case) +echo "############### pg_config path:" +which pg_config + +# Show pg_config just in case +echo 
"############### pg_config:" +pg_config + +# Show kernel parameters +echo "############### kernel params:" +cat /proc/sys/kernel/yama/ptrace_scope +sudo sysctl kernel.yama.ptrace_scope=0 +cat /proc/sys/kernel/yama/ptrace_scope diff --git a/travis/docker-compose.yml b/travis/docker-compose.yml deleted file mode 100644 index fc6545567..000000000 --- a/travis/docker-compose.yml +++ /dev/null @@ -1,17 +0,0 @@ -version: "3.7" -services: - tests: - build: - context: . - - cap_add: - - SYS_PTRACE - - security_opt: - - seccomp=unconfined - - # don't work - #sysctls: - # kernel.yama.ptrace_scope: 0 - privileged: true - diff --git a/travis/install.sh b/travis/install.sh new file mode 100755 index 000000000..43ada47b7 --- /dev/null +++ b/travis/install.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash + +set -xe + +if [ -z ${PG_VERSION+x} ]; then + echo PG_VERSION is not set! + exit 1 +fi + +if [ -z ${PG_BRANCH+x} ]; then + echo PG_BRANCH is not set! + exit 1 +fi + +if [ -z ${PTRACK_PATCH_PG_BRANCH+x} ]; then + PTRACK_PATCH_PG_BRANCH=OFF +fi + +# fix +sudo chown -R travis /home/travis/.ccache + +export PGHOME=/pg + +# Clone Postgres +echo "############### Getting Postgres sources:" +git clone https://github.com/postgres/postgres.git -b $PG_BRANCH --depth=1 + +# Clone ptrack +if [ "$PTRACK_PATCH_PG_BRANCH" != "OFF" ]; then + git clone https://github.com/postgrespro/ptrack.git -b master --depth=1 postgres/contrib/ptrack + export PG_PROBACKUP_PTRACK=ON +else + export PG_PROBACKUP_PTRACK=OFF +fi + +# Compile and install Postgres +echo "############### Compiling Postgres:" +cd postgres # Go to postgres dir +if [ "$PG_PROBACKUP_PTRACK" = "ON" ]; then + git apply -3 contrib/ptrack/patches/${PTRACK_PATCH_PG_BRANCH}-ptrack-core.diff +fi +CC='ccache gcc' CFLAGS="-Og" ./configure --prefix=$PGHOME \ + --cache-file=~/.ccache/configure-cache \ + --enable-debug --enable-cassert --enable-depend \ + --enable-tap-tests --enable-nls +make -s -j$(nproc) install +make -s -j$(nproc) -C contrib/ install + +# Override default Postgres instance +export PATH=$PGHOME/bin:$PATH +export LD_LIBRARY_PATH=$PGHOME/lib +export PG_CONFIG=$(which pg_config) + +if [ "$PG_PROBACKUP_PTRACK" = "ON" ]; then + echo "############### Compiling Ptrack:" + make -C contrib/ptrack install +fi + +# Get amcheck if missing +if [ ! -d "contrib/amcheck" ]; then + echo "############### Getting missing amcheck:" + git clone https://github.com/petergeoghegan/amcheck.git --depth=1 contrib/amcheck + make -C contrib/amcheck install +fi + +pip3 install testgres \ No newline at end of file diff --git a/travis/make_dockerfile.sh b/travis/make_dockerfile.sh deleted file mode 100755 index e780649d9..000000000 --- a/travis/make_dockerfile.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env sh - -if [ -z ${PG_VERSION+x} ]; then - echo PG_VERSION is not set! - exit 1 -fi - -if [ -z ${PG_BRANCH+x} ]; then - echo PG_BRANCH is not set! 
- exit 1 -fi - -if [ -z ${MODE+x} ]; then - MODE=basic -fi - -if [ -z ${PTRACK_PATCH_PG_BRANCH+x} ]; then - PTRACK_PATCH_PG_BRANCH=OFF -fi - -if [ -z ${PGPROBACKUP_GDB+x} ]; then - PGPROBACKUP_GDB=ON -fi - -echo PG_VERSION=${PG_VERSION} -echo PG_BRANCH=${PG_BRANCH} -echo MODE=${MODE} -echo PTRACK_PATCH_PG_BRANCH=${PTRACK_PATCH_PG_BRANCH} -echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB} - -sed \ - -e 's/${PG_VERSION}/'${PG_VERSION}/g \ - -e 's/${PG_BRANCH}/'${PG_BRANCH}/g \ - -e 's/${MODE}/'${MODE}/g \ - -e 's/${PTRACK_PATCH_PG_BRANCH}/'${PTRACK_PATCH_PG_BRANCH}/g \ - -e 's/${PGPROBACKUP_GDB}/'${PGPROBACKUP_GDB}/g \ -Dockerfile.in > Dockerfile diff --git a/travis/run_tests.sh b/travis/run_tests.sh deleted file mode 100755 index 84d7aa173..000000000 --- a/travis/run_tests.sh +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/env bash - -# -# Copyright (c) 2019-2022, Postgres Professional -# -set -xe - -sudo su -c 'mkdir /run/sshd' -sudo su -c 'apt-get update -y' -sudo su -c 'apt-get install openssh-client openssh-server -y' -sudo su -c '/etc/init.d/ssh start' - -ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N "" -cat ~/.ssh/id_rsa.pub > ~/.ssh/authorized_keys -ssh-keyscan -H localhost >> ~/.ssh/known_hosts - -PG_SRC=$PWD/postgres - -# # Here PG_VERSION is provided by postgres:X-alpine docker image -# curl "https://ftp.postgresql.org/pub/source/v$PG_VERSION/postgresql-$PG_VERSION.tar.bz2" -o postgresql.tar.bz2 -# echo "$PG_SHA256 *postgresql.tar.bz2" | sha256sum -c - - -# mkdir $PG_SRC - -# tar \ -# --extract \ -# --file postgresql.tar.bz2 \ -# --directory $PG_SRC \ -# --strip-components 1 - -# Clone Postgres -echo "############### Getting Postgres sources:" -git clone https://github.com/postgres/postgres.git -b $PG_BRANCH --depth=1 - -# Clone ptrack -if [ "$PTRACK_PATCH_PG_BRANCH" != "OFF" ]; then - git clone https://github.com/postgrespro/ptrack.git -b master --depth=1 - export PG_PROBACKUP_PTRACK=ON -else - export PG_PROBACKUP_PTRACK=OFF -fi - - -# Compile and install Postgres -echo "############### Compiling Postgres:" -cd postgres # Go to postgres dir -if [ "$PG_PROBACKUP_PTRACK" = "ON" ]; then - git apply -3 ../ptrack/patches/${PTRACK_PATCH_PG_BRANCH}-ptrack-core.diff -fi -CFLAGS="-O0" ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests --enable-nls --with-python -make -s -j$(nproc) install -#make -s -j$(nproc) -C 'src/common' install -#make -s -j$(nproc) -C 'src/port' install -#make -s -j$(nproc) -C 'src/interfaces' install -make -s -j$(nproc) -C contrib/ install - -# Override default Postgres instance -export PATH=$PGHOME/bin:$PATH -export LD_LIBRARY_PATH=$PGHOME/lib -export PG_CONFIG=$(which pg_config) - -if [ "$PG_PROBACKUP_PTRACK" = "ON" ]; then - echo "############### Compiling Ptrack:" - make USE_PGXS=1 -C ../ptrack install -fi - -# Get amcheck if missing -if [ ! -d "contrib/amcheck" ]; then - echo "############### Getting missing amcheck:" - git clone https://github.com/petergeoghegan/amcheck.git --depth=1 contrib/amcheck - make USE_PGXS=1 -C contrib/amcheck install -fi - -# Get back to testdir -cd .. 
- -# Show pg_config path (just in case) -echo "############### pg_config path:" -which pg_config - -# Show pg_config just in case -echo "############### pg_config:" -pg_config - -# Show kernel parameters -echo "############### kernel params:" -cat /proc/sys/kernel/yama/ptrace_scope -sudo sysctl kernel.yama.ptrace_scope=0 -cat /proc/sys/kernel/yama/ptrace_scope - -# Build and install pg_probackup (using PG_CPPFLAGS and SHLIB_LINK for gcov) -echo "############### Compiling and installing pg_probackup:" -# make USE_PGXS=1 PG_CPPFLAGS="-coverage" SHLIB_LINK="-coverage" top_srcdir=$CUSTOM_PG_SRC install -make USE_PGXS=1 top_srcdir=$PG_SRC install - -# Setup python environment -echo "############### Setting up python env:" -python3 -m virtualenv pyenv -source pyenv/bin/activate -pip3 install testgres - -echo "############### Testing:" -echo PG_PROBACKUP_PARANOIA=${PG_PROBACKUP_PARANOIA} -echo ARCHIVE_COMPRESSION=${ARCHIVE_COMPRESSION} -echo PGPROBACKUPBIN_OLD=${PGPROBACKUPBIN_OLD} -echo PGPROBACKUPBIN=${PGPROBACKUPBIN} -echo PGPROBACKUP_SSH_REMOTE=${PGPROBACKUP_SSH_REMOTE} -echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB} -echo PG_PROBACKUP_PTRACK=${PG_PROBACKUP_PTRACK} -if [ "$MODE" = "basic" ]; then - export PG_PROBACKUP_TEST_BASIC=ON - echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} - python3 -m unittest -v tests - python3 -m unittest -v tests.init_test -else - echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} - python3 -m unittest -v tests.$MODE -fi - -# Generate *.gcov files -# gcov src/*.c src/*.h - -# Send coverage stats to Codecov -# bash <(curl -s https://codecov.io/bash) diff --git a/travis/script.sh b/travis/script.sh new file mode 100755 index 000000000..31ef09726 --- /dev/null +++ b/travis/script.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +set -xe + +export PGHOME=/pg +export PG_SRC=$PWD/postgres +export PATH=$PGHOME/bin:$PATH +export LD_LIBRARY_PATH=$PGHOME/lib +export PG_CONFIG=$(which pg_config) + +# Build and install pg_probackup (using PG_CPPFLAGS and SHLIB_LINK for gcov) +echo "############### Compiling and installing pg_probackup:" +# make USE_PGXS=1 PG_CPPFLAGS="-coverage" SHLIB_LINK="-coverage" top_srcdir=$CUSTOM_PG_SRC install +make USE_PGXS=1 top_srcdir=$PG_SRC install + +if [ -z ${MODE+x} ]; then + MODE=basic +fi + +if [ -z ${PGPROBACKUP_GDB+x} ]; then + PGPROBACKUP_GDB=ON +fi + +echo "############### Testing:" +echo PG_PROBACKUP_PARANOIA=${PG_PROBACKUP_PARANOIA} +echo ARCHIVE_COMPRESSION=${ARCHIVE_COMPRESSION} +echo PGPROBACKUPBIN_OLD=${PGPROBACKUPBIN_OLD} +echo PGPROBACKUPBIN=${PGPROBACKUPBIN} +echo PGPROBACKUP_SSH_REMOTE=${PGPROBACKUP_SSH_REMOTE} +echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB} +echo PG_PROBACKUP_PTRACK=${PG_PROBACKUP_PTRACK} + +if [ "$MODE" = "basic" ]; then + export PG_PROBACKUP_TEST_BASIC=ON + echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} + python3 -m unittest -v tests + python3 -m unittest -v tests.init_test +else + echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} + python3 -m unittest -v tests.$MODE +fi From c42f68ecca86de49aa55fbd4c98d3099062d54b0 Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Mon, 5 Dec 2022 11:21:19 +0300 Subject: [PATCH 420/525] [PBCKP-382] version 15 compatibility bug --- tests/auth_test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/auth_test.py b/tests/auth_test.py index d0be9f344..65c30e6ee 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -140,7 +140,6 @@ def test_backup_via_unprivileged_user(self): else: node.safe_psql( "postgres", - "GRANT EXECUTE ON 
FUNCTION pg_backup_stop() TO backup; " "GRANT EXECUTE ON FUNCTION pg_backup_stop(boolean) TO backup;") self.backup_node( From 3109634ecb4c5995cd76a2dfb122afa08e404916 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 5 Dec 2022 15:49:12 +0300 Subject: [PATCH 421/525] [PBCKP-325] change test_issue_231 to check backup id are different --- tests/backup_test.py | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/tests/backup_test.py b/tests/backup_test.py index db7ccf5a0..31f0b427a 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -2790,16 +2790,10 @@ def test_issue_231(self): datadir = os.path.join(node.data_dir, '123') - try: - self.backup_node( - backup_dir, 'node', node, data_dir='{0}'.format(datadir)) - except: - pass - - out = self.backup_node(backup_dir, 'node', node, options=['--stream'], return_id=False) + pb1 = self.backup_node(backup_dir, 'node', node, data_dir='{0}'.format(datadir)) + pb2 = self.backup_node(backup_dir, 'node', node, options=['--stream']) - # it is a bit racy - self.assertIn("WARNING: Cannot create directory", out) + self.assertNotEqual(pb1, pb2) def test_incr_backup_filenode_map(self): """ From 6bd71d866dda40c2d6697f3e86f488b13648bd61 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 6 Dec 2022 01:51:22 +0300 Subject: [PATCH 422/525] [PBCKP-382] and another one Pg15 pg_backup_stop() --- tests/auth_test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/auth_test.py b/tests/auth_test.py index 65c30e6ee..52d7e1544 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -240,7 +240,6 @@ def setUpClass(cls): "GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; " "GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; " "GRANT EXECUTE ON FUNCTION pg_backup_start(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_backup_stop() TO backup; " "GRANT EXECUTE ON FUNCTION pg_backup_stop(boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; " "GRANT EXECUTE ON FUNCTION pg_switch_wal() TO backup; " From bef73b8ba61dce7e4c4704eaa8b2d31c4ec08fd8 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 4 Dec 2022 03:31:32 +0300 Subject: [PATCH 423/525] more usages for table_checksum --- tests/archive_test.py | 51 +++++++++++++-------------------- tests/catchup_test.py | 52 +++++++++++++++++----------------- tests/cfs_catchup_test.py | 8 +++--- tests/cfs_restore_test.py | 29 +++++++++---------- tests/compression_test.py | 48 +++++++++++++++---------------- tests/delta_test.py | 32 ++++++++++----------- tests/merge_test.py | 43 ++++++++-------------------- tests/page_test.py | 30 ++++++++------------ tests/ptrack_test.py | 59 +++++++++++++++++---------------------- tests/replica_test.py | 22 +++++++-------- tests/restore_test.py | 50 ++++++++++++++++----------------- 11 files changed, 185 insertions(+), 239 deletions(-) diff --git a/tests/archive_test.py b/tests/archive_test.py index fb2600c4a..b2217a7bf 100644 --- a/tests/archive_test.py +++ b/tests/archive_test.py @@ -35,7 +35,7 @@ def test_pgpro434_1(self): "md5(repeat(i::text,10))::tsvector as tsvector from " "generate_series(0,100) i") - result = node.safe_psql("postgres", "SELECT * FROM t_heap") + result = node.table_checksum("t_heap") self.backup_node( backup_dir, 'node', node) node.cleanup() @@ -58,7 +58,7 @@ def test_pgpro434_1(self): node.slow_start() self.assertEqual( - result, node.safe_psql("postgres", "SELECT * FROM t_heap"), + result, node.table_checksum("t_heap"), 'data after restore not equal to original 
data') # @unittest.skip("skip") @@ -152,7 +152,7 @@ def test_pgpro434_2(self): backup_id = self.backup_node(backup_dir, 'node', node) - result = node.safe_psql("postgres", "SELECT * FROM t_heap") + result = node.table_checksum("t_heap") node.safe_psql( "postgres", "insert into t_heap select 100503 as id, md5(i::text) as text, " @@ -204,11 +204,7 @@ def test_pgpro434_2(self): "select exists(select 1 from t_heap where id > 100500)")[0][0], 'data after restore not equal to original data') - self.assertEqual( - result, - node.safe_psql( - "postgres", - "SELECT * FROM t_heap"), + self.assertEqual(result, node.table_checksum("t_heap"), 'data after restore not equal to original data') # @unittest.skip("skip") @@ -702,7 +698,7 @@ def test_replica_archive(self): "from generate_series(0,2560) i") self.backup_node(backup_dir, 'master', master, options=['--stream']) - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") # Settings for Replica self.restore_node(backup_dir, 'master', replica) @@ -713,7 +709,7 @@ def test_replica_archive(self): replica.slow_start(replica=True) # Check data correctness on replica - after = replica.safe_psql("postgres", "SELECT * FROM t_heap") + after = replica.table_checksum("t_heap") self.assertEqual(before, after) # Change data on master, take FULL backup from replica, @@ -724,7 +720,7 @@ def test_replica_archive(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(256,512) i") - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") backup_id = self.backup_node( backup_dir, 'replica', replica, @@ -748,7 +744,7 @@ def test_replica_archive(self): self.set_auto_conf(node, {'port': node.port}) node.slow_start() # CHECK DATA CORRECTNESS - after = node.safe_psql("postgres", "SELECT * FROM t_heap") + after = node.table_checksum("t_heap") self.assertEqual(before, after) # Change data on master, make PAGE backup from replica, @@ -760,7 +756,7 @@ def test_replica_archive(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(512,80680) i") - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") self.wait_until_replica_catch_with_master(master, replica) @@ -787,7 +783,7 @@ def test_replica_archive(self): node.slow_start() # CHECK DATA CORRECTNESS - after = node.safe_psql("postgres", "SELECT * FROM t_heap") + after = node.table_checksum("t_heap") self.assertEqual(before, after) # @unittest.expectedFailure @@ -831,7 +827,7 @@ def test_master_and_replica_parallel_archiving(self): # TAKE FULL ARCHIVE BACKUP FROM MASTER self.backup_node(backup_dir, 'master', master) # GET LOGICAL CONTENT FROM MASTER - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") # GET PHYSICAL CONTENT FROM MASTER pgdata_master = self.pgdata_content(master.data_dir) @@ -849,7 +845,7 @@ def test_master_and_replica_parallel_archiving(self): replica.slow_start(replica=True) # CHECK LOGICAL CORRECTNESS on REPLICA - after = replica.safe_psql("postgres", "SELECT * FROM t_heap") + after = replica.table_checksum("t_heap") self.assertEqual(before, after) master.psql( @@ -923,7 +919,7 @@ def test_basic_master_and_replica_concurrent_archiving(self): # TAKE FULL ARCHIVE BACKUP FROM MASTER self.backup_node(backup_dir, 'master', master) # GET LOGICAL CONTENT FROM MASTER - before = master.safe_psql("postgres", "SELECT 
* FROM t_heap") + before = master.table_checksum("t_heap") # GET PHYSICAL CONTENT FROM MASTER pgdata_master = self.pgdata_content(master.data_dir) @@ -942,7 +938,7 @@ def test_basic_master_and_replica_concurrent_archiving(self): replica.slow_start(replica=True) # CHECK LOGICAL CORRECTNESS on REPLICA - after = replica.safe_psql("postgres", "SELECT * FROM t_heap") + after = replica.table_checksum("t_heap") self.assertEqual(before, after) master.psql( @@ -1107,7 +1103,7 @@ def test_archive_pg_receivexlog(self): node, backup_type='page' ) - result = node.safe_psql("postgres", "SELECT * FROM t_heap") + result = node.table_checksum("t_heap") self.validate_pb(backup_dir) # Check data correctness @@ -1117,9 +1113,7 @@ def test_archive_pg_receivexlog(self): self.assertEqual( result, - node.safe_psql( - "postgres", "SELECT * FROM t_heap" - ), + node.table_checksum("t_heap"), 'data after restore not equal to original data') # Clean after yourself @@ -1176,7 +1170,7 @@ def test_archive_pg_receivexlog_compression_pg10(self): backup_dir, 'node', node, backup_type='page' ) - result = node.safe_psql("postgres", "SELECT * FROM t_heap") + result = node.table_checksum("t_heap") self.validate_pb(backup_dir) # Check data correctness @@ -1185,7 +1179,7 @@ def test_archive_pg_receivexlog_compression_pg10(self): node.slow_start() self.assertEqual( - result, node.safe_psql("postgres", "SELECT * FROM t_heap"), + result, node.table_checksum("t_heap"), 'data after restore not equal to original data') # Clean after yourself @@ -2150,13 +2144,8 @@ def test_archive_pg_receivexlog_partial_handling(self): node_restored.slow_start() - result = node.safe_psql( - "postgres", - "select sum(id) from t_heap").decode('utf-8').rstrip() - - result_new = node_restored.safe_psql( - "postgres", - "select sum(id) from t_heap").decode('utf-8').rstrip() + result = node.table_checksum("t_heap") + result_new = node_restored.table_checksum("t_heap") self.assertEqual(result, result_new) diff --git a/tests/catchup_test.py b/tests/catchup_test.py index c94a5300d..21bcd7973 100644 --- a/tests/catchup_test.py +++ b/tests/catchup_test.py @@ -22,7 +22,7 @@ def test_basic_full_catchup(self): src_pg.safe_psql( "postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") # do full catchup dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) @@ -47,7 +47,7 @@ def test_basic_full_catchup(self): dst_pg.slow_start() # 2nd check: run verification query - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') # Cleanup @@ -69,7 +69,7 @@ def test_full_catchup_with_tablespace(self): src_pg.safe_psql( "postgres", "CREATE TABLE ultimate_question TABLESPACE tblspace1 AS SELECT 42 AS answer") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") # do full catchup with tablespace mapping dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) @@ -105,7 +105,7 @@ def test_full_catchup_with_tablespace(self): dst_pg.slow_start() # 2nd check: run verification query - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = 
dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') # Cleanup @@ -146,7 +146,7 @@ def test_basic_delta_catchup(self): pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) pgbench.wait() src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") # do delta catchup self.catchup_node( @@ -171,7 +171,7 @@ def test_basic_delta_catchup(self): dst_pg.slow_start(replica = True) # 2nd check: run verification query - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') # Cleanup @@ -218,7 +218,7 @@ def test_basic_ptrack_catchup(self): pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) pgbench.wait() src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") # do ptrack catchup self.catchup_node( @@ -243,7 +243,7 @@ def test_basic_ptrack_catchup(self): dst_pg.slow_start(replica = True) # 2nd check: run verification query - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') # Cleanup @@ -282,7 +282,7 @@ def test_tli_delta_catchup(self): src_pg.slow_start(replica = True) src_pg.promote() src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") # do catchup (src_tli = 2, dst_tli = 1) self.catchup_node( @@ -306,7 +306,7 @@ def test_tli_delta_catchup(self): dst_pg.slow_start(replica = True) # 2nd check: run verification query - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') dst_pg.stop() @@ -364,7 +364,7 @@ def test_tli_ptrack_catchup(self): self.assertEqual(src_tli, "2", "Postgres didn't update TLI after promote") src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") # do catchup (src_tli = 2, dst_tli = 1) self.catchup_node( @@ -388,7 +388,7 @@ def test_tli_ptrack_catchup(self): dst_pg.slow_start(replica = True) # 2nd check: run verification query - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') dst_pg.stop() @@ -818,7 +818,7 @@ def test_tli_destination_mismatch(self): # preparation 3: "useful" changes src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") 
# try catchup try: @@ -832,7 +832,7 @@ def test_tli_destination_mismatch(self): dst_options['port'] = str(dst_pg.port) self.set_auto_conf(dst_pg, dst_options) dst_pg.slow_start() - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") dst_pg.stop() self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') except ProbackupException as e: @@ -896,7 +896,7 @@ def test_tli_source_mismatch(self): # preparation 4: "useful" changes src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") # try catchup try: @@ -910,7 +910,7 @@ def test_tli_source_mismatch(self): dst_options['port'] = str(dst_pg.port) self.set_auto_conf(dst_pg, dst_options) dst_pg.slow_start() - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") dst_pg.stop() self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') except ProbackupException as e: @@ -979,7 +979,7 @@ def test_unclean_delta_catchup(self): pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) pgbench.wait() src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") # do delta catchup self.catchup_node( @@ -1004,7 +1004,7 @@ def test_unclean_delta_catchup(self): dst_pg.slow_start(replica = True) # 2nd check: run verification query - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') # Cleanup @@ -1068,7 +1068,7 @@ def test_unclean_ptrack_catchup(self): pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) pgbench.wait() src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") # do delta catchup self.catchup_node( @@ -1093,7 +1093,7 @@ def test_unclean_ptrack_catchup(self): dst_pg.slow_start(replica = True) # 2nd check: run verification query - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') # Cleanup @@ -1367,9 +1367,9 @@ def test_config_exclusion(self): # check: run verification query src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") dst_pg.catchup() # wait for replication - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') # preparation 4: make changes on master (source) @@ -1397,9 +1397,9 @@ def test_config_exclusion(self): # check: run verification query src_pg.safe_psql("postgres", "INSERT INTO ultimate_question 
VALUES(2*42)") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") dst_pg.catchup() # wait for replication - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') # preparation 5: make changes on master (source) @@ -1426,9 +1426,9 @@ def test_config_exclusion(self): # check: run verification query src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(3*42)") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") dst_pg.catchup() # wait for replication - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') # Cleanup diff --git a/tests/cfs_catchup_test.py b/tests/cfs_catchup_test.py index 43c3f18f1..f6760b72c 100644 --- a/tests/cfs_catchup_test.py +++ b/tests/cfs_catchup_test.py @@ -25,7 +25,7 @@ def test_full_catchup_with_tablespace(self): src_pg.safe_psql( "postgres", "CREATE TABLE ultimate_question TABLESPACE tblspace1 AS SELECT 42 AS answer") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") src_pg.safe_psql( "postgres", "CHECKPOINT") @@ -76,7 +76,7 @@ def test_full_catchup_with_tablespace(self): dst_pg.slow_start() # 2nd check: run verification query - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') # and now delta backup @@ -112,6 +112,6 @@ def test_full_catchup_with_tablespace(self): # 3rd check: run verification query - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') diff --git a/tests/cfs_restore_test.py b/tests/cfs_restore_test.py index 6b69b4ffe..e70af39b4 100644 --- a/tests/cfs_restore_test.py +++ b/tests/cfs_restore_test.py @@ -112,10 +112,7 @@ def add_data_in_cluster(self): MD5(repeat(i::text,10))::tsvector AS tsvector \ FROM generate_series(0,1e5) i'.format('t1', tblspace_name) ) - self.table_t1 = self.node.safe_psql( - "postgres", - "SELECT * FROM t1" - ) + self.table_t1 = self.node.table_checksum("t1") # --- Restore from full backup ---# # @unittest.expectedFailure @@ -154,8 +151,8 @@ def test_restore_from_fullbackup_to_old_location(self): ) self.assertEqual( - repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')), - repr(self.table_t1) + self.node.table_checksum("t1"), + self.table_t1 ) # @unittest.expectedFailure @@ -193,8 +190,8 @@ def test_restore_from_fullbackup_to_old_location_3_jobs(self): ) self.assertEqual( - repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')), - repr(self.table_t1) + self.node.table_checksum("t1"), + self.table_t1 ) # @unittest.expectedFailure @@ -236,8 +233,8 @@ def 
test_restore_from_fullbackup_to_new_location(self): ) self.assertEqual( - repr(node_new.safe_psql("postgres", "SELECT * FROM %s" % 't1')), - repr(self.table_t1) + self.node.table_checksum("t1"), + self.table_t1 ) node_new.cleanup() @@ -280,8 +277,8 @@ def test_restore_from_fullbackup_to_new_location_5_jobs(self): ) self.assertEqual( - repr(node_new.safe_psql("postgres", "SELECT * FROM %s" % 't1')), - repr(self.table_t1) + self.node.table_checksum("t1"), + self.table_t1 ) node_new.cleanup() @@ -328,8 +325,8 @@ def test_restore_from_fullbackup_to_old_location_tablespace_new_location(self): ) self.assertEqual( - repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')), - repr(self.table_t1) + self.node.table_checksum("t1"), + self.table_t1 ) # @unittest.expectedFailure @@ -375,8 +372,8 @@ def test_restore_from_fullbackup_to_old_location_tablespace_new_location_3_jobs( ) self.assertEqual( - repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')), - repr(self.table_t1) + self.node.table_checksum("t1"), + self.table_t1 ) # @unittest.expectedFailure diff --git a/tests/compression_test.py b/tests/compression_test.py index 94f2dffff..e779f6472 100644 --- a/tests/compression_test.py +++ b/tests/compression_test.py @@ -32,7 +32,7 @@ def test_basic_compression_stream_zlib(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,256) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='full', options=[ @@ -45,7 +45,7 @@ def test_basic_compression_stream_zlib(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(256,512) i") - page_result = node.execute("postgres", "SELECT * FROM t_heap") + page_result = node.table_checksum("t_heap") page_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='page', options=[ @@ -57,7 +57,7 @@ def test_basic_compression_stream_zlib(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(512,768) i") - delta_result = node.execute("postgres", "SELECT * FROM t_heap") + delta_result = node.table_checksum("t_heap") delta_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='delta', options=['--stream', '--compress-algorithm=zlib']) @@ -77,7 +77,7 @@ def test_basic_compression_stream_zlib(self): repr(self.output), self.cmd)) node.slow_start() - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -93,7 +93,7 @@ def test_basic_compression_stream_zlib(self): repr(self.output), self.cmd)) node.slow_start() - page_result_new = node.execute("postgres", "SELECT * FROM t_heap") + page_result_new = node.table_checksum("t_heap") self.assertEqual(page_result, page_result_new) node.cleanup() @@ -109,7 +109,7 @@ def test_basic_compression_stream_zlib(self): repr(self.output), self.cmd)) node.slow_start() - delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") + delta_result_new = node.table_checksum("t_heap") self.assertEqual(delta_result, delta_result_new) def test_compression_archive_zlib(self): @@ -134,7 +134,7 @@ def test_compression_archive_zlib(self): "postgres", "create table t_heap as select i as id, md5(i::text) 
as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,1) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='full', options=["--compress-algorithm=zlib"]) @@ -145,7 +145,7 @@ def test_compression_archive_zlib(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector " "from generate_series(0,2) i") - page_result = node.execute("postgres", "SELECT * FROM t_heap") + page_result = node.table_checksum("t_heap") page_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='page', options=["--compress-algorithm=zlib"]) @@ -155,7 +155,7 @@ def test_compression_archive_zlib(self): "postgres", "insert into t_heap select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,3) i") - delta_result = node.execute("postgres", "SELECT * FROM t_heap") + delta_result = node.table_checksum("t_heap") delta_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='delta', options=['--compress-algorithm=zlib']) @@ -175,7 +175,7 @@ def test_compression_archive_zlib(self): repr(self.output), self.cmd)) node.slow_start() - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -191,7 +191,7 @@ def test_compression_archive_zlib(self): repr(self.output), self.cmd)) node.slow_start() - page_result_new = node.execute("postgres", "SELECT * FROM t_heap") + page_result_new = node.table_checksum("t_heap") self.assertEqual(page_result, page_result_new) node.cleanup() @@ -207,7 +207,7 @@ def test_compression_archive_zlib(self): repr(self.output), self.cmd)) node.slow_start() - delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") + delta_result_new = node.table_checksum("t_heap") self.assertEqual(delta_result, delta_result_new) node.cleanup() @@ -234,7 +234,7 @@ def test_compression_stream_pglz(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,256) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='full', options=['--stream', '--compress-algorithm=pglz']) @@ -245,7 +245,7 @@ def test_compression_stream_pglz(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(256,512) i") - page_result = node.execute("postgres", "SELECT * FROM t_heap") + page_result = node.table_checksum("t_heap") page_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='page', options=['--stream', '--compress-algorithm=pglz']) @@ -256,7 +256,7 @@ def test_compression_stream_pglz(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(512,768) i") - delta_result = node.execute("postgres", "SELECT * FROM t_heap") + delta_result = node.table_checksum("t_heap") delta_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='delta', options=['--stream', '--compress-algorithm=pglz']) @@ -276,7 +276,7 @@ def test_compression_stream_pglz(self): repr(self.output), self.cmd)) node.slow_start() - full_result_new = node.execute("postgres", "SELECT * FROM 
t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -292,7 +292,7 @@ def test_compression_stream_pglz(self): repr(self.output), self.cmd)) node.slow_start() - page_result_new = node.execute("postgres", "SELECT * FROM t_heap") + page_result_new = node.table_checksum("t_heap") self.assertEqual(page_result, page_result_new) node.cleanup() @@ -308,7 +308,7 @@ def test_compression_stream_pglz(self): repr(self.output), self.cmd)) node.slow_start() - delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") + delta_result_new = node.table_checksum("t_heap") self.assertEqual(delta_result, delta_result_new) node.cleanup() @@ -335,7 +335,7 @@ def test_compression_archive_pglz(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector " "from generate_series(0,100) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='full', options=['--compress-algorithm=pglz']) @@ -346,7 +346,7 @@ def test_compression_archive_pglz(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector " "from generate_series(100,200) i") - page_result = node.execute("postgres", "SELECT * FROM t_heap") + page_result = node.table_checksum("t_heap") page_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='page', options=['--compress-algorithm=pglz']) @@ -357,7 +357,7 @@ def test_compression_archive_pglz(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector " "from generate_series(200,300) i") - delta_result = node.execute("postgres", "SELECT * FROM t_heap") + delta_result = node.table_checksum("t_heap") delta_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='delta', options=['--compress-algorithm=pglz']) @@ -377,7 +377,7 @@ def test_compression_archive_pglz(self): repr(self.output), self.cmd)) node.slow_start() - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -393,7 +393,7 @@ def test_compression_archive_pglz(self): repr(self.output), self.cmd)) node.slow_start() - page_result_new = node.execute("postgres", "SELECT * FROM t_heap") + page_result_new = node.table_checksum("t_heap") self.assertEqual(page_result, page_result_new) node.cleanup() @@ -409,7 +409,7 @@ def test_compression_archive_pglz(self): repr(self.output), self.cmd)) node.slow_start() - delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") + delta_result_new = node.table_checksum("t_heap") self.assertEqual(delta_result, delta_result_new) node.cleanup() diff --git a/tests/delta_test.py b/tests/delta_test.py index 23583fd93..8736a079c 100644 --- a/tests/delta_test.py +++ b/tests/delta_test.py @@ -239,7 +239,7 @@ def test_delta_stream(self): "md5(i::text)::tsvector as tsvector " "from generate_series(0,100) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='full', options=['--stream']) @@ -250,7 +250,7 @@ def test_delta_stream(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector " "from generate_series(100,200) i") - delta_result = 
node.execute("postgres", "SELECT * FROM t_heap") + delta_result = node.table_checksum("t_heap") delta_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='delta', options=['--stream']) @@ -270,7 +270,7 @@ def test_delta_stream(self): '\n Unexpected Error Message: {0}\n' ' CMD: {1}'.format(repr(self.output), self.cmd)) node.slow_start() - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -286,7 +286,7 @@ def test_delta_stream(self): '\n Unexpected Error Message: {0}\n' ' CMD: {1}'.format(repr(self.output), self.cmd)) node.slow_start() - delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") + delta_result_new = node.table_checksum("t_heap") self.assertEqual(delta_result, delta_result_new) node.cleanup() @@ -313,7 +313,7 @@ def test_delta_archive(self): "postgres", "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,1) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='full') @@ -322,7 +322,7 @@ def test_delta_archive(self): "postgres", "insert into t_heap select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,2) i") - delta_result = node.execute("postgres", "SELECT * FROM t_heap") + delta_result = node.table_checksum("t_heap") delta_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='delta') @@ -341,7 +341,7 @@ def test_delta_archive(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(self.output), self.cmd)) node.slow_start() - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -357,7 +357,7 @@ def test_delta_archive(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(self.output), self.cmd)) node.slow_start() - delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") + delta_result_new = node.table_checksum("t_heap") self.assertEqual(delta_result, delta_result_new) node.cleanup() @@ -400,7 +400,7 @@ def test_delta_multiple_segments(self): node.safe_psql("postgres", "checkpoint") # GET LOGICAL CONTENT FROM NODE - result = node.safe_psql("postgres", "select count(*) from pgbench_accounts") + result = node.table_checksum("pgbench_accounts") # delta BACKUP self.backup_node( backup_dir, 'node', node, @@ -429,9 +429,7 @@ def test_delta_multiple_segments(self): self.set_auto_conf(restored_node, {'port': restored_node.port}) restored_node.slow_start() - result_new = restored_node.safe_psql( - "postgres", - "select count(*) from pgbench_accounts") + result_new = restored_node.table_checksum("pgbench_accounts") # COMPARE RESTORED FILES self.assertEqual(result, result_new, 'data is lost') @@ -540,7 +538,7 @@ def test_create_db(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - node.safe_psql("postgres", "SELECT * FROM t_heap") + node.table_checksum("t_heap") self.backup_node( backup_dir, 'node', node, options=["--stream"]) @@ -663,7 +661,7 @@ def test_exists_in_previous_backup(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - 
node.safe_psql("postgres", "SELECT * FROM t_heap") + node.table_checksum("t_heap") filepath = node.safe_psql( "postgres", "SELECT pg_relation_filepath('t_heap')").decode('utf-8').rstrip() @@ -774,8 +772,7 @@ def test_alter_table_set_tablespace_delta(self): "alter table t_heap set tablespace somedata_new") # DELTA BACKUP - result = node.safe_psql( - "postgres", "select * from t_heap") + result = node.table_checksum("t_heap") self.backup_node( backup_dir, 'node', node, backup_type='delta', @@ -813,8 +810,7 @@ def test_alter_table_set_tablespace_delta(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - result_new = node_restored.safe_psql( - "postgres", "select * from t_heap") + result_new = node_restored.table_checksum("t_heap") self.assertEqual(result, result_new, 'lost some data after restore') diff --git a/tests/merge_test.py b/tests/merge_test.py index ffa73263c..c789298fd 100644 --- a/tests/merge_test.py +++ b/tests/merge_test.py @@ -659,13 +659,8 @@ def test_merge_page_truncate(self): node_restored.slow_start() # Logical comparison - result1 = node.safe_psql( - "postgres", - "select * from t_heap") - - result2 = node_restored.safe_psql( - "postgres", - "select * from t_heap") + result1 = node.table_checksum("t_heap") + result2 = node_restored.table_checksum("t_heap") self.assertEqual(result1, result2) @@ -744,13 +739,8 @@ def test_merge_delta_truncate(self): node_restored.slow_start() # Logical comparison - result1 = node.safe_psql( - "postgres", - "select * from t_heap") - - result2 = node_restored.safe_psql( - "postgres", - "select * from t_heap") + result1 = node.table_checksum("t_heap") + result2 = node_restored.table_checksum("t_heap") self.assertEqual(result1, result2) @@ -836,13 +826,8 @@ def test_merge_ptrack_truncate(self): node_restored.slow_start() # Logical comparison - result1 = node.safe_psql( - "postgres", - "select * from t_heap") - - result2 = node_restored.safe_psql( - "postgres", - "select * from t_heap") + result1 = node.table_checksum("t_heap") + result2 = node_restored.table_checksum("t_heap") self.assertEqual(result1, result2) @@ -1931,9 +1916,7 @@ def test_merge_backup_from_future(self): backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') pgdata = self.pgdata_content(node.data_dir) - result = node.safe_psql( - 'postgres', - 'SELECT * from pgbench_accounts') + result = node.table_checksum("pgbench_accounts") node_restored = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) @@ -1959,11 +1942,9 @@ def test_merge_backup_from_future(self): {'port': node_restored.port}) node_restored.slow_start() - result_new = node_restored.safe_psql( - 'postgres', - 'SELECT * from pgbench_accounts') + result_new = node_restored.table_checksum("pgbench_accounts") - self.assertTrue(result, result_new) + self.assertEqual(result, result_new) self.compare_pgdata(pgdata, pgdata_restored) @@ -2458,8 +2439,7 @@ def test_multi_timeline_merge(self): self.merge_backup(backup_dir, 'node', page_id) - result = node.safe_psql( - "postgres", "select * from pgbench_accounts") + result = node.table_checksum("pgbench_accounts") node_restored = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) @@ -2471,8 +2451,7 @@ def test_multi_timeline_merge(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - result_new = node_restored.safe_psql( - "postgres", "select * from pgbench_accounts") + result_new = 
node_restored.table_checksum("pgbench_accounts") self.assertEqual(result, result_new) diff --git a/tests/page_test.py b/tests/page_test.py index be6116bbe..786374bdb 100644 --- a/tests/page_test.py +++ b/tests/page_test.py @@ -84,13 +84,8 @@ def test_basic_page_vacuum_truncate(self): node_restored.slow_start() # Logical comparison - result1 = node.safe_psql( - "postgres", - "select * from t_heap") - - result2 = node_restored.safe_psql( - "postgres", - "select * from t_heap") + result1 = node.table_checksum("t_heap") + result2 = node_restored.table_checksum("t_heap") self.assertEqual(result1, result2) @@ -191,7 +186,7 @@ def test_page_stream(self): "md5(i::text)::tsvector as tsvector " "from generate_series(0,100) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='full', options=['--stream']) @@ -202,7 +197,7 @@ def test_page_stream(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector " "from generate_series(100,200) i") - page_result = node.execute("postgres", "SELECT * FROM t_heap") + page_result = node.table_checksum("t_heap") page_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='page', options=['--stream', '-j', '4']) @@ -223,7 +218,7 @@ def test_page_stream(self): ' CMD: {1}'.format(repr(self.output), self.cmd)) node.slow_start() - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -242,7 +237,7 @@ def test_page_stream(self): self.compare_pgdata(pgdata, pgdata_restored) node.slow_start() - page_result_new = node.execute("postgres", "SELECT * FROM t_heap") + page_result_new = node.table_checksum("t_heap") self.assertEqual(page_result, page_result_new) node.cleanup() @@ -272,7 +267,7 @@ def test_page_archive(self): "postgres", "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='full') @@ -282,7 +277,7 @@ def test_page_archive(self): "insert into t_heap select i as id, " "md5(i::text) as text, md5(i::text)::tsvector as tsvector " "from generate_series(100, 200) i") - page_result = node.execute("postgres", "SELECT * FROM t_heap") + page_result = node.table_checksum("t_heap") page_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='page', options=["-j", "4"]) @@ -308,7 +303,7 @@ def test_page_archive(self): node.slow_start() - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -332,7 +327,7 @@ def test_page_archive(self): node.slow_start() - page_result_new = node.execute("postgres", "SELECT * FROM t_heap") + page_result_new = node.table_checksum("t_heap") self.assertEqual(page_result, page_result_new) node.cleanup() @@ -370,7 +365,7 @@ def test_page_multiple_segments(self): pgbench.wait() # GET LOGICAL CONTENT FROM NODE - result = node.safe_psql("postgres", "select count(*) from pgbench_accounts") + result = node.table_checksum("pgbench_accounts") # PAGE BACKUP self.backup_node(backup_dir, 'node', node, backup_type='page') @@ -398,8 +393,7 @@ def 
test_page_multiple_segments(self): self.set_auto_conf(restored_node, {'port': restored_node.port}) restored_node.slow_start() - result_new = restored_node.safe_psql( - "postgres", "select count(*) from pgbench_accounts") + result_new = restored_node.table_checksum("pgbench_accounts") # COMPARE RESTORED FILES self.assertEqual(result, result_new, 'data is lost') diff --git a/tests/ptrack_test.py b/tests/ptrack_test.py index b8a4065b0..7b5bc416b 100644 --- a/tests/ptrack_test.py +++ b/tests/ptrack_test.py @@ -440,7 +440,7 @@ def test_ptrack_simple(self): if self.paranoia: pgdata = self.pgdata_content(node.data_dir) - result = node.safe_psql("postgres", "SELECT * FROM t_heap") + result = node.table_checksum("t_heap") node_restored = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) @@ -463,7 +463,7 @@ def test_ptrack_simple(self): # Logical comparison self.assertEqual( result, - node_restored.safe_psql("postgres", "SELECT * FROM t_heap")) + node_restored.table_checksum("t_heap")) # @unittest.skip("skip") def test_ptrack_unprivileged(self): @@ -1030,7 +1030,7 @@ def test_ptrack_get_block(self): if self.paranoia: pgdata = self.pgdata_content(node.data_dir) - result = node.safe_psql("postgres", "SELECT * FROM t_heap") + result = node.table_checksum("t_heap") node.cleanup() self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) @@ -1044,7 +1044,7 @@ def test_ptrack_get_block(self): # Logical comparison self.assertEqual( result, - node.safe_psql("postgres", "SELECT * FROM t_heap")) + node.table_checksum("t_heap")) # @unittest.skip("skip") def test_ptrack_stream(self): @@ -1075,7 +1075,7 @@ def test_ptrack_stream(self): " as t_seq, md5(i::text) as text, md5(i::text)::tsvector" " as tsvector from generate_series(0,100) i") - full_result = node.safe_psql("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = self.backup_node( backup_dir, 'node', node, options=['--stream']) @@ -1086,7 +1086,7 @@ def test_ptrack_stream(self): " md5(i::text) as text, md5(i::text)::tsvector as tsvector" " from generate_series(100,200) i") - ptrack_result = node.safe_psql("postgres", "SELECT * FROM t_heap") + ptrack_result = node.table_checksum("t_heap") ptrack_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) @@ -1108,7 +1108,7 @@ def test_ptrack_stream(self): repr(self.output), self.cmd) ) node.slow_start() - full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -1128,7 +1128,7 @@ def test_ptrack_stream(self): self.compare_pgdata(pgdata, pgdata_restored) node.slow_start() - ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") + ptrack_result_new = node.table_checksum("t_heap") self.assertEqual(ptrack_result, ptrack_result_new) # @unittest.skip("skip") @@ -1162,7 +1162,7 @@ def test_ptrack_archive(self): " md5(i::text)::tsvector as tsvector" " from generate_series(0,100) i") - full_result = node.safe_psql("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = self.backup_node(backup_dir, 'node', node) full_target_time = self.show_pb( backup_dir, 'node', full_backup_id)['recovery-time'] @@ -1175,7 +1175,7 @@ def test_ptrack_archive(self): " md5(i::text)::tsvector as tsvector" " from generate_series(100,200) i") - ptrack_result = node.safe_psql("postgres", "SELECT * FROM t_heap") + 
ptrack_result = node.table_checksum("t_heap") ptrack_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='ptrack') ptrack_target_time = self.show_pb( @@ -1208,7 +1208,7 @@ def test_ptrack_archive(self): ) node.slow_start() - full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -1233,7 +1233,7 @@ def test_ptrack_archive(self): self.compare_pgdata(pgdata, pgdata_restored) node.slow_start() - ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") + ptrack_result_new = node.table_checksum("t_heap") self.assertEqual(ptrack_result, ptrack_result_new) node.cleanup() @@ -1263,9 +1263,6 @@ def test_ptrack_pgpro417(self): "postgres", "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - node.safe_psql( - "postgres", - "SELECT * FROM t_heap") backup_id = self.backup_node( backup_dir, 'node', node, @@ -1280,7 +1277,7 @@ def test_ptrack_pgpro417(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector " "from generate_series(100,200) i") - node.safe_psql("postgres", "SELECT * FROM t_heap") + node.table_checksum("t_heap") backup_id = self.backup_node( backup_dir, 'node', node, backup_type='ptrack', options=["--stream"]) @@ -1339,7 +1336,7 @@ def test_page_pgpro417(self): "postgres", "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - node.safe_psql("postgres", "SELECT * FROM t_heap") + node.table_checksum("t_heap") # PAGE BACKUP node.safe_psql( @@ -1347,7 +1344,7 @@ def test_page_pgpro417(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector " "from generate_series(100,200) i") - node.safe_psql("postgres", "SELECT * FROM t_heap") + node.table_checksum("t_heap") backup_id = self.backup_node( backup_dir, 'node', node, backup_type='page') @@ -1403,7 +1400,7 @@ def test_full_pgpro417(self): " md5(i::text)::tsvector as tsvector " " from generate_series(0,100) i" ) - node.safe_psql("postgres", "SELECT * FROM t_heap") + node.table_checksum("t_heap") self.backup_node(backup_dir, 'node', node, options=["--stream"]) # SECOND FULL BACKUP @@ -1413,7 +1410,7 @@ def test_full_pgpro417(self): " md5(i::text)::tsvector as tsvector" " from generate_series(100,200) i" ) - node.safe_psql("postgres", "SELECT * FROM t_heap") + node.table_checksum("t_heap") backup_id = self.backup_node( backup_dir, 'node', node, options=["--stream"]) @@ -1474,7 +1471,7 @@ def test_create_db(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - node.safe_psql("postgres", "SELECT * FROM t_heap") + node.table_checksum("t_heap") self.backup_node( backup_dir, 'node', node, options=["--stream"]) @@ -1694,8 +1691,7 @@ def test_alter_table_set_tablespace_ptrack(self): # sys.exit(1) # PTRACK BACKUP - #result = node.safe_psql( - # "postgres", "select * from t_heap") + #result = node.table_checksum("t_heap") self.backup_node( backup_dir, 'node', node, backup_type='ptrack', @@ -1737,8 +1733,7 @@ def test_alter_table_set_tablespace_ptrack(self): node_restored, {'port': node_restored.port}) node_restored.slow_start() -# result_new = node_restored.safe_psql( -# "postgres", "select * from t_heap") +# result_new = node_restored.table_checksum("t_heap") # # 
self.assertEqual(result, result_new, 'lost some data after restore') @@ -1838,7 +1833,7 @@ def test_drop_tablespace(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - result = node.safe_psql("postgres", "select * from t_heap") + result = node.table_checksum("t_heap") # FULL BACKUP self.backup_node(backup_dir, 'node', node, options=["--stream"]) @@ -1892,7 +1887,7 @@ def test_drop_tablespace(self): "Expecting Error because " "tablespace 'somedata' should not be present") - result_new = node.safe_psql("postgres", "select * from t_heap") + result_new = node.table_checksum("t_heap") self.assertEqual(result, result_new) if self.paranoia: @@ -1930,7 +1925,7 @@ def test_ptrack_alter_tablespace(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - result = node.safe_psql("postgres", "select * from t_heap") + result = node.table_checksum("t_heap") # FULL BACKUP self.backup_node(backup_dir, 'node', node, options=["--stream"]) @@ -1939,7 +1934,7 @@ def test_ptrack_alter_tablespace(self): "postgres", "alter table t_heap set tablespace somedata") # GET LOGICAL CONTENT FROM NODE - result = node.safe_psql("postgres", "select * from t_heap") + result = node.table_checksum("t_heap") # FIRTS PTRACK BACKUP self.backup_node( @@ -1971,8 +1966,7 @@ def test_ptrack_alter_tablespace(self): restored_node.slow_start() # COMPARE LOGICAL CONTENT - result_new = restored_node.safe_psql( - "postgres", "select * from t_heap") + result_new = restored_node.table_checksum("t_heap") self.assertEqual(result, result_new) restored_node.cleanup() @@ -2006,8 +2000,7 @@ def test_ptrack_alter_tablespace(self): restored_node, {'port': restored_node.port}) restored_node.slow_start() - result_new = restored_node.safe_psql( - "postgres", "select * from t_heap") + result_new = restored_node.table_checksum("t_heap") self.assertEqual(result, result_new) # @unittest.skip("skip") diff --git a/tests/replica_test.py b/tests/replica_test.py index 577dcd3a5..17fc5a823 100644 --- a/tests/replica_test.py +++ b/tests/replica_test.py @@ -125,7 +125,7 @@ def test_replica_stream_ptrack_backup(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,256) i") - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") # take full backup and restore it self.backup_node(backup_dir, 'master', master, options=['--stream']) @@ -137,7 +137,7 @@ def test_replica_stream_ptrack_backup(self): # Check data correctness on replica replica.slow_start(replica=True) - after = replica.safe_psql("postgres", "SELECT * FROM t_heap") + after = replica.table_checksum("t_heap") self.assertEqual(before, after) # Change data on master, take FULL backup from replica, @@ -148,7 +148,7 @@ def test_replica_stream_ptrack_backup(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(256,512) i") - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") self.add_instance(backup_dir, 'replica', replica) backup_id = self.backup_node( @@ -173,7 +173,7 @@ def test_replica_stream_ptrack_backup(self): node.slow_start() # CHECK DATA CORRECTNESS - after = node.safe_psql("postgres", "SELECT * FROM t_heap") + after = node.table_checksum("t_heap") 
self.assertEqual(before, after) # Change data on master, take PTRACK backup from replica, @@ -185,7 +185,7 @@ def test_replica_stream_ptrack_backup(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(512,768) i") - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") backup_id = self.backup_node( backup_dir, 'replica', replica, backup_type='ptrack', @@ -208,7 +208,7 @@ def test_replica_stream_ptrack_backup(self): node.slow_start() # CHECK DATA CORRECTNESS - after = node.safe_psql("postgres", "SELECT * FROM t_heap") + after = node.table_checksum("t_heap") self.assertEqual(before, after) # @unittest.skip("skip") @@ -248,7 +248,7 @@ def test_replica_archive_page_backup(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,2560) i") - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") backup_id = self.backup_node( backup_dir, 'master', master, backup_type='page') @@ -262,7 +262,7 @@ def test_replica_archive_page_backup(self): replica.slow_start(replica=True) # Check data correctness on replica - after = replica.safe_psql("postgres", "SELECT * FROM t_heap") + after = replica.table_checksum("t_heap") self.assertEqual(before, after) # Change data on master, take FULL backup from replica, @@ -274,7 +274,7 @@ def test_replica_archive_page_backup(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(256,25120) i") - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") self.wait_until_replica_catch_with_master(master, replica) @@ -301,7 +301,7 @@ def test_replica_archive_page_backup(self): node.slow_start() # CHECK DATA CORRECTNESS - after = node.safe_psql("postgres", "SELECT * FROM t_heap") + after = node.table_checksum("t_heap") self.assertEqual(before, after) node.cleanup() @@ -385,7 +385,7 @@ def test_basic_make_replica_via_restore(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,8192) i") - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") backup_id = self.backup_node( backup_dir, 'master', master, backup_type='page') diff --git a/tests/restore_test.py b/tests/restore_test.py index 2de3ecc0f..da3ebffb4 100644 --- a/tests/restore_test.py +++ b/tests/restore_test.py @@ -34,7 +34,7 @@ def test_restore_full_to_latest(self): stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pgbench.wait() pgbench.stdout.close() - before = node.execute("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") backup_id = self.backup_node(backup_dir, 'node', node) node.stop() @@ -60,7 +60,7 @@ def test_restore_full_to_latest(self): node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) # @unittest.skip("skip") @@ -88,7 +88,7 @@ def test_restore_full_page_to_latest(self): backup_id = self.backup_node( backup_dir, 'node', node, backup_type="page") - before = node.execute("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") node.stop() node.cleanup() @@ -102,7 +102,7 @@ def test_restore_full_page_to_latest(self): node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) # @unittest.skip("skip") @@ -120,7 
+120,7 @@ def test_restore_to_specific_timeline(self): node.pgbench_init(scale=2) - before = node.execute("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") backup_id = self.backup_node(backup_dir, 'node', node) @@ -164,7 +164,7 @@ def test_restore_to_specific_timeline(self): self.assertEqual(int(recovery_target_timeline), target_tli) node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) # @unittest.skip("skip") @@ -182,7 +182,7 @@ def test_restore_to_time(self): node.slow_start() node.pgbench_init(scale=2) - before = node.execute("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") backup_id = self.backup_node(backup_dir, 'node', node) @@ -210,7 +210,7 @@ def test_restore_to_time(self): repr(self.output), self.cmd)) node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) # @unittest.skip("skip") @@ -238,7 +238,7 @@ def test_restore_to_xid_inclusive(self): pgbench.wait() pgbench.stdout.close() - before = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") with node.connect("postgres") as con: res = con.execute("INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") con.commit() @@ -264,7 +264,7 @@ def test_restore_to_xid_inclusive(self): repr(self.output), self.cmd)) node.slow_start() - after = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) self.assertEqual( len(node.execute("postgres", "SELECT * FROM tbl0005")), 1) @@ -294,7 +294,7 @@ def test_restore_to_xid_not_inclusive(self): pgbench.wait() pgbench.stdout.close() - before = node.execute("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") with node.connect("postgres") as con: result = con.execute("INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") con.commit() @@ -321,7 +321,7 @@ def test_restore_to_xid_not_inclusive(self): repr(self.output), self.cmd)) node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) self.assertEqual( len(node.execute("postgres", "SELECT * FROM tbl0005")), 0) @@ -354,7 +354,7 @@ def test_restore_to_lsn_inclusive(self): pgbench.wait() pgbench.stdout.close() - before = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") with node.connect("postgres") as con: con.execute("INSERT INTO tbl0005 VALUES (1)") con.commit() @@ -387,7 +387,7 @@ def test_restore_to_lsn_inclusive(self): node.slow_start() - after = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) self.assertEqual( len(node.execute("postgres", "SELECT * FROM tbl0005")), 2) @@ -420,7 +420,7 @@ def test_restore_to_lsn_not_inclusive(self): pgbench.wait() pgbench.stdout.close() - before = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") with node.connect("postgres") as con: con.execute("INSERT INTO tbl0005 VALUES (1)") con.commit() @@ -454,7 +454,7 @@ def test_restore_to_lsn_not_inclusive(self): node.slow_start() - after 
= node.safe_psql("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) self.assertEqual( len(node.execute("postgres", "SELECT * FROM tbl0005")), 1) @@ -492,7 +492,7 @@ def test_restore_full_ptrack_archive(self): backup_id = self.backup_node( backup_dir, 'node', node, backup_type="ptrack") - before = node.execute("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") node.stop() node.cleanup() @@ -506,7 +506,7 @@ def test_restore_full_ptrack_archive(self): repr(self.output), self.cmd)) node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) # @unittest.skip("skip") @@ -549,7 +549,7 @@ def test_restore_ptrack(self): backup_id = self.backup_node( backup_dir, 'node', node, backup_type="ptrack") - before = node.execute("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") node.stop() node.cleanup() @@ -563,7 +563,7 @@ def test_restore_ptrack(self): repr(self.output), self.cmd)) node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) # @unittest.skip("skip") @@ -601,7 +601,7 @@ def test_restore_full_ptrack_stream(self): backup_dir, 'node', node, backup_type="ptrack", options=["--stream"]) - before = node.execute("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") node.stop() node.cleanup() @@ -614,7 +614,7 @@ def test_restore_full_ptrack_stream(self): repr(self.output), self.cmd)) node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) # @unittest.skip("skip") @@ -1289,9 +1289,7 @@ def test_archive_restore_to_restore_point(self): node.safe_psql( "postgres", "create table t_heap as select generate_series(0,10000)") - result = node.safe_psql( - "postgres", - "select * from t_heap") + result = node.table_checksum("t_heap") node.safe_psql( "postgres", "select pg_create_restore_point('savepoint')") node.safe_psql( @@ -1307,7 +1305,7 @@ def test_archive_restore_to_restore_point(self): node.slow_start() - result_new = node.safe_psql("postgres", "select * from t_heap") + result_new = node.table_checksum("t_heap") res = node.psql("postgres", "select * from t_heap_1") self.assertEqual( res[0], 1, From 29a9efb4d499dbf8ae93f842cf730f2c1c6f0ed4 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 6 Dec 2022 14:17:04 +0300 Subject: [PATCH 424/525] [PBCKP-325] refix test_issue_231 to make two backups in one second we have to fail them. Therefore we have to fetch backup_id from log in exception's message. Retry for 20 seconds to have a chance to start in one second. If we couldn't, lets skip the test. 
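The retry logic described above hinges on backup IDs having one-second granularity: the test compares int(pb1, 36) with int(time()), i.e. it assumes the ID is simply the base36-encoded Unix second at which the backup started. A minimal sketch of the resulting collision, under that assumption (the base36enc helper below is illustrative, not the project's implementation):

```python
def base36enc(n: int) -> str:
    # Illustrative base36 encoder; stands in for the backup-ID encoding the test assumes.
    digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    out = ""
    while True:
        n, r = divmod(n, 36)
        out = digits[r] + out
        if n == 0:
            return out

t0 = 1670000000.2              # first backup starts here
t1 = t0 + 0.4                  # second backup starts 400 ms later, still inside the same second
print(base36enc(int(t0)) == base36enc(int(t1)))  # True: identical IDs, so the catalog must reject one
```

This is also why the loop below only proceeds when t % 1 < 0.5: it needs enough of the second left for the next backup to start before the clock ticks over.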
--- tests/backup_test.py | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/tests/backup_test.py b/tests/backup_test.py index 31f0b427a..8810108a5 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -1,5 +1,6 @@ import unittest import os +import re from time import sleep, time from .helpers.ptrack_helpers import base36enc, ProbackupTest, ProbackupException import shutil @@ -2780,18 +2781,33 @@ def test_issue_231(self): """ backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + base_dir=os.path.join(self.module_name, self.fname, 'node')) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) - node.slow_start() datadir = os.path.join(node.data_dir, '123') - pb1 = self.backup_node(backup_dir, 'node', node, data_dir='{0}'.format(datadir)) - pb2 = self.backup_node(backup_dir, 'node', node, options=['--stream']) + t0 = time() + while True: + with self.assertRaises(ProbackupException) as ctx: + self.backup_node(backup_dir, 'node', node) + pb1 = re.search(r' backup ID: ([^\s,]+),', ctx.exception.message).groups()[0] + + t = time() + if int(pb1, 36) == int(t) and t % 1 < 0.5: + # ok, we have a chance to start next backup in same second + break + elif t - t0 > 20: + # Oops, we are waiting for too long. Looks like this runner + # is too slow. Lets skip the test. + self.skipTest("runner is too slow") + # sleep to the second's end so backup will not sleep for a second. + sleep(1 - t % 1) + + with self.assertRaises(ProbackupException) as ctx: + self.backup_node(backup_dir, 'node', node) + pb2 = re.search(r' backup ID: ([^\s,]+),', ctx.exception.message).groups()[0] self.assertNotEqual(pb1, pb2) From 3bc0fc4b8169d53ee93913f21538926fc4463d36 Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Wed, 7 Dec 2022 13:35:25 +0300 Subject: [PATCH 425/525] Documentation hot fix --- doc/pgprobackup.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 7c8610681..2cb10e379 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -653,7 +653,7 @@ GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO backup; COMMIT; - For PostgreSQL 10: + For PostgreSQL versions 10 — 14: BEGIN; From 15c304ad6cec482ac2ed7f9680f3379d49504086 Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Sun, 4 Dec 2022 18:15:22 +0300 Subject: [PATCH 426/525] [PBCKP-375] Prepared for moving RelFileNode to RelFileLocator in the PG16. 
--- src/datapagemap.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/datapagemap.h b/src/datapagemap.h index 6af54713b..6ad7a6204 100644 --- a/src/datapagemap.h +++ b/src/datapagemap.h @@ -9,7 +9,12 @@ #ifndef DATAPAGEMAP_H #define DATAPAGEMAP_H +#if PG_VERSION_NUM < 160000 #include "storage/relfilenode.h" +#else +#include "storage/relfilelocator.h" +#define RelFileNode RelFileLocator +#endif #include "storage/block.h" From 25e63c5a7c0f290290216bd9783632ed06ea7435 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 8 Dec 2022 07:44:12 +0300 Subject: [PATCH 427/525] raw strings in python regex; ignore generated translation files Author: Sergey Fukanchik --- .gitignore | 3 +++ tests/helpers/ptrack_helpers.py | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 502473605..97d323ceb 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,9 @@ # Binaries /pg_probackup +# Generated translated file +/po/ru.mo + # Generated by test suite /regression.diffs /regression.out diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 706506432..067225d66 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1685,7 +1685,7 @@ def version_to_num(self, version): parts.append('0') num = 0 for part in parts: - num = num * 100 + int(re.sub("[^\d]", "", part)) + num = num * 100 + int(re.sub(r"[^\d]", "", part)) return num def switch_wal_segment(self, node): @@ -2038,7 +2038,7 @@ def __init__(self, cmd, env, attach=False): # Get version gdb_version_number = re.search( - b"^GNU gdb [^\d]*(\d+)\.(\d)", + br"^GNU gdb [^\d]*(\d+)\.(\d)", gdb_version) self.major_version = int(gdb_version_number.group(1)) self.minor_version = int(gdb_version_number.group(2)) From 822fbbfe503f52a3b898a9e8deb54f4b28b9f56c Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Thu, 3 Nov 2022 10:50:23 +0300 Subject: [PATCH 428/525] [PBCKP-326] regex fixed in test_missing_replication_permission_1 Everything between the WARNING and FATAL sections is now handled with the [\s\S]*? regex: * [\s\S] is a character class that matches any whitespace or non-whitespace character, including the newlines that matter here. * The "*" quantifier means zero or more such characters; there may just as well be nothing between the two sections. * The trailing "?" makes the quantifier lazy (non-greedy), so we don't match more than we need.
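A short illustration of how that pattern behaves (the log text is a made-up sample in the spirit of the messages quoted in the test, not actual server output): [\s\S]*? bridges the newlines between the WARNING and FATAL parts, and the lazy quantifier stops at the first FATAL it reaches:

```python
import re

# Made-up multi-line connection error resembling the messages the test expects.
output = (
    'WARNING: could not connect to database backupdb: connection to server at '
    '"localhost" (::1), port 12101 failed: Connection refused\n'
    '\tIs the server running on that host and accepting TCP/IP connections?\n'
    'connection to server at "localhost" (127.0.0.1), port 12101 failed: '
    'FATAL: must be superuser or replication role to start walsender'
)

pattern = (r'WARNING: could not connect to database backupdb:[\s\S]*?'
           r'FATAL: must be superuser or replication role to start walsender')

# re.DOTALL is not needed: [\s\S] already matches newlines, unlike '.'
print(bool(re.search(pattern, output)))  # True
```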
--- tests/backup_test.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/backup_test.py b/tests/backup_test.py index 8810108a5..fc1135cab 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -3226,10 +3226,11 @@ def test_missing_replication_permission_1(self): # Messages for >=14 # 'WARNING: could not connect to database backupdb: connection to server on socket "/tmp/.s.PGSQL.30983" failed: FATAL: must be superuser or replication role to start walsender' # 'WARNING: could not connect to database backupdb: connection to server at "localhost" (127.0.0.1), port 29732 failed: FATAL: must be superuser or replication role to start walsender' + # OS-dependant messages: + # 'WARNING: could not connect to database backupdb: connection to server at "localhost" (::1), port 12101 failed: Connection refused\n\tIs the server running on that host and accepting TCP/IP connections?\nconnection to server at "localhost" (127.0.0.1), port 12101 failed: FATAL: must be superuser or replication role to start walsender' self.assertRegex( output, - r'WARNING: could not connect to database backupdb: (connection to server (on socket "/tmp/.s.PGSQL.\d+"|at "localhost" \(127.0.0.1\), port \d+) failed: ){0,1}' - 'FATAL: must be superuser or replication role to start walsender') + r'WARNING: could not connect to database backupdb:[\s\S]*?FATAL: must be superuser or replication role to start walsender') # @unittest.skip("skip") def test_basic_backup_default_transaction_read_only(self): From 076e3fdae97d75a3a5c1e859202762864b087971 Mon Sep 17 00:00:00 2001 From: Sofia Kopikova Date: Mon, 12 Dec 2022 15:46:17 +0300 Subject: [PATCH 429/525] [PBCKP-394] skip creating partitioned index on < 11 versions on test_checkdb_amcheck_only_sanity --- tests/checkdb_test.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/checkdb_test.py b/tests/checkdb_test.py index 2caf4fcb2..1e6daefdb 100644 --- a/tests/checkdb_test.py +++ b/tests/checkdb_test.py @@ -40,9 +40,11 @@ def test_checkdb_amcheck_only_sanity(self): "create table idxpart (a int) " "partition by range (a)") - node.safe_psql( - "postgres", - "create index on idxpart(a)") + # there aren't partitioned indexes on 10 and lesser versions + if self.get_version(node) >= 110000: + node.safe_psql( + "postgres", + "create index on idxpart(a)") try: node.safe_psql( From b74ec9046d635dbe9ce6a133494bfa4995f56fa1 Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Mon, 5 Dec 2022 22:31:47 +0700 Subject: [PATCH 430/525] Update Readme --- README.md | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index c5b01ced2..5b9e094a2 100644 --- a/README.md +++ b/README.md @@ -74,62 +74,62 @@ Installers are available in release **assets**. 
[Latests](https://github.com/pos #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6}-dbg +sudo apt-get install pg-probackup-{15,14,13,12,11,10} +sudo apt-get install pg-probackup-{15,14,13,12,11,10}-dbg #DEB-SRC Packages sudo sh -c 'echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\ /etc/apt/sources.list.d/pg_probackup.list' && sudo apt-get update -sudo apt-get source pg-probackup-{14,13,12,11,10,9.6} +sudo apt-get source pg-probackup-{15,14,13,12,11,10} #DEB Astra Linix Orel sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6}{-dbg,} +sudo apt-get install pg-probackup-{15,14,13,12,11,10}{-dbg,} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{15,14,13,12,11,10} +yum install pg_probackup-{15,14,13,12,11,10}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-rhel.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{15,14,13,12,11,10} +yum install pg_probackup-{15,14,13,12,11,10}-debuginfo #RPM Oracle Linux Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-oraclelinux.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{15,14,13,12,11,10} +yum install pg_probackup-{15,14,13,12,11,10}-debuginfo #SRPM Centos|RHEL|OracleLinux Packages -yumdownloader --source pg_probackup-{14,13,12,11,10,9.6} +yumdownloader --source pg_probackup-{15,14,13,12,11,10} #RPM SUSE|SLES Packages zypper install --allow-unsigned-rpm -y https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-suse.noarch.rpm -zypper --gpg-auto-import-keys install -y pg_probackup-{14,13,12,11,10,9.6} -zypper install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +zypper --gpg-auto-import-keys install -y pg_probackup-{15,14,13,12,11,10} +zypper install pg_probackup-{15,14,13,12,11,10}-debuginfo #SRPM SUSE|SLES Packages -zypper si pg_probackup-{14,13,12,11,10,9.6} - -#RPM ALT Linux 7 -sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p7 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' -sudo apt-get update -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +zypper si pg_probackup-{15,14,13,12,11,10} #RPM ALT Linux 8 sudo sh 
-c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p8 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{15,14,13,12,11,10} +sudo apt-get install pg_probackup-{15,14,13,12,11,10}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p9 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{15,14,13,12,11,10} +sudo apt-get install pg_probackup-{15,14,13,12,11,10}-debuginfo + +#RPM ALT Linux 10 +sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p10 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' +sudo apt-get update +sudo apt-get install pg_probackup-{15,14,13,12,11,10} +sudo apt-get install pg_probackup-{15,14,13,12,11,10}-debuginfo ``` #### pg_probackup for PostgresPro Standard and Enterprise From 9391a1b6768c8913497f99642c1c138657cdb159 Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Tue, 13 Dec 2022 00:01:03 +0700 Subject: [PATCH 431/525] Increment the version --- src/pg_probackup.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 843fb3522..fa3bc4123 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -347,7 +347,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.10" +#define PROGRAM_VERSION "2.5.11" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20509 From 24f12d98d98601cffb195d22342162f877a6a1c7 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 13 Dec 2022 02:52:18 +0300 Subject: [PATCH 432/525] [PBCKP-402,PBCKP-354] refix "missing contrecord" detection. Error message is translated according to current locale. So we can't compare it as a string. But `abortedRecPtr` exists exactly for this case, so we can rely on it. --- src/parsexlog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/parsexlog.c b/src/parsexlog.c index 284b610f6..b7743f6c7 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -1446,7 +1446,7 @@ XLogThreadWorker(void *arg) * TODO: probably we should abort reading logs at this moment. * But we continue as we did with bug present in Pg < 15. */ - strncmp(errormsg, "missing contrecord", 18) == 0)) + !XLogRecPtrIsInvalid(xlogreader->abortedRecPtr))) { if (SwitchThreadToNextWal(xlogreader, thread_arg)) continue; From 10ac3c9918907c827e2b14687e2fe12629a415b4 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 13 Dec 2022 13:06:09 +0300 Subject: [PATCH 433/525] [PBCKP-402] bound check for abortedRecPtr to Pg15 It is not really needed in previous versions. It doesn't harm, but we want to reduce tests amount. 
--- src/parsexlog.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/parsexlog.c b/src/parsexlog.c index b7743f6c7..7c4b5b349 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -1439,6 +1439,7 @@ XLogThreadWorker(void *arg) * Usually SimpleXLogPageRead() does it by itself. But here we need * to do it manually to support threads. */ +#if PG_VERSION_NUM >= 150000 if (reader_data->need_switch && ( errormsg == NULL || /* @@ -1447,6 +1448,9 @@ XLogThreadWorker(void *arg) * But we continue as we did with bug present in Pg < 15. */ !XLogRecPtrIsInvalid(xlogreader->abortedRecPtr))) +#else + if (reader_data->need_switch && errormsg == NULL) +#endif { if (SwitchThreadToNextWal(xlogreader, thread_arg)) continue; From b90273fe143c4b2d2d198f2e29b61547c316a561 Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Tue, 13 Dec 2022 22:50:17 +0700 Subject: [PATCH 434/525] Increment the expected test version --- tests/expected/option_version.out | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index 8abfe7fdd..e0d6924b9 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.5.10 +pg_probackup 2.5.11 From 30e3e37c7b92d629982cf375ccc4ccc0fd518fb9 Mon Sep 17 00:00:00 2001 From: Alexander Burtsev Date: Wed, 14 Dec 2022 16:09:53 +0300 Subject: [PATCH 435/525] Update README.md --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index 5b9e094a2..fb3c5b79c 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,3 @@ -[![Build Status](https://travis-ci.com/postgrespro/pg_probackup.svg?branch=master)](https://travis-ci.com/postgrespro/pg_probackup) [![GitHub release](https://img.shields.io/github/v/release/postgrespro/pg_probackup?include_prereleases)](https://github.com/postgrespro/pg_probackup/releases/latest) # pg_probackup From 640e7a5dcd8b6ad1884b4c0b8698fc38fec18d14 Mon Sep 17 00:00:00 2001 From: Alexander Burtsev Date: Wed, 14 Dec 2022 17:42:15 +0300 Subject: [PATCH 436/525] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index fb3c5b79c..7486a6ca6 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ [![GitHub release](https://img.shields.io/github/v/release/postgrespro/pg_probackup?include_prereleases)](https://github.com/postgrespro/pg_probackup/releases/latest) +[![Build Status](https://travis-ci.com/postgrespro/pg_probackup.svg?branch=master)](https://travis-ci.com/postgrespro/pg_probackup) # pg_probackup From bbe41a403de67db1a6d44b22c7372faaad411a7a Mon Sep 17 00:00:00 2001 From: "e.garbuz" Date: Thu, 15 Dec 2022 13:37:15 +0300 Subject: [PATCH 437/525] Fix tests test_restore_from_fullbackup_to_new_location and test_restore_from_fullbackup_to_new_location_5_jobs --- tests/cfs_restore_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/cfs_restore_test.py b/tests/cfs_restore_test.py index e70af39b4..2fa35e71a 100644 --- a/tests/cfs_restore_test.py +++ b/tests/cfs_restore_test.py @@ -233,7 +233,7 @@ def test_restore_from_fullbackup_to_new_location(self): ) self.assertEqual( - self.node.table_checksum("t1"), + node_new.table_checksum("t1"), self.table_t1 ) node_new.cleanup() @@ -277,7 +277,7 @@ def test_restore_from_fullbackup_to_new_location_5_jobs(self): ) 
self.assertEqual( - self.node.table_checksum("t1"), + node_new.table_checksum("t1"), self.table_t1 ) node_new.cleanup() From 8f4e7d6e5f163744baa340143d9b65682fb64fd9 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 22 Dec 2022 05:49:52 +0300 Subject: [PATCH 438/525] [PBCKP-346] archive-get doesn't need -D/--pgdata argument at all --- src/pg_probackup.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 849685278..ed48178b4 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -677,6 +677,7 @@ main(int argc, char *argv[]) if (instance_config.pgdata != NULL) canonicalize_path(instance_config.pgdata); if (instance_config.pgdata != NULL && + backup_subcmd != ARCHIVE_GET_CMD && !is_absolute_path(instance_config.pgdata)) elog(ERROR, "-D, --pgdata must be an absolute path"); From 22bdbb391549d41c581e85b5eebd07404d7bbc3c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 11 Jan 2023 23:37:45 +0300 Subject: [PATCH 439/525] [PBCKP-423] test_archive_push_sanity: wait logs with tail_file (and some other test as well). --- tests/archive_test.py | 95 ++++++++++++++++------------------- tests/helpers/__init__.py | 2 +- tests/helpers/data_helpers.py | 78 ++++++++++++++++++++++++++++ 3 files changed, 123 insertions(+), 52 deletions(-) create mode 100644 tests/helpers/data_helpers.py diff --git a/tests/archive_test.py b/tests/archive_test.py index b2217a7bf..dd6959290 100644 --- a/tests/archive_test.py +++ b/tests/archive_test.py @@ -3,6 +3,7 @@ import gzip import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, GdbException +from .helpers.data_helpers import tail_file from datetime import datetime, timedelta import subprocess from sys import exit @@ -383,26 +384,31 @@ def test_archive_push_file_exists(self): self.switch_wal_segment(node) sleep(1) - with open(log_file, 'r') as f: - log_content = f.read() + log = tail_file(log_file, linetimeout=30, totaltimeout=120, + collect=True) + log.wait(contains = 'The failed archive command was') + self.assertIn( 'LOG: archive command failed with exit code 1', - log_content) + log.content) self.assertIn( 'DETAIL: The failed archive command was:', - log_content) + log.content) self.assertIn( 'pg_probackup archive-push WAL file', - log_content) + log.content) self.assertIn( 'WAL file already exists in archive with different checksum', - log_content) + log.content) self.assertNotIn( - 'pg_probackup archive-push completed successfully', log_content) + 'pg_probackup archive-push completed successfully', log.content) + + # btw check that console coloring codes are not slipped into log file + self.assertNotIn('[0m', log.content) if self.get_version(node) < 100000: wal_src = os.path.join( @@ -419,19 +425,9 @@ def test_archive_push_file_exists(self): shutil.copyfile(wal_src, file) self.switch_wal_segment(node) - sleep(5) - - with open(log_file, 'r') as f: - log_content = f.read() - - self.assertIn( - 'pg_probackup archive-push completed successfully', - log_content) - - # btw check that console coloring codes are not slipped into log file - self.assertNotIn('[0m', log_content) - print(log_content) + log.stop_collect() + log.wait(contains = 'pg_probackup archive-push completed successfully') # @unittest.skip("skip") def test_archive_push_file_exists_overwrite(self): @@ -471,39 +467,35 @@ def test_archive_push_file_exists_overwrite(self): self.switch_wal_segment(node) sleep(1) - with open(log_file, 'r') as f: - log_content = f.read() + log = tail_file(log_file, linetimeout=30, 
collect=True) + log.wait(contains = 'The failed archive command was') self.assertIn( - 'LOG: archive command failed with exit code 1', log_content) + 'LOG: archive command failed with exit code 1', log.content) self.assertIn( - 'DETAIL: The failed archive command was:', log_content) + 'DETAIL: The failed archive command was:', log.content) self.assertIn( - 'pg_probackup archive-push WAL file', log_content) + 'pg_probackup archive-push WAL file', log.content) self.assertNotIn( 'WAL file already exists in archive with ' - 'different checksum, overwriting', log_content) + 'different checksum, overwriting', log.content) self.assertIn( 'WAL file already exists in archive with ' - 'different checksum', log_content) + 'different checksum', log.content) self.assertNotIn( - 'pg_probackup archive-push completed successfully', log_content) + 'pg_probackup archive-push completed successfully', log.content) self.set_archiving(backup_dir, 'node', node, overwrite=True) node.reload() self.switch_wal_segment(node) - sleep(5) - with open(log_file, 'r') as f: - log_content = f.read() - self.assertTrue( - 'pg_probackup archive-push completed successfully' in log_content, - 'Expecting messages about successfull execution archive_command') + log.drop_content() + log.wait(contains = 'pg_probackup archive-push completed successfully') self.assertIn( 'WAL file already exists in archive with ' - 'different checksum, overwriting', log_content) + 'different checksum, overwriting', log.content) # @unittest.skip("skip") def test_archive_push_partial_file_exists(self): @@ -2049,14 +2041,22 @@ def test_archive_push_sanity(self): replica.promote() replica.pgbench_init(scale=10) - with open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f: - replica_log_content = f.read() + log = tail_file(os.path.join(replica.logs_dir, 'postgresql.log'), + collect=True) + log.wait(regex=r"pushing file.*history") + log.wait(contains='archive-push completed successfully') + log.wait(regex=r"pushing file.*partial") + log.wait(contains='archive-push completed successfully') # make sure that .partial file is not compressed - self.assertNotIn('.partial.gz', replica_log_content) + self.assertNotIn('.partial.gz', log.content) # make sure that .history file is not compressed - self.assertNotIn('.history.gz', replica_log_content) - self.assertNotIn('WARNING', replica_log_content) + self.assertNotIn('.history.gz', log.content) + + replica.stop() + log.wait_shutdown() + + self.assertNotIn('WARNING', log.content) output = self.show_archive( backup_dir, 'node', as_json=False, as_text=True, @@ -2440,18 +2440,11 @@ def test_archive_get_prefetch_corruption(self): os.remove(os.path.join(replica.logs_dir, 'postgresql.log')) replica.slow_start(replica=True) - sleep(60) - - with open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f: - postgres_log_content = f.read() - - self.assertIn( - 'Prefetched WAL segment {0} is invalid, cannot use it'.format(filename), - postgres_log_content) - - self.assertIn( - 'LOG: restored log file "{0}" from archive'.format(filename), - postgres_log_content) + prefetch_line = 'Prefetched WAL segment {0} is invalid, cannot use it'.format(filename) + restored_line = 'LOG: restored log file "{0}" from archive'.format(filename) + tailer = tail_file(os.path.join(replica.logs_dir, 'postgresql.log')) + tailer.wait(contains=prefetch_line) + tailer.wait(contains=restored_line) # @unittest.skip("skip") def test_archive_show_partial_files_handling(self): diff --git a/tests/helpers/__init__.py b/tests/helpers/__init__.py 
index 4ae3ef8c4..2e5ed40e8 100644 --- a/tests/helpers/__init__.py +++ b/tests/helpers/__init__.py @@ -1,4 +1,4 @@ -__all__ = ['ptrack_helpers', 'cfs_helpers', 'expected_errors'] +__all__ = ['ptrack_helpers', 'cfs_helpers', 'data_helpers'] import unittest diff --git a/tests/helpers/data_helpers.py b/tests/helpers/data_helpers.py new file mode 100644 index 000000000..27cb66c3d --- /dev/null +++ b/tests/helpers/data_helpers.py @@ -0,0 +1,78 @@ +import re +import unittest +import functools +import time + +def _tail_file(file, linetimeout, totaltimeout): + start = time.time() + with open(file, 'r') as f: + waits = 0 + while waits < linetimeout: + line = f.readline() + if line == '': + waits += 1 + time.sleep(1) + continue + waits = 0 + yield line + if time.time() - start > totaltimeout: + raise TimeoutError("total timeout tailing %s" % (file,)) + else: + raise TimeoutError("line timeout tailing %s" % (file,)) + + +class tail_file(object): # snake case to immitate function + def __init__(self, filename, *, linetimeout=10, totaltimeout=60, collect=False): + self.filename = filename + self.tailer = _tail_file(filename, linetimeout, totaltimeout) + self.collect = collect + self.lines = [] + self._content = None + + def __iter__(self): + return self + + def __next__(self): + line = next(self.tailer) + if self.collect: + self.lines.append(line) + self._content = None + return line + + @property + def content(self): + if not self.collect: + raise AttributeError("content collection is not enabled", + name="content", obj=self) + if not self._content: + self._content = "".join(self.lines) + return self._content + + def drop_content(self): + self.lines.clear() + self._content = None + + def stop_collect(self): + self.drop_content() + self.collect = False + + def wait(self, *, contains:str = None, regex:str = None): + assert contains != None or regex != None + assert contains == None or regex == None + try: + for line in self: + if contains is not None and contains in line: + break + if regex is not None and re.search(regex, line): + break + except TimeoutError: + msg = "Didn't found expected " + if contains is not None: + msg += repr(contains) + elif regex is not None: + msg += f"/{regex}/" + msg += f" in {self.filename}" + raise unittest.TestCase.failureException(msg) + + def wait_shutdown(self): + self.wait(contains='database system is shut down') From fa2902090ac2c9e4f7a006c6e88461424757cac1 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 12 Jan 2023 00:04:47 +0300 Subject: [PATCH 440/525] [PBCKP-423] and backport cleanup_ptrack for test_archive_push_sanity --- tests/archive_test.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/tests/archive_test.py b/tests/archive_test.py index dd6959290..00fd1f592 100644 --- a/tests/archive_test.py +++ b/tests/archive_test.py @@ -2013,7 +2013,7 @@ def test_archive_push_sanity(self): self.backup_node(backup_dir, 'node', node) with open(os.path.join(node.logs_dir, 'postgresql.log'), 'r') as f: - postgres_log_content = f.read() + postgres_log_content = cleanup_ptrack(f.read()) # print(postgres_log_content) # make sure that .backup file is not compressed @@ -2056,7 +2056,7 @@ def test_archive_push_sanity(self): replica.stop() log.wait_shutdown() - self.assertNotIn('WARNING', log.content) + self.assertNotIn('WARNING', cleanup_ptrack(log.content)) output = self.show_archive( backup_dir, 'node', as_json=False, as_text=True, @@ -2662,6 +2662,17 @@ def test_archive_empty_history_file(self): 'WARNING: History file is corrupted or 
missing: "{0}"'.format(os.path.join(wal_dir, '00000004.history')), log_content) + +def cleanup_ptrack(log_content): + # PBCKP-423 - need to clean ptrack warning + ptrack_is_not = 'Ptrack 1.X is not supported anymore' + if ptrack_is_not in log_content: + lines = [line for line in log_content.splitlines() + if ptrack_is_not not in line] + log_content = "".join(lines) + return log_content + + # TODO test with multiple not archived segments. # TODO corrupted file in archive. From 343c6a0f98f936727b7154d3c0ba6b687db7bff4 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 12 Jan 2023 00:26:45 +0300 Subject: [PATCH 441/525] [PBCKP-423] add '.partial.part' detection as well Purposes are: - to not issue WARNING - to remove file properly in delete_walfiles_in_tli --- src/catalog.c | 3 ++- src/pg_probackup.h | 5 +++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/catalog.c b/src/catalog.c index 92a2d84b7..afbac28ab 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1623,7 +1623,8 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) } /* temp WAL segment */ else if (IsTempXLogFileName(file->name) || - IsTempCompressXLogFileName(file->name)) + IsTempCompressXLogFileName(file->name) || + IsTempPartialXLogFileName(file->name)) { elog(VERBOSE, "temp WAL file \"%s\"", file->name); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index fa3bc4123..6ff8a41b3 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -777,6 +777,11 @@ typedef struct StopBackupCallbackParams strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ strcmp((fname) + XLOG_FNAME_LEN, ".part") == 0) +#define IsTempPartialXLogFileName(fname) \ + (strlen(fname) == XLOG_FNAME_LEN + strlen(".partial.part") && \ + strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ + strcmp((fname) + XLOG_FNAME_LEN, ".partial.part") == 0) + #define IsTempCompressXLogFileName(fname) \ (strlen(fname) == XLOG_FNAME_LEN + strlen(".gz.part") && \ strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ From 612f530de0df16616d01529551ab24c011ad64bb Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Fri, 9 Dec 2022 11:51:48 +0300 Subject: [PATCH 442/525] [PBCKP-287] fix. 
added cfs files grouping and processing cfs segment in single thread manner --- src/backup.c | 284 +++++++++++++++++++++++++++++++++------------ src/dir.c | 14 ++- src/pg_probackup.h | 7 +- src/utils/parray.c | 27 +++++ src/utils/parray.h | 4 + 5 files changed, 257 insertions(+), 79 deletions(-) diff --git a/src/backup.c b/src/backup.c index 35fc98092..415f4a02a 100644 --- a/src/backup.c +++ b/src/backup.c @@ -65,7 +65,11 @@ static bool pg_is_in_recovery(PGconn *conn); static bool pg_is_superuser(PGconn *conn); static void check_server_version(PGconn *conn, PGNodeInfo *nodeInfo); static void confirm_block_size(PGconn *conn, const char *name, int blcksz); -static void set_cfs_datafiles(parray *files, const char *root, char *relative, size_t i); +static size_t rewind_and_mark_cfs_datafiles(parray *files, const char *root, char *relative, size_t i); +static void group_cfs_segments(parray *files, size_t first, size_t last); +static bool remove_excluded_files_criterion(void *value, void *exclude_args); +static void backup_cfs_segment(int i, pgFile *file, backup_files_arg *arguments); +static void process_file(int i, pgFile *file, backup_files_arg *arguments); static StopBackupCallbackParams stop_callback_params; @@ -2054,8 +2058,6 @@ static void * backup_files(void *arg) { int i; - char from_fullpath[MAXPGPATH]; - char to_fullpath[MAXPGPATH]; static time_t prev_time; backup_files_arg *arguments = (backup_files_arg *) arg; @@ -2067,7 +2069,6 @@ backup_files(void *arg) for (i = 0; i < n_backup_files_list; i++) { pgFile *file = (pgFile *) parray_get(arguments->files_list, i); - pgFile *prev_file = NULL; /* We have already copied all directories */ if (S_ISDIR(file->mode)) @@ -2087,6 +2088,9 @@ backup_files(void *arg) } } + if (file->skip_cfs_nested) + continue; + if (!pg_atomic_test_set_flag(&file->lock)) continue; @@ -2097,89 +2101,146 @@ backup_files(void *arg) elog(progress ? INFO : LOG, "Progress: (%d/%d). Process file \"%s\"", i + 1, n_backup_files_list, file->rel_path); - /* Handle zero sized files */ - if (file->size == 0) + if (file->is_cfs) { - file->write_size = 0; - continue; - } - - /* construct destination filepath */ - if (file->external_dir_num == 0) - { - join_path_components(from_fullpath, arguments->from_root, file->rel_path); - join_path_components(to_fullpath, arguments->to_root, file->rel_path); + backup_cfs_segment(i, file, arguments); } else { - char external_dst[MAXPGPATH]; - char *external_path = parray_get(arguments->external_dirs, - file->external_dir_num - 1); + process_file(i, file, arguments); + } + } + + /* ssh connection to longer needed */ + fio_disconnect(); + + /* Data files transferring is successful */ + arguments->ret = 0; + + return NULL; +} + +static void +process_file(int i, pgFile *file, backup_files_arg *arguments) +{ + char from_fullpath[MAXPGPATH]; + char to_fullpath[MAXPGPATH]; + pgFile *prev_file = NULL; + + elog(progress ? INFO : LOG, "Progress: (%d/%zu). 
Process file \"%s\"", + i + 1, parray_num(arguments->files_list), file->rel_path); - makeExternalDirPathByNum(external_dst, + /* Handle zero sized files */ + if (file->size == 0) + { + file->write_size = 0; + return; + } + + /* construct from_fullpath & to_fullpath */ + if (file->external_dir_num == 0) + { + join_path_components(from_fullpath, arguments->from_root, file->rel_path); + join_path_components(to_fullpath, arguments->to_root, file->rel_path); + } + else + { + char external_dst[MAXPGPATH]; + char *external_path = parray_get(arguments->external_dirs, + file->external_dir_num - 1); + + makeExternalDirPathByNum(external_dst, arguments->external_prefix, file->external_dir_num); - join_path_components(to_fullpath, external_dst, file->rel_path); - join_path_components(from_fullpath, external_path, file->rel_path); - } - - /* Encountered some strange beast */ - if (!S_ISREG(file->mode)) - elog(WARNING, "Unexpected type %d of file \"%s\", skipping", - file->mode, from_fullpath); + join_path_components(to_fullpath, external_dst, file->rel_path); + join_path_components(from_fullpath, external_path, file->rel_path); + } - /* Check that file exist in previous backup */ - if (current.backup_mode != BACKUP_MODE_FULL) - { - pgFile **prev_file_tmp = NULL; - prev_file_tmp = (pgFile **) parray_bsearch(arguments->prev_filelist, - file, pgFileCompareRelPathWithExternal); - if (prev_file_tmp) - { - /* File exists in previous backup */ - file->exists_in_prev = true; - prev_file = *prev_file_tmp; - } - } + /* Encountered some strange beast */ + if (!S_ISREG(file->mode)) + { + elog(WARNING, "Unexpected type %d of file \"%s\", skipping", + file->mode, from_fullpath); + return; + } - /* backup file */ - if (file->is_datafile && !file->is_cfs) - { - backup_data_file(file, from_fullpath, to_fullpath, - arguments->prev_start_lsn, - current.backup_mode, - instance_config.compress_alg, - instance_config.compress_level, - arguments->nodeInfo->checksum_version, - arguments->hdr_map, false); - } - else + /* Check that file exist in previous backup */ + if (current.backup_mode != BACKUP_MODE_FULL) + { + pgFile **prevFileTmp = NULL; + prevFileTmp = (pgFile **) parray_bsearch(arguments->prev_filelist, + file, pgFileCompareRelPathWithExternal); + if (prevFileTmp) { - backup_non_data_file(file, prev_file, from_fullpath, to_fullpath, - current.backup_mode, current.parent_backup, true); + /* File exists in previous backup */ + file->exists_in_prev = true; + prev_file = *prevFileTmp; } + } - if (file->write_size == FILE_NOT_FOUND) - continue; + /* backup file */ + if (file->is_datafile && !file->is_cfs) + { + backup_data_file(file, from_fullpath, to_fullpath, + arguments->prev_start_lsn, + current.backup_mode, + instance_config.compress_alg, + instance_config.compress_level, + arguments->nodeInfo->checksum_version, + arguments->hdr_map, false); + } + else + { + backup_non_data_file(file, prev_file, from_fullpath, to_fullpath, + current.backup_mode, current.parent_backup, true); + } - if (file->write_size == BYTES_INVALID) - { - elog(LOG, "Skipping the unchanged file: \"%s\"", from_fullpath); - continue; - } + if (file->write_size == FILE_NOT_FOUND) + return; - elog(LOG, "File \"%s\". Copied "INT64_FORMAT " bytes", - from_fullpath, file->write_size); + if (file->write_size == BYTES_INVALID) + { + elog(LOG, "Skipping the unchanged file: \"%s\"", from_fullpath); + return; } - /* ssh connection to longer needed */ - fio_disconnect(); + elog(LOG, "File \"%s\". 
Copied "INT64_FORMAT " bytes", + from_fullpath, file->write_size); - /* Data files transferring is successful */ - arguments->ret = 0; +} - return NULL; +static void +backup_cfs_segment(int i, pgFile *file, backup_files_arg *arguments) { + pgFile *data_file = file; + pgFile *cfm_file = NULL; + pgFile *data_bck_file = NULL; + pgFile *cfm_bck_file = NULL; + + while (data_file->cfs_chain) + { + data_file = data_file->cfs_chain; + if (data_file->forkName == cfm) + cfm_file = data_file; + if (data_file->forkName == cfs_bck) + data_bck_file = data_file; + if (data_file->forkName == cfm_bck) + cfm_bck_file = data_file; + } + data_file = file; + Assert(cfm_file); /* ensure we always have cfm exist */ + + elog(LOG, "backup CFS segment %s, data_file=%s, cfm_file=%s, data_bck_file=%s, cfm_bck_file=%s", + data_file->name, data_file->name, cfm_file->name, data_bck_file == NULL? "NULL": data_bck_file->name, cfm_bck_file == NULL? "NULL": cfm_bck_file->name); + + /* storing cfs in order data_bck_file -> cfm_bck -> data_file -> map */ + if (cfm_bck_file) + process_file(i, cfm_bck_file, arguments); + if (data_bck_file) + process_file(i, data_bck_file, arguments); + process_file(i, cfm_file, arguments); + process_file(i, data_file, arguments); + elog(LOG, "Backup CFS segment %s done", data_file->name); } /* @@ -2209,11 +2270,12 @@ parse_filelist_filenames(parray *files, const char *root) */ if (strcmp(file->name, "pg_compression") == 0) { + /* processing potential cfs tablespace */ Oid tblspcOid; Oid dbOid; char tmp_rel_path[MAXPGPATH]; /* - * Check that the file is located under + * Check that pg_compression is located under * TABLESPACE_VERSION_DIRECTORY */ sscanf_result = sscanf(file->rel_path, PG_TBLSPC_DIR "/%u/%s/%u", @@ -2222,8 +2284,12 @@ parse_filelist_filenames(parray *files, const char *root) /* Yes, it is */ if (sscanf_result == 2 && strncmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY, - strlen(TABLESPACE_VERSION_DIRECTORY)) == 0) - set_cfs_datafiles(files, root, file->rel_path, i); + strlen(TABLESPACE_VERSION_DIRECTORY)) == 0) { + /* rewind index to the beginning of cfs tablespace */ + size_t start = rewind_and_mark_cfs_datafiles(files, root, file->rel_path, i); + /* group every to cfs segments chains */ + group_cfs_segments(files, start, i); + } } } @@ -2238,7 +2304,7 @@ parse_filelist_filenames(parray *files, const char *root) */ int unlogged_file_num = i - 1; pgFile *unlogged_file = (pgFile *) parray_get(files, - unlogged_file_num); + unlogged_file_num); unlogged_file_reloid = file->relOid; @@ -2246,11 +2312,10 @@ parse_filelist_filenames(parray *files, const char *root) (unlogged_file_reloid != 0) && (unlogged_file->relOid == unlogged_file_reloid)) { - pgFileFree(unlogged_file); - parray_remove(files, unlogged_file_num); + /* flagged to remove from list on stage 2 */ + unlogged_file->remove_from_list = true; unlogged_file_num--; - i--; unlogged_file = (pgFile *) parray_get(files, unlogged_file_num); @@ -2260,6 +2325,68 @@ parse_filelist_filenames(parray *files, const char *root) i++; } + + /* stage 2. clean up from temporary tables */ + parray_remove_if(files, remove_excluded_files_criterion, NULL, pgFileFree); +} + +static bool +remove_excluded_files_criterion(void *value, void *exclude_args) { + pgFile *file = (pgFile*)value; + return file->remove_from_list; +} + +/* + * For every cfs segment do group its files to linked list, datafile on the head. + * All non data files of segment moved to linked list and marked to skip in backup processing threads. 
+ * @param first - first index of cfs tablespace files + * @param last - last index of cfs tablespace files + */ +void group_cfs_segments(parray *files, size_t first, size_t last) {/* grouping cfs files by relOid.segno, removing leafs of group */ + + for (;first <= last; first++) + { + pgFile *file = parray_get(files, first); + + if (file->is_cfs) + { + pgFile *cfs_file = file; + size_t counter = first + 1; + pgFile *chain_file = parray_get(files, counter); + + bool has_cfm = false; /* flag for later assertion the cfm file also exist */ + + elog(LOG, "Preprocessing cfs file %s, %u.%d", cfs_file->name, cfs_file->relOid, cfs_file->segno); + + elog(LOG, "Checking file %s, %u.%d as cfs chain", chain_file->name, chain_file->relOid, chain_file->segno); + + /* scanning cfs segment files */ + while (cfs_file->relOid == chain_file->relOid && + cfs_file->segno == chain_file->segno) + { + elog(LOG, "Grouping cfs chain file %s, %d.%d", chain_file->name, chain_file->relOid, chain_file->segno); + chain_file->skip_cfs_nested = true; + cfs_file->cfs_chain = chain_file; /* adding to cfs group */ + cfs_file = chain_file; + + /* next file */ + counter++; + chain_file = parray_get(files, counter); + elog(LOG, "Checking file %s, %u.%d as cfs chain", chain_file->name, chain_file->relOid, chain_file->segno); + } + + /* assertion - we always have cfs data + cfs map files */ + cfs_file = file; + for (; cfs_file; cfs_file = cfs_file->cfs_chain) { + elog(LOG, "searching cfm in %s, chain is %s", cfs_file->name, cfs_file->cfs_chain == NULL? "NULL": cfs_file->cfs_chain->name); + has_cfm = cfs_file->forkName == cfm; + } + Assert(has_cfm); + + /* shifting to last cfs segment file */ + first = counter-1; + } + } } /* If file is equal to pg_compression, then we consider this tablespace as @@ -2273,9 +2400,11 @@ parse_filelist_filenames(parray *files, const char *root) * tblspcOid/TABLESPACE_VERSION_DIRECTORY/dboid/1 * tblspcOid/TABLESPACE_VERSION_DIRECTORY/dboid/1.cfm * tblspcOid/TABLESPACE_VERSION_DIRECTORY/pg_compression + * + * @returns index of first tablespace entry, i.e tblspcOid/TABLESPACE_VERSION_DIRECTORY */ -static void -set_cfs_datafiles(parray *files, const char *root, char *relative, size_t i) +static size_t +rewind_and_mark_cfs_datafiles(parray *files, const char *root, char *relative, size_t i) { int len; int p; @@ -2311,6 +2440,7 @@ set_cfs_datafiles(parray *files, const char *root, char *relative, size_t i) } } free(cfs_tblspc_path); + return p+1; } /* diff --git a/src/dir.c b/src/dir.c index 0a55c0f67..4bae25de2 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1837,7 +1837,19 @@ set_forkname(pgFile *file) return false; } - /* CFS "fork name" */ + /* CFS family fork names */ + if (file->forkName == none && + is_forkname(file->name, &i, ".cfm.bck")) + { + /* /^\d+(\.\d+)?\.cfm\.bck$/ */ + file->forkName = cfm_bck; + } + if (file->forkName == none && + is_forkname(file->name, &i, ".bck")) + { + /* /^\d+(\.\d+)?\.bck$/ */ + file->forkName = cfs_bck; + } if (file->forkName == none && is_forkname(file->name, &i, ".cfm")) { diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 6ff8a41b3..a5c17d9f8 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -222,7 +222,9 @@ typedef enum ForkName fsm, cfm, init, - ptrack + ptrack, + cfs_bck, + cfm_bck } ForkName; #define INIT_FILE_CRC32(use_crc32c, crc) \ @@ -278,6 +280,7 @@ typedef struct pgFile int segno; /* Segment number for ptrack */ int n_blocks; /* number of blocks in the data file in data directory */ bool is_cfs; /* Flag to distinguish files compressed by 
CFS*/ + struct pgFile *cfs_chain; /* linked list of CFS segment's cfm, bck, cfm_bck related files */ int external_dir_num; /* Number of external directory. 0 if not external */ bool exists_in_prev; /* Mark files, both data and regular, that exists in previous backup */ CompressAlg compress_alg; /* compression algorithm applied to the file */ @@ -292,6 +295,8 @@ typedef struct pgFile pg_off_t hdr_off; /* offset in header map */ int hdr_size; /* length of headers */ bool excluded; /* excluded via --exclude-path option */ + bool skip_cfs_nested; /* mark to skip in processing treads as nested to cfs_chain */ + bool remove_from_list; /* tmp flag to clean up files list from temp and unlogged tables */ } pgFile; typedef struct page_map_entry diff --git a/src/utils/parray.c b/src/utils/parray.c index 792e26907..65377c001 100644 --- a/src/utils/parray.c +++ b/src/utils/parray.c @@ -217,3 +217,30 @@ bool parray_contains(parray *array, void *elem) } return false; } + +/* effectively remove elements that satisfy certain criterion */ +void +parray_remove_if(parray *array, criterion_fn criterion, void *args, cleanup_fn clean) { + int i = 0; + int j = 0; + + /* removing certain elements */ + while(j < parray_num(array)) { + void *value = array->data[j]; + // if the value satisfies the criterion, clean it up + if(criterion(value, args)) { + clean(value); + j++; + continue; + } + + if(i != j) + array->data[i] = array->data[j]; + + i++; + j++; + } + + /* adjust the number of used elements */ + array->used -= j - i; +} diff --git a/src/utils/parray.h b/src/utils/parray.h index e92ad728c..08846f252 100644 --- a/src/utils/parray.h +++ b/src/utils/parray.h @@ -16,6 +16,9 @@ */ typedef struct parray parray; +typedef bool (*criterion_fn)(void *value, void *args); +typedef void (*cleanup_fn)(void *ref); + extern parray *parray_new(void); extern void parray_expand(parray *array, size_t newnum); extern void parray_free(parray *array); @@ -32,6 +35,7 @@ extern void *parray_bsearch(parray *array, const void *key, int(*compare)(const extern int parray_bsearch_index(parray *array, const void *key, int(*compare)(const void *, const void *)); extern void parray_walk(parray *array, void (*action)(void *)); extern bool parray_contains(parray *array, void *elem); +extern void parray_remove_if(parray *array, criterion_fn criterion, void *args, cleanup_fn clean); #endif /* PARRAY_H */ From 2f27142009dc6c6c1783e07622b5f285b43577bc Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Thu, 12 Jan 2023 13:47:28 +0300 Subject: [PATCH 443/525] [PBCKP-287] skipping data_bck_file+cfm_bck_file on backup when they both exist --- src/backup.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/src/backup.c b/src/backup.c index 415f4a02a..6831ec8f6 100644 --- a/src/backup.c +++ b/src/backup.c @@ -2233,11 +2233,24 @@ backup_cfs_segment(int i, pgFile *file, backup_files_arg *arguments) { elog(LOG, "backup CFS segment %s, data_file=%s, cfm_file=%s, data_bck_file=%s, cfm_bck_file=%s", data_file->name, data_file->name, cfm_file->name, data_bck_file == NULL? "NULL": data_bck_file->name, cfm_bck_file == NULL? "NULL": cfm_bck_file->name); - /* storing cfs in order data_bck_file -> cfm_bck -> data_file -> map */ - if (cfm_bck_file) - process_file(i, cfm_bck_file, arguments); + /* storing cfs segment. processing corner case [PBCKP-287] stage 1. + * - when we do have data_bck_file we should skip both data_bck_file and cfm_bck_file if exists. + * they are removed by cfs_recover() during postgres start. 
+ */ if (data_bck_file) - process_file(i, data_bck_file, arguments); + { + if (cfm_bck_file) + cfm_bck_file->write_size = FILE_NOT_FOUND; + data_bck_file->write_size = FILE_NOT_FOUND; + } + /* else we store cfm_bck_file. processing corner case [PBCKP-287] stage 2. + * - when we do have cfm_bck_file only we should store it. + * it will replace cfm_file after postgres start. + */ + else if (cfm_bck_file) + process_file(i, cfm_bck_file, arguments); + + /* storing cfs segment in order cfm_file -> datafile to guarantee their consistency */ process_file(i, cfm_file, arguments); process_file(i, data_file, arguments); elog(LOG, "Backup CFS segment %s done", data_file->name); From d3babee2ae5caafcff0ec106070af4a2955f5458 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 12 Jan 2023 15:23:34 +0300 Subject: [PATCH 444/525] [PBCKP-287] simplify and fix cfs chaining - properly chain only main fork and cfs related forks - properly chain datafile with its cfm even sort order places other segments between - don't raise error for system tables which has no companion cfm. + fix couple of tests --- src/backup.c | 142 ++++++++++++++++++++------------------- src/utils/pgut.h | 10 +++ tests/cfs_backup_test.py | 27 ++------ 3 files changed, 90 insertions(+), 89 deletions(-) diff --git a/src/backup.c b/src/backup.c index 6831ec8f6..af225017a 100644 --- a/src/backup.c +++ b/src/backup.c @@ -65,8 +65,7 @@ static bool pg_is_in_recovery(PGconn *conn); static bool pg_is_superuser(PGconn *conn); static void check_server_version(PGconn *conn, PGNodeInfo *nodeInfo); static void confirm_block_size(PGconn *conn, const char *name, int blcksz); -static size_t rewind_and_mark_cfs_datafiles(parray *files, const char *root, char *relative, size_t i); -static void group_cfs_segments(parray *files, size_t first, size_t last); +static void rewind_and_mark_cfs_datafiles(parray *files, const char *root, char *relative, size_t i); static bool remove_excluded_files_criterion(void *value, void *exclude_args); static void backup_cfs_segment(int i, pgFile *file, backup_files_arg *arguments); static void process_file(int i, pgFile *file, backup_files_arg *arguments); @@ -2228,7 +2227,11 @@ backup_cfs_segment(int i, pgFile *file, backup_files_arg *arguments) { cfm_bck_file = data_file; } data_file = file; - Assert(cfm_file); /* ensure we always have cfm exist */ + if (data_file->relOid >= FirstNormalObjectId && cfm_file == NULL) + { + elog(ERROR, "'CFS' file '%s' have to have '%s.cfm' companion file", + data_file->rel_path, data_file->name); + } elog(LOG, "backup CFS segment %s, data_file=%s, cfm_file=%s, data_bck_file=%s, cfm_bck_file=%s", data_file->name, data_file->name, cfm_file->name, data_bck_file == NULL? "NULL": data_bck_file->name, cfm_bck_file == NULL? "NULL": cfm_bck_file->name); @@ -2251,7 +2254,10 @@ backup_cfs_segment(int i, pgFile *file, backup_files_arg *arguments) { process_file(i, cfm_bck_file, arguments); /* storing cfs segment in order cfm_file -> datafile to guarantee their consistency */ - process_file(i, cfm_file, arguments); + /* cfm_file could be NULL for system tables. But we don't clear is_cfs flag + * for compatibility with older pg_probackup. 
*/ + if (cfm_file) + process_file(i, cfm_file, arguments); process_file(i, data_file, arguments); elog(LOG, "Backup CFS segment %s done", data_file->name); } @@ -2299,9 +2305,7 @@ parse_filelist_filenames(parray *files, const char *root) strncmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY, strlen(TABLESPACE_VERSION_DIRECTORY)) == 0) { /* rewind index to the beginning of cfs tablespace */ - size_t start = rewind_and_mark_cfs_datafiles(files, root, file->rel_path, i); - /* group every to cfs segments chains */ - group_cfs_segments(files, start, i); + rewind_and_mark_cfs_datafiles(files, root, file->rel_path, i); } } } @@ -2349,57 +2353,11 @@ remove_excluded_files_criterion(void *value, void *exclude_args) { return file->remove_from_list; } -/* - * For every cfs segment do group its files to linked list, datafile on the head. - * All non data files of segment moved to linked list and marked to skip in backup processing threads. - * @param first - first index of cfs tablespace files - * @param last - last index of cfs tablespace files - */ -void group_cfs_segments(parray *files, size_t first, size_t last) {/* grouping cfs files by relOid.segno, removing leafs of group */ - - for (;first <= last; first++) - { - pgFile *file = parray_get(files, first); - - if (file->is_cfs) - { - pgFile *cfs_file = file; - size_t counter = first + 1; - pgFile *chain_file = parray_get(files, counter); - - bool has_cfm = false; /* flag for later assertion the cfm file also exist */ - - elog(LOG, "Preprocessing cfs file %s, %u.%d", cfs_file->name, cfs_file->relOid, cfs_file->segno); - - elog(LOG, "Checking file %s, %u.%d as cfs chain", chain_file->name, chain_file->relOid, chain_file->segno); - - /* scanning cfs segment files */ - while (cfs_file->relOid == chain_file->relOid && - cfs_file->segno == chain_file->segno) - { - elog(LOG, "Grouping cfs chain file %s, %d.%d", chain_file->name, chain_file->relOid, chain_file->segno); - chain_file->skip_cfs_nested = true; - cfs_file->cfs_chain = chain_file; /* adding to cfs group */ - cfs_file = chain_file; - - /* next file */ - counter++; - chain_file = parray_get(files, counter); - elog(LOG, "Checking file %s, %u.%d as cfs chain", chain_file->name, chain_file->relOid, chain_file->segno); - } - - /* assertion - we always have cfs data + cfs map files */ - cfs_file = file; - for (; cfs_file; cfs_file = cfs_file->cfs_chain) { - elog(LOG, "searching cfm in %s, chain is %s", cfs_file->name, cfs_file->cfs_chain == NULL? 
"NULL": cfs_file->cfs_chain->name); - has_cfm = cfs_file->forkName == cfm; - } - Assert(has_cfm); - - /* shifting to last cfs segment file */ - first = counter-1; - } - } +static uint32_t +hash_rel_seg(pgFile* file) +{ + uint32 hash = hash_mix32_2(file->relOid, file->segno); + return hash_mix32_2(hash, 0xcf5); } /* If file is equal to pg_compression, then we consider this tablespace as @@ -2416,13 +2374,24 @@ void group_cfs_segments(parray *files, size_t first, size_t last) {/* grouping c * * @returns index of first tablespace entry, i.e tblspcOid/TABLESPACE_VERSION_DIRECTORY */ -static size_t +static void rewind_and_mark_cfs_datafiles(parray *files, const char *root, char *relative, size_t i) { int len; int p; + int j; pgFile *prev_file; + pgFile *tmp_file; char *cfs_tblspc_path; + uint32_t h; + + /* hash table for cfm files */ +#define HASHN 128 + parray *hashtab[HASHN] = {NULL}; + parray *bucket; + for (p = 0; p < HASHN; p++) + hashtab[p] = parray_new(); + cfs_tblspc_path = strdup(relative); if(!cfs_tblspc_path) @@ -2437,23 +2406,60 @@ rewind_and_mark_cfs_datafiles(parray *files, const char *root, char *relative, s elog(LOG, "Checking file in cfs tablespace %s", prev_file->rel_path); - if (strstr(prev_file->rel_path, cfs_tblspc_path) != NULL) + if (strstr(prev_file->rel_path, cfs_tblspc_path) == NULL) + { + elog(LOG, "Breaking on %s", prev_file->rel_path); + break; + } + + if (!S_ISREG(prev_file->mode)) + continue; + + h = hash_rel_seg(prev_file); + bucket = hashtab[h % HASHN]; + + if (prev_file->forkName == cfm || prev_file->forkName == cfm_bck || + prev_file->forkName == cfs_bck) { - if (S_ISREG(prev_file->mode) && prev_file->is_datafile) + parray_append(bucket, prev_file); + } + else if (prev_file->is_datafile && prev_file->forkName == none) + { + elog(LOG, "Processing 'cfs' file %s", prev_file->rel_path); + /* have to mark as is_cfs even for system-tables for compatibility + * with older pg_probackup */ + prev_file->is_cfs = true; + prev_file->cfs_chain = NULL; + for (j = 0; j < parray_num(bucket); j++) { - elog(LOG, "Setting 'is_cfs' on file %s, name %s", - prev_file->rel_path, prev_file->name); - prev_file->is_cfs = true; + tmp_file = parray_get(bucket, j); + elog(LOG, "Linking 'cfs' file '%s' to '%s'", + tmp_file->rel_path, prev_file->rel_path); + if (tmp_file->relOid == prev_file->relOid && + tmp_file->segno == prev_file->segno) + { + tmp_file->cfs_chain = prev_file->cfs_chain; + prev_file->cfs_chain = tmp_file; + parray_remove(bucket, j); + j--; + } } } - else + } + + for (p = 0; p < HASHN; p++) + { + bucket = hashtab[p]; + for (j = 0; j < parray_num(bucket); j++) { - elog(LOG, "Breaking on %s", prev_file->rel_path); - break; + tmp_file = parray_get(bucket, j); + elog(WARNING, "Orphaned cfs related file '%s'", tmp_file->rel_path); } + parray_free(bucket); + hashtab[p] = NULL; } +#undef HASHN free(cfs_tblspc_path); - return p+1; } /* diff --git a/src/utils/pgut.h b/src/utils/pgut.h index f8554f9d0..4fd659b82 100644 --- a/src/utils/pgut.h +++ b/src/utils/pgut.h @@ -115,4 +115,14 @@ extern int usleep(unsigned int usec); #define ARG_SIZE_HINT static #endif +static inline uint32_t hash_mix32_2(uint32_t a, uint32_t b) +{ + b ^= (a<<7)|(a>>25); + a *= 0xdeadbeef; + b *= 0xcafeabed; + a ^= a >> 16; + b ^= b >> 15; + return a^b; +} + #endif /* PGUT_H */ diff --git a/tests/cfs_backup_test.py b/tests/cfs_backup_test.py index cd2826d21..fb4a6c6b8 100644 --- a/tests/cfs_backup_test.py +++ b/tests/cfs_backup_test.py @@ -431,16 +431,10 @@ def test_page_doesnt_store_unchanged_cfm(self): "FROM 
generate_series(0,256) i".format('t1', tblspace_name) ) - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) + self.node.safe_psql("postgres", "checkpoint") + + backup_id_full = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') self.assertTrue( find_by_extensions( @@ -449,16 +443,8 @@ def test_page_doesnt_store_unchanged_cfm(self): "ERROR: .cfm files not found in backup dir" ) - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='page') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) + backup_id = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='page') show_backup = self.show_pb(self.backup_dir, 'node', backup_id) self.assertEqual( @@ -1046,7 +1032,6 @@ def test_fullbackup_after_create_table_page_after_create_table_stream(self): ) # --- Make backup with not valid data(broken .cfm) --- # - @unittest.expectedFailure # @unittest.skip("skip") @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_delete_random_cfm_file_from_tablespace_dir(self): From b240b9077263aed673d6cdd0bafe5a7032e904e0 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 13 Jan 2023 09:38:48 +0300 Subject: [PATCH 445/525] fix for < Pg12 --- src/backup.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/backup.c b/src/backup.c index af225017a..78a679244 100644 --- a/src/backup.c +++ b/src/backup.c @@ -13,6 +13,9 @@ #if PG_VERSION_NUM < 110000 #include "catalog/catalog.h" #endif +#if PG_VERSION_NUM < 120000 +#include "access/transam.h" +#endif #include "catalog/pg_tablespace.h" #include "pgtar.h" #include "streamutil.h" From 610216c6f8b1036bf6de94d92d6acef4ed71fb87 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 13 Jan 2023 15:31:03 +0300 Subject: [PATCH 446/525] ptrack_helpers.py: fix compare_pgdata --- tests/helpers/ptrack_helpers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 067225d66..c96007448 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1875,7 +1875,7 @@ def compare_pgdata(self, original_pgdata, restored_pgdata, exclusion_dict = dict # Compare directories restored_dirs = set(restored_pgdata['dirs']) - original_dirs = set(restored_pgdata['dirs']) + original_dirs = set(original_pgdata['dirs']) for directory in sorted(restored_dirs - original_dirs): fail = True @@ -1903,7 +1903,7 @@ def compare_pgdata(self, original_pgdata, restored_pgdata, exclusion_dict = dict restored.mode) restored_files = set(restored_pgdata['files']) - original_files = set(restored_pgdata['files']) + original_files = set(original_pgdata['files']) for file in sorted(restored_files - original_files): # File is present in RESTORED PGDATA From 49bb374d0c9be08152318613a579625e83bc8b2b Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Sat, 24 Dec 2022 14:50:14 +0300 Subject: [PATCH 447/525] [PBCKP-365] Fixed test help_6 test. Added check_locale function for check that locale is installed. 
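
The check simply probes the locale by temporarily switching LC_CTYPE to it
and treating locale.Error as "not installed". A minimal sketch of the
approach (locale_is_installed is a made-up name here; the real helper,
check_locale(), is added in the diff below):

    import locale

    def locale_is_installed(name):
        old = locale.setlocale(locale.LC_CTYPE)      # query current LC_CTYPE, no change
        try:
            locale.setlocale(locale.LC_CTYPE, name)  # raises locale.Error if unavailable
            return True
        except locale.Error:
            return False
        finally:
            locale.setlocale(locale.LC_CTYPE, old)   # always restore the previous locale

With such a probe, test_help_6 can skip itself when ru_RU.utf-8 is not
installed instead of failing outright.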
--- tests/option_test.py | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/tests/option_test.py b/tests/option_test.py index eec1bab44..af4b12b71 100644 --- a/tests/option_test.py +++ b/tests/option_test.py @@ -3,7 +3,6 @@ from .helpers.ptrack_helpers import ProbackupTest, ProbackupException import locale - class OptionTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -220,12 +219,28 @@ def test_options_5(self): def test_help_6(self): """help options""" if ProbackupTest.enable_nls: - self.test_env['LC_ALL'] = 'ru_RU.utf-8' - with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out: - self.assertEqual( - self.run_pb(["--help"]), - help_out.read().decode("utf-8") - ) + if check_locale('ru_RU.utf-8'): + self.test_env['LC_ALL'] = 'ru_RU.utf-8' + with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out: + self.assertEqual( + self.run_pb(["--help"]), + help_out.read().decode("utf-8") + ) + else: + self.skipTest( + "Locale ru_RU.utf-8 doesn't work. You need install ru_RU.utf-8 locale for this test") else: self.skipTest( 'You need configure PostgreSQL with --enabled-nls option for this test') + + +def check_locale(locale_name): + ret=True + old_locale = locale.setlocale(locale.LC_CTYPE,"") + try: + locale.setlocale(locale.LC_CTYPE, locale_name) + except locale.Error: + ret=False + finally: + locale.setlocale(locale.LC_CTYPE, old_locale) + return ret From 137814aa6fd3d4d050ec78d0412c4ed5c6574c93 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 19 Jan 2023 13:45:34 +0300 Subject: [PATCH 448/525] do_retention_merge: fix removing from to_keep_list --- src/delete.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/delete.c b/src/delete.c index 3f299d78b..c8a8c22ec 100644 --- a/src/delete.c +++ b/src/delete.c @@ -552,7 +552,12 @@ do_retention_merge(InstanceState *instanceState, parray *backup_list, /* Try to remove merged incremental backup from both keep and purge lists */ parray_rm(to_purge_list, tmp_backup, pgBackupCompareId); - parray_set(to_keep_list, i, NULL); + for (i = 0; i < parray_num(to_keep_list); i++) + if (parray_get(to_keep_list, i) == tmp_backup) + { + parray_set(to_keep_list, i, NULL); + break; + } } if (!no_validate) pgBackupValidate(full_backup, NULL); From 1f5991dadbd383f6bb8ff36de1d504531491a25f Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 20 Jan 2023 04:06:41 +0300 Subject: [PATCH 449/525] fix cfs handling: forgot to set skip_cfs_nested --- src/backup.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/backup.c b/src/backup.c index 78a679244..7e6e33c53 100644 --- a/src/backup.c +++ b/src/backup.c @@ -2424,6 +2424,7 @@ rewind_and_mark_cfs_datafiles(parray *files, const char *root, char *relative, s if (prev_file->forkName == cfm || prev_file->forkName == cfm_bck || prev_file->forkName == cfs_bck) { + prev_file->skip_cfs_nested = true; parray_append(bucket, prev_file); } else if (prev_file->is_datafile && prev_file->forkName == none) From c8909b825ed973731342fe7369e2dd00e48c2c13 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 9 Feb 2023 15:33:10 +0300 Subject: [PATCH 450/525] Fix warning in get_backup_filelist pg_multixact contains files which looks like db files, but they are not. get_backup_filelist should not pass to set_forkname files not in db folder nor `global` folder. 
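
In other words, set_forkname() should only be reached for regular files
that either belong to a database directory (dbOid != 0) or live under
global/. A condensed sketch of the new rule (needs_forkname is a
hypothetical name; the actual check is inlined in get_backup_filelist(),
see the catalog.c hunk below):

    static bool
    needs_forkname(pgFile *file)
    {
        return file->external_dir_num == 0 &&     /* not from an external directory */
               (file->dbOid != 0 ||               /* file lives in a database dir... */
                path_is_prefix_of_path("global", file->rel_path)) && /* ...or global/ */
               S_ISREG(file->mode);               /* and is a regular file */
    }

A file such as pg_multixact/offsets/1000 lives neither in a database
directory nor under global/, so it is no longer passed to set_forkname()
and the spurious "was stored as ... but looks like" warning goes away.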
--- src/catalog.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/catalog.c b/src/catalog.c index afbac28ab..aadc47bb1 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1142,7 +1142,10 @@ get_backup_filelist(pgBackup *backup, bool strict) if (!file->is_datafile || file->is_cfs) file->size = file->uncompressed_size; - if (file->external_dir_num == 0 && S_ISREG(file->mode)) + if (file->external_dir_num == 0 && + (file->dbOid != 0 || + path_is_prefix_of_path("global", file->rel_path)) && + S_ISREG(file->mode)) { bool is_datafile = file->is_datafile; set_forkname(file); From 297dd2a7eb23821bdb9c53aa4d137639103851fc Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 10 Feb 2023 15:07:25 +0300 Subject: [PATCH 451/525] test for previous commit --- tests/backup_test.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/tests/backup_test.py b/tests/backup_test.py index fc1135cab..32a2cee50 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -3573,3 +3573,36 @@ def test_start_time_few_nodes(self): show_backup2 = self.show_pb(backup_dir2, 'node2')[3] self.assertEqual(show_backup1['id'], show_backup2['id']) + def test_regress_issue_585(self): + """https://github.com/postgrespro/pg_probackup/issues/585""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # create couple of files that looks like db files + with open(os.path.join(node.data_dir, 'pg_multixact/offsets/1000'),'wb') as f: + pass + with open(os.path.join(node.data_dir, 'pg_multixact/members/1000'),'wb') as f: + pass + + self.backup_node( + backup_dir, 'node', node, backup_type='full', + options=['--stream']) + + output = self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--stream'], + return_id=False, + ) + self.assertNotRegex(output, r'WARNING: [^\n]* was stored as .* but looks like') + + node.cleanup() + + output = self.restore_node(backup_dir, 'node', node) + self.assertNotRegex(output, r'WARNING: [^\n]* was stored as .* but looks like') From a2387b5134bc6f56b54079903b1845494d29252e Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Tue, 28 Feb 2023 20:53:02 +0300 Subject: [PATCH 452/525] [PBCKP-211] got rid of timezone hack, tzset() allowed on every platform Now if UTC is true in time2iso we just use strftime to format string and add +00 timezone --- src/utils/configuration.c | 24 +++--------------------- 1 file changed, 3 insertions(+), 21 deletions(-) diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 193d1c680..24c6febbf 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -1294,9 +1294,7 @@ parse_time(const char *value, time_t *result, bool utc_default) { /* set timezone to UTC */ pgut_setenv("TZ", "UTC"); -#ifdef WIN32 tzset(); -#endif } /* convert time to utc unix time */ @@ -1308,9 +1306,7 @@ parse_time(const char *value, time_t *result, bool utc_default) else pgut_unsetenv("TZ"); -#ifdef WIN32 tzset(); -#endif /* adjust time zone */ if (tz_set || utc_default) @@ -1546,33 +1542,19 @@ time2iso(char *buf, size_t len, time_t time, bool utc) time_t gmt; time_t offset; char *ptr = buf; - char *local_tz = getenv("TZ"); /* set timezone to UTC if requested */ if (utc) { - 
pgut_setenv("TZ", "UTC"); -#ifdef WIN32 - tzset(); -#endif + ptm = gmtime(&time); + strftime(ptr, len, "%Y-%m-%d %H:%M:%S+00", ptm); + return; } ptm = gmtime(&time); gmt = mktime(ptm); ptm = localtime(&time); - if (utc) - { - /* return old timezone back if any */ - if (local_tz) - pgut_setenv("TZ", local_tz); - else - pgut_unsetenv("TZ"); -#ifdef WIN32 - tzset(); -#endif - } - /* adjust timezone offset */ offset = time - gmt + (ptm->tm_isdst ? 3600 : 0); From 4c001e86f46c75619c03e96c3d2dc2a3bb8f4695 Mon Sep 17 00:00:00 2001 From: "v.shepard" Date: Fri, 13 Jan 2023 01:03:03 +0100 Subject: [PATCH 453/525] Style and typo fixes in pg_probackup.c, pg_probackup.h --- src/pg_probackup.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index ed48178b4..0e371ef42 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -1186,8 +1186,8 @@ opt_datname_exclude_list(ConfigOption *opt, const char *arg) void opt_datname_include_list(ConfigOption *opt, const char *arg) { - if (strcmp(arg, "tempate0") == 0 || - strcmp(arg, "tempate1") == 0) + if (strcmp(arg, "template0") == 0 || + strcmp(arg, "template1") == 0) elog(ERROR, "Databases 'template0' and 'template1' cannot be used for partial restore or validation"); opt_parser_add_to_parray_helper(&datname_include_list, arg); From 7249b10be0d3a92cd983088c71abe397d3974eee Mon Sep 17 00:00:00 2001 From: Vyacheslav Makarov Date: Fri, 19 Aug 2022 06:10:58 +0300 Subject: [PATCH 454/525] [PBCKP-247]: typo in the option_get_value function. --- src/utils/configuration.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 24c6febbf..61d153baa 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -689,7 +689,7 @@ option_get_value(ConfigOption *opt) if (opt->type == 'i') convert_from_base_unit(*((int32 *) opt->var), opt->flags & OPTION_UNIT, &value, &unit); - else if (opt->type == 'i') + else if (opt->type == 'I') convert_from_base_unit(*((int64 *) opt->var), opt->flags & OPTION_UNIT, &value, &unit); else if (opt->type == 'u') From be2b90bd3fc96a20dee4a5755311923ca3b6aa11 Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Mon, 6 Mar 2023 13:34:53 +0300 Subject: [PATCH 455/525] PBCKP-553 fix a typo in merge preconditions check --- src/merge.c | 2 +- tests/merge_test.py | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 1 deletion(-) diff --git a/src/merge.c b/src/merge.c index 0017c9e9c..3b8321e97 100644 --- a/src/merge.c +++ b/src/merge.c @@ -337,7 +337,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool else { if ((full_backup->status == BACKUP_STATUS_MERGED || - full_backup->status == BACKUP_STATUS_MERGED) && + full_backup->status == BACKUP_STATUS_MERGING) && dest_backup->start_time != full_backup->merge_dest_backup) { elog(ERROR, "Full backup %s has unfinished merge with backup %s", diff --git a/tests/merge_test.py b/tests/merge_test.py index c789298fd..a9bc6fe68 100644 --- a/tests/merge_test.py +++ b/tests/merge_test.py @@ -2734,5 +2734,46 @@ def test_merge_pg_filenode_map(self): 'postgres', 'select 1') + def test_unfinished_merge(self): + """ Test when parent has unfinished merge with a different backup. 
""" + self._check_gdb_flag_or_skip_test() + cases = [('fail_merged', 'write_backup_filelist', ['MERGED', 'MERGING', 'OK']), + ('fail_merging', 'pgBackupWriteControl', ['MERGING', 'OK', 'OK'])] + + for name, terminate_at, states in cases: + node_name = 'node_' + name + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, name) + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, node_name), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, node_name, node) + self.set_archiving(backup_dir, node_name, node) + node.slow_start() + + full_id=self.backup_node(backup_dir, node_name, node, options=['--stream']) + + backup_id = self.backup_node(backup_dir, node_name, node, backup_type='delta') + second_backup_id = self.backup_node(backup_dir, node_name, node, backup_type='delta') + + gdb = self.merge_backup(backup_dir, node_name, backup_id, gdb=True) + gdb.set_breakpoint(terminate_at) + gdb.run_until_break() + + gdb.remove_all_breakpoints() + gdb._execute('signal SIGINT') + gdb.continue_execution_until_error() + + print(self.show_pb(backup_dir, node_name, as_json=False, as_text=True)) + + for expected, real in zip(states, self.show_pb(backup_dir, node_name), strict=True): + self.assertEqual(expected, real['status']) + + with self.assertRaisesRegex(ProbackupException, + f"Full backup {full_id} has unfinished merge with backup {backup_id}"): + self.merge_backup(backup_dir, node_name, second_backup_id, gdb=False) + # 1. Need new test with corrupted FULL backup # 2. different compression levels From 3c111262af9aa867eff56add03f2fc2366cadb91 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 3 Jan 2023 07:52:16 +0300 Subject: [PATCH 456/525] compatibility tests: skip if PGPROBACKUPBIN_OLD is not set --- tests/compatibility_test.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tests/compatibility_test.py b/tests/compatibility_test.py index 591afb069..7ae8baf9f 100644 --- a/tests/compatibility_test.py +++ b/tests/compatibility_test.py @@ -14,12 +14,7 @@ def check_ssh_agent_path_exists(): return 'PGPROBACKUP_SSH_AGENT_PATH' in os.environ -class CompatibilityTest(ProbackupTest, unittest.TestCase): - - def setUp(self): - self.fname = self.id().split('.')[3] - - # @unittest.expectedFailure +class CrossCompatibilityTest(ProbackupTest, unittest.TestCase): @unittest.skipUnless(check_manual_tests_enabled(), 'skip manual test') @unittest.skipUnless(check_ssh_agent_path_exists(), 'skip no ssh agent path exist') # @unittest.skip("skip") @@ -86,6 +81,14 @@ def test_catchup_with_different_remote_major_pg(self): options=['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--remote-path=' + pgprobackup_ssh_agent_path] ) + +class CompatibilityTest(ProbackupTest, unittest.TestCase): + + def setUp(self): + super().setUp() + if not self.probackup_old_path: + self.skipTest('PGPROBACKUPBIN_OLD is not set') + # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_page(self): From 9727a98976f706be9348ac638667821f226d89bb Mon Sep 17 00:00:00 2001 From: Daria Lepikhova Date: Fri, 10 Mar 2023 18:24:46 +0300 Subject: [PATCH 457/525] PBCKP-422: Fix is_enterprise checking for upstream and pgpro. 
Add is_pgpro checking --- tests/checkdb_test.py | 2 +- tests/helpers/ptrack_helpers.py | 34 ++++++++++++++++++++------------- 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/tests/checkdb_test.py b/tests/checkdb_test.py index 1e6daefdb..4d3a4cbbf 100644 --- a/tests/checkdb_test.py +++ b/tests/checkdb_test.py @@ -808,7 +808,7 @@ def test_checkdb_with_least_privileges(self): "backupdb", "GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool, bool) TO backup") - if ProbackupTest.enterprise: + if ProbackupTest.pgpro: node.safe_psql( 'backupdb', 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; ' diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index c96007448..f8044a814 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -90,27 +90,34 @@ def dir_files(base_dir): return out_list +def is_pgpro(): + # pg_config --help + cmd = [os.environ['PG_CONFIG'], '--help'] + + result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) + return b'postgrespro' in result.stdout + + def is_enterprise(): # pg_config --help cmd = [os.environ['PG_CONFIG'], '--help'] - p = subprocess.Popen( - cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) - return b'postgrespro.ru' in p.communicate()[0] + p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) + # PostgresPro std or ent + if b'postgrespro' in p.stdout: + cmd = [os.environ['PG_CONFIG'], '--pgpro-edition'] + p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) + + return b'enterprise' in p.stdout + else: # PostgreSQL + return False + - def is_nls_enabled(): cmd = [os.environ['PG_CONFIG'], '--configure'] - p = subprocess.Popen( - cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) - return b'enable-nls' in p.communicate()[0] + result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) + return b'enable-nls' in result.stdout def base36enc(number): @@ -229,6 +236,7 @@ class ProbackupTest(object): # Class attributes enterprise = is_enterprise() enable_nls = is_nls_enabled() + pgpro = is_pgpro() def __init__(self, *args, **kwargs): super(ProbackupTest, self).__init__(*args, **kwargs) From 2e6d20dba059fe2faf006ad01289263debab11b8 Mon Sep 17 00:00:00 2001 From: "s.fukanchik" Date: Sun, 12 Mar 2023 19:59:17 +0300 Subject: [PATCH 458/525] PBCKP-191 serialize wal segment push finalization --- src/archive.c | 135 ++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 120 insertions(+), 15 deletions(-) diff --git a/src/archive.c b/src/archive.c index 734602cac..e06d01b68 100644 --- a/src/archive.c +++ b/src/archive.c @@ -13,14 +13,6 @@ #include "utils/thread.h" #include "instr_time.h" -static int push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_dir, - const char *archive_dir, bool overwrite, bool no_sync, - uint32 archive_timeout); -#ifdef HAVE_LIBZ -static int push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, - const char *archive_dir, bool overwrite, bool no_sync, - int compress_level, uint32 archive_timeout); -#endif static void *push_files(void *arg); static void *get_files(void *arg); static bool get_wal_file(const char *filename, const char *from_path, const char *to_path, @@ -91,8 +83,19 @@ typedef struct WALSegno { char name[MAXFNAMELEN]; volatile pg_atomic_flag lock; + volatile pg_atomic_uint32 done; + struct WALSegno* prev; } WALSegno; +static int 
push_file_internal_uncompressed(WALSegno *wal_file_name, const char *pg_xlog_dir, + const char *archive_dir, bool overwrite, bool no_sync, + uint32 archive_timeout); +#ifdef HAVE_LIBZ +static int push_file_internal_gz(WALSegno *wal_file_name, const char *pg_xlog_dir, + const char *archive_dir, bool overwrite, bool no_sync, + int compress_level, uint32 archive_timeout); +#endif + static int push_file(WALSegno *xlogfile, const char *archive_status_dir, const char *pg_xlog_dir, const char *archive_dir, bool overwrite, bool no_sync, uint32 archive_timeout, @@ -337,16 +340,18 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir, /* If compression is not required, then just copy it as is */ if (!is_compress) - rc = push_file_internal_uncompressed(xlogfile->name, pg_xlog_dir, + rc = push_file_internal_uncompressed(xlogfile, pg_xlog_dir, archive_dir, overwrite, no_sync, archive_timeout); #ifdef HAVE_LIBZ else - rc = push_file_internal_gz(xlogfile->name, pg_xlog_dir, archive_dir, + rc = push_file_internal_gz(xlogfile, pg_xlog_dir, archive_dir, overwrite, no_sync, compress_level, archive_timeout); #endif + pg_atomic_write_u32(&xlogfile->done, 1); + /* take '--no-ready-rename' flag into account */ if (!no_ready_rename && archive_status_dir != NULL) { @@ -381,13 +386,14 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir, * has the same checksum */ int -push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_dir, +push_file_internal_uncompressed(WALSegno *wal_file, const char *pg_xlog_dir, const char *archive_dir, bool overwrite, bool no_sync, uint32 archive_timeout) { FILE *in = NULL; int out = -1; char *buf = pgut_malloc(OUT_BUF_SIZE); /* 1MB buffer */ + const char *wal_file_name = wal_file->name; char from_fullpath[MAXPGPATH]; char to_fullpath[MAXPGPATH]; /* partial handling */ @@ -409,7 +415,10 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d /* Open source file for read */ in = fopen(from_fullpath, PG_BINARY_R); if (in == NULL) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot open source file \"%s\": %s", from_fullpath, strerror(errno)); + } /* disable stdio buffering for input file */ setvbuf(in, NULL, _IONBF, BUFSIZ); @@ -422,8 +431,11 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (out < 0) { if (errno != EEXIST) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to open temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); + } /* Already existing destination temp file is not an error condition */ } else @@ -453,15 +465,21 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (out < 0) { if (errno != EEXIST) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to open temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); + } } else /* Successfully created partial file */ break; } else + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot stat temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); + } } /* first round */ @@ -492,8 +510,11 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (out < 0) { if (!partial_is_stale) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to open temp WAL file \"%s\" in %i seconds", to_fullpath_part, archive_timeout); + } /* Partial segment is considered stale, so reuse it */ elog(LOG, "Reusing stale temp WAL file \"%s\"", to_fullpath_part); @@ -501,7 +522,10 @@ 
push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d out = fio_open(to_fullpath_part, O_RDWR | O_CREAT | O_EXCL | PG_BINARY, FIO_BACKUP_HOST); if (out < 0) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot open temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); + } } part_opened: @@ -536,6 +560,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d * so we must unlink partial file and exit with error. */ fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "WAL file already exists in archive with " "different checksum: \"%s\"", to_fullpath); } @@ -553,6 +578,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (ferror(in)) { fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot read source file \"%s\": %s", from_fullpath, strerror(errno)); } @@ -560,6 +586,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (read_len > 0 && fio_write_async(out, buf, read_len) != read_len) { fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot write to destination temp file \"%s\": %s", to_fullpath_part, strerror(errno)); } @@ -575,14 +602,29 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (fio_check_error_fd(out, &errmsg)) { fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot write to the remote file \"%s\": %s", to_fullpath_part, errmsg); } + if (wal_file->prev != NULL) + { + while (!pg_atomic_read_u32(&wal_file->prev->done)) + { + if (thread_interrupted || interrupted) + { + pg_atomic_write_u32(&wal_file->done, 1); + elog(ERROR, "terminated while waiting for prev file"); + } + usleep(250); + } + } + /* close temp file */ if (fio_close(out) != 0) { fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot close temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); } @@ -591,8 +633,11 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (!no_sync) { if (fio_sync(to_fullpath_part, FIO_BACKUP_HOST) != 0) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to sync file \"%s\": %s", to_fullpath_part, strerror(errno)); + } } elog(LOG, "Rename \"%s\" to \"%s\"", to_fullpath_part, to_fullpath); @@ -603,6 +648,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (fio_rename(to_fullpath_part, to_fullpath, FIO_BACKUP_HOST) < 0) { fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", to_fullpath_part, to_fullpath, strerror(errno)); } @@ -620,13 +666,14 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d * has the same checksum */ int -push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, +push_file_internal_gz(WALSegno *wal_file, const char *pg_xlog_dir, const char *archive_dir, bool overwrite, bool no_sync, int compress_level, uint32 archive_timeout) { FILE *in = NULL; gzFile out = NULL; char *buf = pgut_malloc(OUT_BUF_SIZE); + const char *wal_file_name = wal_file->name; char from_fullpath[MAXPGPATH]; char to_fullpath[MAXPGPATH]; char to_fullpath_gz[MAXPGPATH]; @@ -656,8 +703,11 @@ push_file_internal_gz(const char 
*wal_file_name, const char *pg_xlog_dir, /* Open source file for read */ in = fopen(from_fullpath, PG_BINARY_R); if (in == NULL) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot open source WAL file \"%s\": %s", from_fullpath, strerror(errno)); + } /* disable stdio buffering for input file */ setvbuf(in, NULL, _IONBF, BUFSIZ); @@ -667,8 +717,11 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (out == NULL) { if (errno != EEXIST) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot open temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); + } /* Already existing destination temp file is not an error condition */ } else @@ -698,16 +751,22 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (out == NULL) { if (errno != EEXIST) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to open temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); + } } else /* Successfully created partial file */ break; } else + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot stat temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); + } } /* first round */ @@ -738,8 +797,11 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (out == NULL) { if (!partial_is_stale) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to open temp WAL file \"%s\" in %i seconds", to_fullpath_gz_part, archive_timeout); + } /* Partial segment is considered stale, so reuse it */ elog(LOG, "Reusing stale temp WAL file \"%s\"", to_fullpath_gz_part); @@ -747,8 +809,11 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, out = fio_gzopen(to_fullpath_gz_part, PG_BINARY_W, compress_level, FIO_BACKUP_HOST); if (out == NULL) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot open temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); + } } part_opened: @@ -784,6 +849,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, * so we must unlink partial file and exit with error. 
*/ fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "WAL file already exists in archive with " "different checksum: \"%s\"", to_fullpath_gz); } @@ -801,6 +867,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (ferror(in)) { fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot read from source file \"%s\": %s", from_fullpath, strerror(errno)); } @@ -808,6 +875,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (read_len > 0 && fio_gzwrite(out, buf, read_len) != read_len) { fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot write to compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, get_gz_error(out, errno)); } @@ -823,14 +891,29 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (fio_check_error_fd_gz(out, &errmsg)) { fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot write to the remote compressed file \"%s\": %s", to_fullpath_gz_part, errmsg); } + if (wal_file->prev != NULL) + { + while (!pg_atomic_read_u32(&wal_file->prev->done)) + { + if (thread_interrupted || interrupted) + { + pg_atomic_write_u32(&wal_file->done, 1); + elog(ERROR, "terminated while waiting for prev file"); + } + usleep(250); + } + } + /* close temp file, TODO: make it synchronous */ if (fio_gzclose(out) != 0) { fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot close compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); } @@ -839,8 +922,11 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (!no_sync) { if (fio_sync(to_fullpath_gz_part, FIO_BACKUP_HOST) != 0) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to sync file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); + } } elog(LOG, "Rename \"%s\" to \"%s\"", @@ -852,6 +938,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (fio_rename(to_fullpath_gz_part, to_fullpath_gz, FIO_BACKUP_HOST) < 0) { fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", to_fullpath_gz_part, to_fullpath_gz, strerror(errno)); } @@ -905,6 +992,15 @@ get_gz_error(gzFile gzf, int errnum) // } //} +static int +walSegnoCompareName(const void *f1, const void *f2) +{ + WALSegno *w1 = *(WALSegno**)f1; + WALSegno *w2 = *(WALSegno**)f2; + + return strcmp(w1->name, w2->name); +} + /* Look for files with '.ready' suffix in archive_status directory * and pack such files into batch sized array. 
*/ @@ -912,14 +1008,15 @@ parray * setup_push_filelist(const char *archive_status_dir, const char *first_file, int batch_size) { - int i; WALSegno *xlogfile = NULL; parray *status_files = NULL; parray *batch_files = parray_new(); + size_t i; /* guarantee that first filename is in batch list */ - xlogfile = palloc(sizeof(WALSegno)); + xlogfile = palloc0(sizeof(WALSegno)); pg_atomic_init_flag(&xlogfile->lock); + pg_atomic_init_u32(&xlogfile->done, 0); snprintf(xlogfile->name, MAXFNAMELEN, "%s", first_file); parray_append(batch_files, xlogfile); @@ -950,8 +1047,9 @@ setup_push_filelist(const char *archive_status_dir, const char *first_file, if (strcmp(filename, first_file) == 0) continue; - xlogfile = palloc(sizeof(WALSegno)); + xlogfile = palloc0(sizeof(WALSegno)); pg_atomic_init_flag(&xlogfile->lock); + pg_atomic_init_u32(&xlogfile->done, 0); snprintf(xlogfile->name, MAXFNAMELEN, "%s", filename); parray_append(batch_files, xlogfile); @@ -960,6 +1058,13 @@ setup_push_filelist(const char *archive_status_dir, const char *first_file, break; } + parray_qsort(batch_files, walSegnoCompareName); + for (i = 1; i < parray_num(batch_files); i++) + { + xlogfile = (WALSegno*) parray_get(batch_files, i); + xlogfile->prev = (WALSegno*) parray_get(batch_files, i-1); + } + /* cleanup */ parray_walk(status_files, pgFileFree); parray_free(status_files); From 91ebe718ba3653b1a1212429d68f4ae77e097611 Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Thu, 16 Mar 2023 12:47:03 +0300 Subject: [PATCH 459/525] [PBCKP-528] Fixed memory leaks and some minor bugs. --- src/catalog.c | 13 ++++++++++--- src/delete.c | 8 ++++++++ src/dir.c | 4 ++-- src/merge.c | 4 ++-- src/show.c | 4 +++- src/stream.c | 2 +- src/utils/configuration.c | 26 ++++++++++++++++++++++++-- src/utils/pgut.c | 5 ++++- 8 files changed, 54 insertions(+), 12 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index aadc47bb1..1cf86cd24 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -891,7 +891,7 @@ catalog_get_instance_list(CatalogState *catalogState) instanceState = pgut_new(InstanceState); - strncpy(instanceState->instance_name, dent->d_name, MAXPGPATH); + strlcpy(instanceState->instance_name, dent->d_name, MAXPGPATH); join_path_components(instanceState->instance_backup_subdir_path, catalogState->backup_subdir_path, instanceState->instance_name); join_path_components(instanceState->instance_wal_subdir_path, @@ -2245,6 +2245,12 @@ do_set_backup(InstanceState *instanceState, time_t backup_id, if (set_backup_params->note) add_note(target_backup, set_backup_params->note); + /* Cleanup */ + if (backup_list) + { + parray_walk(backup_list, pgBackupFree); + parray_free(backup_list); + } } /* @@ -2310,6 +2316,7 @@ add_note(pgBackup *target_backup, char *note) { char *note_string; + char *p; /* unset note */ if (pg_strcasecmp(note, "none") == 0) @@ -2326,8 +2333,8 @@ add_note(pgBackup *target_backup, char *note) * we save only "aaa" * Example: tests.set_backup.SetBackupTest.test_add_note_newlines */ - note_string = pgut_malloc(MAX_NOTE_SIZE); - sscanf(note, "%[^\n]", note_string); + p = strchr(note, '\n'); + note_string = pgut_strndup(note, p ? 
(p-note) : MAX_NOTE_SIZE); target_backup->note = note_string; elog(INFO, "Adding note to backup %s: '%s'", diff --git a/src/delete.c b/src/delete.c index c8a8c22ec..f48ecc95f 100644 --- a/src/delete.c +++ b/src/delete.c @@ -158,7 +158,13 @@ void do_retention(InstanceState *instanceState, bool no_validate, bool no_sync) /* Retention is disabled but we still can cleanup wal */ elog(WARNING, "Retention policy is not set"); if (!delete_wal) + { + parray_walk(backup_list, pgBackupFree); + parray_free(backup_list); + parray_free(to_keep_list); + parray_free(to_purge_list); return; + } } else /* At least one retention policy is active */ @@ -1047,6 +1053,8 @@ do_delete_status(InstanceState *instanceState, InstanceConfig *instance_config, if (parray_num(backup_list) == 0) { elog(WARNING, "Instance '%s' has no backups", instanceState->instance_name); + parray_free(delete_list); + parray_free(backup_list); return; } diff --git a/src/dir.c b/src/dir.c index 4bae25de2..c6701929a 100644 --- a/src/dir.c +++ b/src/dir.c @@ -151,7 +151,7 @@ dir_create_dir(const char *dir, mode_t mode, bool strict) { char parent[MAXPGPATH]; - strncpy(parent, dir, MAXPGPATH); + strlcpy(parent, dir, MAXPGPATH); get_parent_directory(parent); /* Create parent first */ @@ -964,7 +964,7 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba if (links) { /* get parent dir of rel_path */ - strncpy(parent_dir, dir->rel_path, MAXPGPATH); + strlcpy(parent_dir, dir->rel_path, MAXPGPATH); get_parent_directory(parent_dir); /* check if directory is actually link to tablespace */ diff --git a/src/merge.c b/src/merge.c index 3b8321e97..c2751cde3 100644 --- a/src/merge.c +++ b/src/merge.c @@ -887,7 +887,7 @@ merge_chain(InstanceState *instanceState, pfree(threads); } - if (result_filelist && parray_num(result_filelist) > 0) + if (result_filelist) { parray_walk(result_filelist, pgFileFree); parray_free(result_filelist); @@ -1067,7 +1067,7 @@ merge_files(void *arg) tmp_file->hdr_crc = file->hdr_crc; } else - tmp_file->uncompressed_size = tmp_file->uncompressed_size; + tmp_file->uncompressed_size = file->uncompressed_size; /* Copy header metadata from old map into a new one */ tmp_file->n_headers = file->n_headers; diff --git a/src/show.c b/src/show.c index 2e06582ed..cc22a2acb 100644 --- a/src/show.c +++ b/src/show.c @@ -452,7 +452,7 @@ print_backup_json_object(PQExpBuffer buf, pgBackup *backup) appendPQExpBuffer(buf, INT64_FORMAT, backup->uncompressed_bytes); } - if (backup->uncompressed_bytes >= 0) + if (backup->pgdata_bytes >= 0) { json_add_key(buf, "pgdata-bytes", json_level); appendPQExpBuffer(buf, INT64_FORMAT, backup->pgdata_bytes); @@ -514,6 +514,8 @@ show_backup(InstanceState *instanceState, time_t requested_backup_id) elog(INFO, "Requested backup \"%s\" is not found.", /* We do not need free base36enc's result, we exit anyway */ base36enc(requested_backup_id)); + parray_walk(backups, pgBackupFree); + parray_free(backups); /* This is not error */ return 0; } diff --git a/src/stream.c b/src/stream.c index f7bbeae5a..73bea6780 100644 --- a/src/stream.c +++ b/src/stream.c @@ -648,7 +648,7 @@ start_WAL_streaming(PGconn *backup_conn, char *stream_dst_path, ConnectionOption //TODO Add a comment about this calculation stream_stop_timeout = stream_stop_timeout + stream_stop_timeout * 0.1; - strncpy(stream_thread_arg.basedir, stream_dst_path, sizeof(stream_thread_arg.basedir)); + strlcpy(stream_thread_arg.basedir, stream_dst_path, sizeof(stream_thread_arg.basedir)); /* * Connect in replication mode to the server. 
diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 61d153baa..08d024516 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -1177,7 +1177,8 @@ parse_time(const char *value, time_t *result, bool utc_default) char *local_tz = getenv("TZ"); /* tmp = replace( value, !isalnum, ' ' ) */ - tmp = pgut_malloc(strlen(value) + + 1); + tmp = pgut_malloc(strlen(value) + 1); + if(!tmp) return false; len = 0; fields_num = 1; @@ -1205,7 +1206,10 @@ parse_time(const char *value, time_t *result, bool utc_default) errno = 0; hr = strtol(value + 1, &cp, 10); if ((value + 1) == cp || errno == ERANGE) + { + pfree(tmp); return false; + } /* explicit delimiter? */ if (*cp == ':') @@ -1213,13 +1217,19 @@ parse_time(const char *value, time_t *result, bool utc_default) errno = 0; min = strtol(cp + 1, &cp, 10); if (errno == ERANGE) + { + pfree(tmp); return false; + } if (*cp == ':') { errno = 0; sec = strtol(cp + 1, &cp, 10); if (errno == ERANGE) + { + pfree(tmp); return false; + } } } /* otherwise, might have run things together... */ @@ -1234,11 +1244,20 @@ parse_time(const char *value, time_t *result, bool utc_default) /* Range-check the values; see notes in datatype/timestamp.h */ if (hr < 0 || hr > MAX_TZDISP_HOUR) + { + pfree(tmp); return false; + } if (min < 0 || min >= MINS_PER_HOUR) + { + pfree(tmp); return false; + } if (sec < 0 || sec >= SECS_PER_MINUTE) + { + pfree(tmp); return false; + } tz = (hr * MINS_PER_HOUR + min) * SECS_PER_MINUTE + sec; if (*value == '-') @@ -1251,7 +1270,10 @@ parse_time(const char *value, time_t *result, bool utc_default) } /* wrong format */ else if (!IsSpace(*value)) + { + pfree(tmp); return false; + } else value++; } @@ -1268,7 +1290,7 @@ parse_time(const char *value, time_t *result, bool utc_default) i = sscanf(tmp, "%04d %02d %02d %02d %02d %02d%1s", &tm.tm_year, &tm.tm_mon, &tm.tm_mday, &tm.tm_hour, &tm.tm_min, &tm.tm_sec, junk); - free(tmp); + pfree(tmp); if (i < 3 || i > 6) return false; diff --git a/src/utils/pgut.c b/src/utils/pgut.c index 6123c18d8..9559fa644 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -1215,13 +1215,16 @@ pgut_pgfnames(const char *path, bool strict) } } + filenames[numnames] = NULL; + if (errno) { elog(strict ? 
ERROR : WARNING, "could not read directory \"%s\": %m", path); + pgut_pgfnames_cleanup(filenames); + closedir(dir); return NULL; } - filenames[numnames] = NULL; if (closedir(dir)) { From c0b8eb7c20a65240d6be448a48f426dba3b17477 Mon Sep 17 00:00:00 2001 From: "s.fukanchik" Date: Mon, 27 Mar 2023 21:47:17 +0300 Subject: [PATCH 460/525] PBCKP-91 fix grammar in probackup's log messages --- src/archive.c | 4 ++-- src/backup.c | 33 ++++++++++++++++--------------- src/catalog.c | 8 ++++---- src/catchup.c | 2 +- src/checkdb.c | 4 ++-- src/dir.c | 22 ++++++++++----------- src/fetch.c | 2 +- src/init.c | 13 +++++++------ src/merge.c | 4 ++-- src/pg_probackup.c | 20 ++++++++++++------- src/ptrack.c | 2 +- src/restore.c | 41 ++++++++++++++++++++------------------- src/util.c | 4 ++-- src/utils/configuration.c | 12 +++++++++--- src/validate.c | 4 ++-- tests/auth_test.py | 4 ++-- tests/backup_test.py | 8 ++++---- tests/checkdb_test.py | 4 ++-- tests/compression_test.py | 2 +- tests/init_test.py | 3 ++- tests/option_test.py | 19 ++++++++---------- tests/restore_test.py | 4 ++-- tests/set_backup_test.py | 2 +- 23 files changed, 117 insertions(+), 104 deletions(-) diff --git a/src/archive.c b/src/archive.c index e06d01b68..7d753c8b3 100644 --- a/src/archive.c +++ b/src/archive.c @@ -614,7 +614,7 @@ push_file_internal_uncompressed(WALSegno *wal_file, const char *pg_xlog_dir, if (thread_interrupted || interrupted) { pg_atomic_write_u32(&wal_file->done, 1); - elog(ERROR, "terminated while waiting for prev file"); + elog(ERROR, "Terminated while waiting for prev file"); } usleep(250); } @@ -903,7 +903,7 @@ push_file_internal_gz(WALSegno *wal_file, const char *pg_xlog_dir, if (thread_interrupted || interrupted) { pg_atomic_write_u32(&wal_file->done, 1); - elog(ERROR, "terminated while waiting for prev file"); + elog(ERROR, "Terminated while waiting for prev file"); } usleep(250); } diff --git a/src/backup.c b/src/backup.c index 7e6e33c53..4c2454558 100644 --- a/src/backup.c +++ b/src/backup.c @@ -84,7 +84,7 @@ backup_stopbackup_callback(bool fatal, void *userdata) */ if (backup_in_progress) { - elog(WARNING, "backup in progress, stop backup"); + elog(WARNING, "A backup is in progress, stopping it."); /* don't care about stop_lsn in case of error */ pg_stop_backup_send(st->conn, st->server_version, current.from_replica, exclusive_backup, NULL); } @@ -711,8 +711,9 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, char pretty_bytes[20]; if (!instance_config.pgdata) - elog(ERROR, "required parameter not specified: PGDATA " - "(-D, --pgdata)"); + elog(ERROR, "No postgres data directory specified.\n" + "Please specify it either using environment variable PGDATA or\n" + "command line option --pgdata (-D)"); /* Initialize PGInfonode */ pgNodeInit(&nodeInfo); @@ -936,12 +937,12 @@ check_server_version(PGconn *conn, PGNodeInfo *nodeInfo) if (nodeInfo->server_version < 90500) elog(ERROR, - "server version is %s, must be %s or higher", + "Server version is %s, must be %s or higher", nodeInfo->server_version_str, "9.5"); if (current.from_replica && nodeInfo->server_version < 90600) elog(ERROR, - "server version is %s, must be %s or higher for backup from replica", + "Server version is %s, must be %s or higher for backup from replica", nodeInfo->server_version_str, "9.6"); if (nodeInfo->pgpro_support) @@ -1050,7 +1051,7 @@ confirm_block_size(PGconn *conn, const char *name, int blcksz) res = pgut_execute(conn, "SELECT pg_catalog.current_setting($1)", 1, &name); if (PQntuples(res) != 1 || 
PQnfields(res) != 1) - elog(ERROR, "cannot get %s: %s", name, PQerrorMessage(conn)); + elog(ERROR, "Cannot get %s: %s", name, PQerrorMessage(conn)); block_size = strtol(PQgetvalue(res, 0, 0), &endp, 10); if ((endp && *endp) || block_size != blcksz) @@ -1439,7 +1440,7 @@ wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr target_lsn, bool is_start_l } if (!current.stream && is_start_lsn && try_count == 30) - elog(WARNING, "By default pg_probackup assume WAL delivery method to be ARCHIVE. " + elog(WARNING, "By default pg_probackup assumes that WAL delivery method to be ARCHIVE. " "If continuous archiving is not set up, use '--stream' option to make autonomous backup. " "Otherwise check that continuous archiving works correctly."); @@ -1775,9 +1776,9 @@ pg_stop_backup_consume(PGconn *conn, int server_version, { pgut_cancel(conn); #if PG_VERSION_NUM >= 150000 - elog(ERROR, "interrupted during waiting for pg_backup_stop"); + elog(ERROR, "Interrupted during waiting for pg_backup_stop"); #else - elog(ERROR, "interrupted during waiting for pg_stop_backup"); + elog(ERROR, "Interrupted during waiting for pg_stop_backup"); #endif } @@ -1823,7 +1824,7 @@ pg_stop_backup_consume(PGconn *conn, int server_version, case PGRES_TUPLES_OK: break; default: - elog(ERROR, "query failed: %s query was: %s", + elog(ERROR, "Query failed: %s query was: %s", PQerrorMessage(conn), query_text); } backup_in_progress = false; @@ -1834,13 +1835,13 @@ pg_stop_backup_consume(PGconn *conn, int server_version, /* get&check recovery_xid */ if (sscanf(PQgetvalue(query_result, 0, recovery_xid_colno), XID_FMT, &result->snapshot_xid) != 1) elog(ERROR, - "result of txid_snapshot_xmax() is invalid: %s", + "Result of txid_snapshot_xmax() is invalid: %s", PQgetvalue(query_result, 0, recovery_xid_colno)); /* get&check recovery_time */ if (!parse_time(PQgetvalue(query_result, 0, recovery_time_colno), &result->invocation_time, true)) elog(ERROR, - "result of current_timestamp is invalid: %s", + "Result of current_timestamp is invalid: %s", PQgetvalue(query_result, 0, recovery_time_colno)); /* get stop_backup_lsn */ @@ -1898,13 +1899,13 @@ pg_stop_backup_write_file_helper(const char *path, const char *filename, const c join_path_components(full_filename, path, filename); fp = fio_fopen(full_filename, PG_BINARY_W, FIO_BACKUP_HOST); if (fp == NULL) - elog(ERROR, "can't open %s file \"%s\": %s", + elog(ERROR, "Can't open %s file \"%s\": %s", error_msg_filename, full_filename, strerror(errno)); if (fio_fwrite(fp, data, len) != len || fio_fflush(fp) != 0 || fio_fclose(fp)) - elog(ERROR, "can't write %s file \"%s\": %s", + elog(ERROR, "Can't write %s file \"%s\": %s", error_msg_filename, full_filename, strerror(errno)); /* @@ -1943,7 +1944,7 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb /* Remove it ? */ if (!backup_in_progress) - elog(ERROR, "backup is not in progress"); + elog(ERROR, "Backup is not in progress"); pg_silent_client_messages(pg_startbackup_conn); @@ -2098,7 +2099,7 @@ backup_files(void *arg) /* check for interrupt */ if (interrupted || thread_interrupted) - elog(ERROR, "interrupted during backup"); + elog(ERROR, "Interrupted during backup"); elog(progress ? INFO : LOG, "Progress: (%d/%d). 
Process file \"%s\"", i + 1, n_backup_files_list, file->rel_path); diff --git a/src/catalog.c b/src/catalog.c index 1cf86cd24..b29090789 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1055,7 +1055,7 @@ get_backup_filelist(pgBackup *backup, bool strict) fp = fio_open_stream(backup_filelist_path, FIO_BACKUP_HOST); if (fp == NULL) - elog(ERROR, "cannot open \"%s\": %s", backup_filelist_path, strerror(errno)); + elog(ERROR, "Cannot open \"%s\": %s", backup_filelist_path, strerror(errno)); /* enable stdio buffering for local file */ if (!fio_is_remote(FIO_BACKUP_HOST)) @@ -2841,7 +2841,7 @@ parse_backup_mode(const char *value) return BACKUP_MODE_DIFF_DELTA; /* Backup mode is invalid, so leave with an error */ - elog(ERROR, "invalid backup-mode \"%s\"", value); + elog(ERROR, "Invalid backup-mode \"%s\"", value); return BACKUP_MODE_INVALID; } @@ -2876,7 +2876,7 @@ parse_compress_alg(const char *arg) len = strlen(arg); if (len == 0) - elog(ERROR, "compress algorithm is empty"); + elog(ERROR, "Compress algorithm is empty"); if (pg_strncasecmp("zlib", arg, len) == 0) return ZLIB_COMPRESS; @@ -2885,7 +2885,7 @@ parse_compress_alg(const char *arg) else if (pg_strncasecmp("none", arg, len) == 0) return NONE_COMPRESS; else - elog(ERROR, "invalid compress algorithm value \"%s\"", arg); + elog(ERROR, "Invalid compress algorithm value \"%s\"", arg); return NOT_DEFINED_COMPRESS; } diff --git a/src/catchup.c b/src/catchup.c index 79e3361a8..427542dda 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -184,7 +184,7 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, if (source_node_info->ptrack_version_num == 0) elog(ERROR, "This PostgreSQL instance does not support ptrack"); else if (source_node_info->ptrack_version_num < 200) - elog(ERROR, "ptrack extension is too old.\n" + elog(ERROR, "Ptrack extension is too old.\n" "Upgrade ptrack to version >= 2"); else if (!source_node_info->is_ptrack_enabled) elog(ERROR, "Ptrack is disabled"); diff --git a/src/checkdb.c b/src/checkdb.c index 1133a7b5d..2a7d4e9eb 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -145,7 +145,7 @@ check_files(void *arg) /* check for interrupt */ if (interrupted || thread_interrupted) - elog(ERROR, "interrupted during checkdb"); + elog(ERROR, "Interrupted during checkdb"); /* No need to check directories */ if (S_ISDIR(file->mode)) @@ -750,7 +750,7 @@ do_checkdb(bool need_amcheck, if (!skip_block_validation) { if (!pgdata) - elog(ERROR, "required parameter not specified: PGDATA " + elog(ERROR, "Required parameter not specified: PGDATA " "(-D, --pgdata)"); /* get node info */ diff --git a/src/dir.c b/src/dir.c index c6701929a..a16e0f396 100644 --- a/src/dir.c +++ b/src/dir.c @@ -182,7 +182,7 @@ pgFileNew(const char *path, const char *rel_path, bool follow_symlink, /* file not found is not an error case */ if (errno == ENOENT) return NULL; - elog(ERROR, "cannot stat file \"%s\": %s", path, + elog(ERROR, "Cannot stat file \"%s\": %s", path, strerror(errno)); } @@ -787,14 +787,14 @@ opt_path_map(ConfigOption *opt, const char *arg, TablespaceList *list, for (arg_ptr = arg; *arg_ptr; arg_ptr++) { if (dst_ptr - dst >= MAXPGPATH) - elog(ERROR, "directory name too long"); + elog(ERROR, "Directory name too long"); if (*arg_ptr == '\\' && *(arg_ptr + 1) == '=') ; /* skip backslash escaping = */ else if (*arg_ptr == '=' && (arg_ptr == arg || *(arg_ptr - 1) != '\\')) { if (*cell->new_dir) - elog(ERROR, "multiple \"=\" signs in %s mapping\n", type); + elog(ERROR, "Multiple \"=\" signs in %s mapping\n", type); else dst = 
dst_ptr = cell->new_dir; } @@ -803,7 +803,7 @@ opt_path_map(ConfigOption *opt, const char *arg, TablespaceList *list, } if (!*cell->old_dir || !*cell->new_dir) - elog(ERROR, "invalid %s mapping format \"%s\", " + elog(ERROR, "Invalid %s mapping format \"%s\", " "must be \"OLDDIR=NEWDIR\"", type, arg); canonicalize_path(cell->old_dir); canonicalize_path(cell->new_dir); @@ -815,11 +815,11 @@ opt_path_map(ConfigOption *opt, const char *arg, TablespaceList *list, * consistent with the new_dir check. */ if (!is_absolute_path(cell->old_dir)) - elog(ERROR, "old directory is not an absolute path in %s mapping: %s\n", + elog(ERROR, "Old directory is not an absolute path in %s mapping: %s\n", type, cell->old_dir); if (!is_absolute_path(cell->new_dir)) - elog(ERROR, "new directory is not an absolute path in %s mapping: %s\n", + elog(ERROR, "New directory is not an absolute path in %s mapping: %s\n", type, cell->new_dir); if (list->tail) @@ -1046,7 +1046,7 @@ read_tablespace_map(parray *links, const char *backup_dir) int i = 0; if (sscanf(buf, "%s %n", link_name, &n) != 1) - elog(ERROR, "invalid format found in \"%s\"", map_path); + elog(ERROR, "Invalid format found in \"%s\"", map_path); path = buf + n; @@ -1438,7 +1438,7 @@ get_control_value_str(const char *str, const char *name, { /* verify if value_str not exceeds value_str_size limits */ if (value_str - value_str_start >= value_str_size - 1) { - elog(ERROR, "field \"%s\" is out of range in the line %s of the file %s", + elog(ERROR, "Field \"%s\" is out of range in the line %s of the file %s", name, str, DATABASE_FILE_LIST); } *value_str = *buf; @@ -1463,7 +1463,7 @@ get_control_value_str(const char *str, const char *name, /* Did not find target field */ if (is_mandatory) - elog(ERROR, "field \"%s\" is not found in the line %s of the file %s", + elog(ERROR, "Field \"%s\" is not found in the line %s of the file %s", name, str, DATABASE_FILE_LIST); return false; } @@ -1490,7 +1490,7 @@ dir_is_empty(const char *path, fio_location location) /* Directory in path doesn't exist */ if (errno == ENOENT) return true; - elog(ERROR, "cannot open directory \"%s\": %s", path, strerror(errno)); + elog(ERROR, "Cannot open directory \"%s\": %s", path, strerror(errno)); } errno = 0; @@ -1506,7 +1506,7 @@ dir_is_empty(const char *path, fio_location location) return false; } if (errno) - elog(ERROR, "cannot read directory \"%s\": %s", path, strerror(errno)); + elog(ERROR, "Cannot read directory \"%s\": %s", path, strerror(errno)); fio_closedir(dir); diff --git a/src/fetch.c b/src/fetch.c index bef30dac6..5401d815e 100644 --- a/src/fetch.c +++ b/src/fetch.c @@ -92,7 +92,7 @@ fetchFile(PGconn *conn, const char *filename, size_t *filesize) /* sanity check the result set */ if (PQntuples(res) != 1 || PQgetisnull(res, 0, 0)) - elog(ERROR, "unexpected result set while fetching remote file \"%s\"", + elog(ERROR, "Unexpected result set while fetching remote file \"%s\"", filename); /* Read result to local variables */ diff --git a/src/init.c b/src/init.c index 8773016b5..837e2bad0 100644 --- a/src/init.c +++ b/src/init.c @@ -24,11 +24,11 @@ do_init(CatalogState *catalogState) results = pg_check_dir(catalogState->catalog_path); if (results == 4) /* exists and not empty*/ - elog(ERROR, "backup catalog already exist and it's not empty"); + elog(ERROR, "The backup catalog already exists and is not empty"); else if (results == -1) /*trouble accessing directory*/ { int errno_tmp = errno; - elog(ERROR, "cannot open backup catalog directory \"%s\": %s", + elog(ERROR, "Cannot open 
backup catalog directory \"%s\": %s", catalogState->catalog_path, strerror(errno_tmp)); } @@ -41,7 +41,7 @@ do_init(CatalogState *catalogState) /* create backup catalog wal directory */ dir_create_dir(catalogState->wal_subdir_path, DIR_PERMISSION, false); - elog(INFO, "Backup catalog '%s' successfully inited", catalogState->catalog_path); + elog(INFO, "Backup catalog '%s' successfully initialized", catalogState->catalog_path); return 0; } @@ -53,8 +53,9 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) /* PGDATA is always required */ if (instance->pgdata == NULL) - elog(ERROR, "Required parameter not specified: PGDATA " - "(-D, --pgdata)"); + elog(ERROR, "No postgres data directory specified.\n" + "Please specify it either using environment variable PGDATA or\n" + "command line option --pgdata (-D)"); /* Read system_identifier from PGDATA */ instance->system_identifier = get_system_identifier(instance->pgdata, FIO_DB_HOST, false); @@ -121,6 +122,6 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) /* pgdata was set through command line */ do_set_config(instanceState, true); - elog(INFO, "Instance '%s' successfully inited", instanceState->instance_name); + elog(INFO, "Instance '%s' successfully initialized", instanceState->instance_name); return 0; } diff --git a/src/merge.c b/src/merge.c index c2751cde3..e8f926795 100644 --- a/src/merge.c +++ b/src/merge.c @@ -79,10 +79,10 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool int i; if (backup_id == INVALID_BACKUP_ID) - elog(ERROR, "required parameter is not specified: --backup-id"); + elog(ERROR, "Required parameter is not specified: --backup-id"); if (instanceState == NULL) - elog(ERROR, "required parameter is not specified: --instance"); + elog(ERROR, "Required parameter is not specified: --instance"); elog(INFO, "Merge started"); diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 0e371ef42..505dff89b 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -491,7 +491,10 @@ main(int argc, char *argv[]) backup_subcmd != HELP_CMD && backup_subcmd != VERSION_CMD && backup_subcmd != CATCHUP_CMD) - elog(ERROR, "required parameter not specified: BACKUP_PATH (-B, --backup-path)"); + elog(ERROR, + "No backup catalog path specified.\n" + "Please specify it either using environment variable BACKUP_PATH or\n" + "command line option --backup-path (-B)"); /* ===== catalogState (END) ======*/ @@ -505,7 +508,7 @@ main(int argc, char *argv[]) { if (backup_subcmd != INIT_CMD && backup_subcmd != SHOW_CMD && backup_subcmd != VALIDATE_CMD && backup_subcmd != CHECKDB_CMD && backup_subcmd != CATCHUP_CMD) - elog(ERROR, "required parameter not specified: --instance"); + elog(ERROR, "Required parameter not specified: --instance"); } else { @@ -618,7 +621,7 @@ main(int argc, char *argv[]) backup_path != NULL && instance_name == NULL && instance_config.pgdata == NULL) - elog(ERROR, "required parameter not specified: --instance"); + elog(ERROR, "Required parameter not specified: --instance"); /* Check checkdb command options consistency */ if (backup_subcmd == CHECKDB_CMD && @@ -831,14 +834,16 @@ main(int argc, char *argv[]) if (catchup_destination_pgdata == NULL) elog(ERROR, "You must specify \"--destination-pgdata\" option with the \"%s\" command", get_subcmd_name(backup_subcmd)); if (current.backup_mode == BACKUP_MODE_INVALID) - elog(ERROR, "Required parameter not specified: BACKUP_MODE (-b, --backup-mode)"); + elog(ERROR, "No backup mode specified.\n" + "Please specify it 
either using environment variable BACKUP_MODE or\n" + "command line option --backup-mode (-b)"); if (current.backup_mode != BACKUP_MODE_FULL && current.backup_mode != BACKUP_MODE_DIFF_PTRACK && current.backup_mode != BACKUP_MODE_DIFF_DELTA) elog(ERROR, "Only \"FULL\", \"PTRACK\" and \"DELTA\" modes are supported with the \"%s\" command", get_subcmd_name(backup_subcmd)); if (!stream_wal) elog(INFO, "--stream is required, forcing stream mode"); current.stream = stream_wal = true; if (instance_config.external_dir_str) - elog(ERROR, "external directories not supported fom \"%s\" command", get_subcmd_name(backup_subcmd)); + elog(ERROR, "External directories not supported fom \"%s\" command", get_subcmd_name(backup_subcmd)); // TODO check instance_config.conn_opt } @@ -985,8 +990,9 @@ main(int argc, char *argv[]) /* sanity */ if (current.backup_mode == BACKUP_MODE_INVALID) - elog(ERROR, "required parameter not specified: BACKUP_MODE " - "(-b, --backup-mode)"); + elog(ERROR, "No backup mode specified.\n" + "Please specify it either using environment variable BACKUP_MODE or\n" + "command line option --backup-mode (-b)"); return do_backup(instanceState, set_backup_params, no_validate, no_sync, backup_logs, start_time); diff --git a/src/ptrack.c b/src/ptrack.c index ebcba1dd4..d27629e45 100644 --- a/src/ptrack.c +++ b/src/ptrack.c @@ -214,7 +214,7 @@ pg_ptrack_get_pagemapset(PGconn *backup_conn, const char *ptrack_schema, pfree(params[0]); if (PQnfields(res) != 2) - elog(ERROR, "cannot get ptrack pagemapset"); + elog(ERROR, "Cannot get ptrack pagemapset"); /* sanity ? */ diff --git a/src/restore.c b/src/restore.c index 6c0e1881f..bb38e8d7e 100644 --- a/src/restore.c +++ b/src/restore.c @@ -131,13 +131,14 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg XLogRecPtr shift_lsn = InvalidXLogRecPtr; if (instanceState == NULL) - elog(ERROR, "required parameter not specified: --instance"); + elog(ERROR, "Required parameter not specified: --instance"); if (params->is_restore) { if (instance_config.pgdata == NULL) - elog(ERROR, - "required parameter not specified: PGDATA (-D, --pgdata)"); + elog(ERROR, "No postgres data directory specified.\n" + "Please specify it either using environment variable PGDATA or\n" + "command line option --pgdata (-D)"); /* Check if restore destination empty */ if (!dir_is_empty(instance_config.pgdata, FIO_DB_HOST)) @@ -290,7 +291,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (!satisfy_timeline(timelines, current_backup->tli, current_backup->stop_lsn)) { if (target_backup_id != INVALID_BACKUP_ID) - elog(ERROR, "target backup %s does not satisfy target timeline", + elog(ERROR, "Target backup %s does not satisfy target timeline", base36enc(target_backup_id)); else /* Try to find another backup that satisfies target timeline */ @@ -776,7 +777,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, use_bitmap = false; if (params->incremental_mode != INCR_NONE) - elog(ERROR, "incremental restore is not possible for backups older than 2.3.0 version"); + elog(ERROR, "Incremental restore is not possible for backups older than 2.3.0 version"); } /* There is no point in bitmap restore, when restoring a single FULL backup, @@ -1479,7 +1480,7 @@ update_recovery_options_before_v12(InstanceState *instanceState, pgBackup *backu fp = fio_fopen(path, "w", FIO_DB_HOST); if (fp == NULL) - elog(ERROR, "cannot open file \"%s\": %s", path, + elog(ERROR, "Cannot open file \"%s\": %s", path, strerror(errno)); if 
(fio_chmod(path, FILE_PERMISSION, FIO_DB_HOST) == -1) @@ -1499,7 +1500,7 @@ update_recovery_options_before_v12(InstanceState *instanceState, pgBackup *backu if (fio_fflush(fp) != 0 || fio_fclose(fp)) - elog(ERROR, "cannot write file \"%s\": %s", path, + elog(ERROR, "Cannot write file \"%s\": %s", path, strerror(errno)); } #endif @@ -1538,7 +1539,7 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, { /* file not found is not an error case */ if (errno != ENOENT) - elog(ERROR, "cannot stat file \"%s\": %s", postgres_auto_path, + elog(ERROR, "Cannot stat file \"%s\": %s", postgres_auto_path, strerror(errno)); st.st_size = 0; } @@ -1548,13 +1549,13 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, { fp = fio_open_stream(postgres_auto_path, FIO_DB_HOST); if (fp == NULL) - elog(ERROR, "cannot open \"%s\": %s", postgres_auto_path, strerror(errno)); + elog(ERROR, "Cannot open \"%s\": %s", postgres_auto_path, strerror(errno)); } sprintf(postgres_auto_path_tmp, "%s.tmp", postgres_auto_path); fp_tmp = fio_fopen(postgres_auto_path_tmp, "w", FIO_DB_HOST); if (fp_tmp == NULL) - elog(ERROR, "cannot open \"%s\": %s", postgres_auto_path_tmp, strerror(errno)); + elog(ERROR, "Cannot open \"%s\": %s", postgres_auto_path_tmp, strerror(errno)); while (fp && fgets(line, lengthof(line), fp)) { @@ -1612,7 +1613,7 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, { fp = fio_fopen(postgres_auto_path, "a", FIO_DB_HOST); if (fp == NULL) - elog(ERROR, "cannot open file \"%s\": %s", postgres_auto_path, + elog(ERROR, "Cannot open file \"%s\": %s", postgres_auto_path, strerror(errno)); fio_fprintf(fp, "\n# recovery settings added by pg_probackup restore of backup %s at '%s'\n", @@ -1626,7 +1627,7 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, if (fio_fflush(fp) != 0 || fio_fclose(fp)) - elog(ERROR, "cannot write file \"%s\": %s", postgres_auto_path, + elog(ERROR, "Cannot write file \"%s\": %s", postgres_auto_path, strerror(errno)); /* @@ -1646,12 +1647,12 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, fp = fio_fopen(path, PG_BINARY_W, FIO_DB_HOST); if (fp == NULL) - elog(ERROR, "cannot open file \"%s\": %s", path, + elog(ERROR, "Cannot open file \"%s\": %s", path, strerror(errno)); if (fio_fflush(fp) != 0 || fio_fclose(fp)) - elog(ERROR, "cannot write file \"%s\": %s", path, + elog(ERROR, "Cannot write file \"%s\": %s", path, strerror(errno)); } @@ -1662,12 +1663,12 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, fp = fio_fopen(path, PG_BINARY_W, FIO_DB_HOST); if (fp == NULL) - elog(ERROR, "cannot open file \"%s\": %s", path, + elog(ERROR, "Cannot open file \"%s\": %s", path, strerror(errno)); if (fio_fflush(fp) != 0 || fio_fclose(fp)) - elog(ERROR, "cannot write file \"%s\": %s", path, + elog(ERROR, "Cannot write file \"%s\": %s", path, strerror(errno)); } } @@ -1704,12 +1705,12 @@ read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict if (fd == NULL) { if (errno != ENOENT) - elog(ERROR, "could not open file \"%s\": %s", path, + elog(ERROR, "Could not open file \"%s\": %s", path, strerror(errno)); /* There is no history file for target timeline */ if (strict) - elog(ERROR, "recovery target timeline %u does not exist", + elog(ERROR, "Recovery target timeline %u does not exist", targetTLI); else return NULL; @@ -1743,12 +1744,12 @@ read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict { /* expect a numeric timeline 
ID as first field of line */ elog(ERROR, - "syntax error in history file: %s. Expected a numeric timeline ID.", + "Syntax error in history file: %s. Expected a numeric timeline ID.", fline); } if (nfields != 3) elog(ERROR, - "syntax error in history file: %s. Expected a transaction log switchpoint location.", + "Syntax error in history file: %s. Expected a transaction log switchpoint location.", fline); if (last_timeline && tli <= last_timeline->tli) diff --git a/src/util.c b/src/util.c index e371d2c6d..1407f03cc 100644 --- a/src/util.c +++ b/src/util.c @@ -74,7 +74,7 @@ checkControlFile(ControlFileData *ControlFile) if ((ControlFile->pg_control_version % 65536 == 0 || ControlFile->pg_control_version % 65536 > 10000) && ControlFile->pg_control_version / 65536 != 0) - elog(ERROR, "possible byte ordering mismatch\n" + elog(ERROR, "Possible byte ordering mismatch\n" "The byte ordering used to store the pg_control file might not match the one\n" "used by this program. In that case the results below would be incorrect, and\n" "the PostgreSQL installation would be incompatible with this data directory."); @@ -93,7 +93,7 @@ digestControlFile(ControlFileData *ControlFile, char *src, size_t size) #endif if (size != ControlFileSize) - elog(ERROR, "unexpected control file size %d, expected %d", + elog(ERROR, "Unexpected control file size %d, expected %d", (int) size, ControlFileSize); memcpy(ControlFile, src, sizeof(ControlFileData)); diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 08d024516..921555350 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -521,11 +521,17 @@ config_get_opt(int argc, char **argv, ConfigOption cmd_options[], optstring = longopts_to_optstring(longopts, cmd_len + len); + opterr = 0; /* Assign named options */ while ((c = getopt_long(argc, argv, optstring, longopts, &optindex)) != -1) { ConfigOption *opt; + if (c == '?') + { + elog(ERROR, "Option '%s' requires an argument. 
Try \"%s --help\" for more information.", + argv[optind-1], PROGRAM_NAME); + } opt = option_find(c, cmd_options); if (opt == NULL) opt = option_find(c, options); @@ -1439,16 +1445,16 @@ parse_lsn(const char *value, XLogRecPtr *result) len1 = strspn(value, "0123456789abcdefABCDEF"); if (len1 < 1 || len1 > MAXPG_LSNCOMPONENT || value[len1] != '/') - elog(ERROR, "invalid LSN \"%s\"", value); + elog(ERROR, "Invalid LSN \"%s\"", value); len2 = strspn(value + len1 + 1, "0123456789abcdefABCDEF"); if (len2 < 1 || len2 > MAXPG_LSNCOMPONENT || value[len1 + 1 + len2] != '\0') - elog(ERROR, "invalid LSN \"%s\"", value); + elog(ERROR, "Invalid LSN \"%s\"", value); if (sscanf(value, "%X/%X", &xlogid, &xrecoff) == 2) *result = (XLogRecPtr) ((uint64) xlogid << 32) | xrecoff; else { - elog(ERROR, "invalid LSN \"%s\"", value); + elog(ERROR, "Invalid LSN \"%s\"", value); return false; } diff --git a/src/validate.c b/src/validate.c index 9372b082c..471351678 100644 --- a/src/validate.c +++ b/src/validate.c @@ -394,7 +394,7 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) /* open directory and list contents */ dir = opendir(catalogState->backup_subdir_path); if (dir == NULL) - elog(ERROR, "cannot open directory \"%s\": %s", catalogState->backup_subdir_path, strerror(errno)); + elog(ERROR, "Cannot open directory \"%s\": %s", catalogState->backup_subdir_path, strerror(errno)); errno = 0; while ((dent = readdir(dir))) @@ -412,7 +412,7 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) join_path_components(child, catalogState->backup_subdir_path, dent->d_name); if (lstat(child, &st) == -1) - elog(ERROR, "cannot stat file \"%s\": %s", child, strerror(errno)); + elog(ERROR, "Cannot stat file \"%s\": %s", child, strerror(errno)); if (!S_ISDIR(st.st_mode)) continue; diff --git a/tests/auth_test.py b/tests/auth_test.py index 52d7e1544..32cabc4a1 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -117,13 +117,13 @@ def test_backup_via_unprivileged_user(self): except ProbackupException as e: if self.get_version(node) < 150000: self.assertIn( - "ERROR: query failed: ERROR: permission denied " + "ERROR: Query failed: ERROR: permission denied " "for function pg_stop_backup", e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) else: self.assertIn( - "ERROR: query failed: ERROR: permission denied " + "ERROR: Query failed: ERROR: permission denied " "for function pg_backup_stop", e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) diff --git a/tests/backup_test.py b/tests/backup_test.py index 32a2cee50..9e911893e 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -2926,9 +2926,9 @@ def test_missing_wal_segment(self): gdb.output) self.assertIn( - 'WARNING: backup in progress, stop backup', + 'WARNING: A backup is in progress, stopping it', gdb.output) - + # TODO: check the same for PAGE backup # @unittest.skip("skip") @@ -3316,7 +3316,7 @@ def test_backup_atexit(self): log_content = f.read() #print(log_content) self.assertIn( - 'WARNING: backup in progress, stop backup', + 'WARNING: A backup is in progress, stopping it.', log_content) if self.get_version(node) < 150000: @@ -3327,7 +3327,7 @@ def test_backup_atexit(self): self.assertIn( 'FROM pg_catalog.pg_backup_stop', log_content) - + self.assertIn( 'setting its status to ERROR', log_content) diff --git a/tests/checkdb_test.py b/tests/checkdb_test.py index 4d3a4cbbf..eb46aea19 100644 --- a/tests/checkdb_test.py +++ 
b/tests/checkdb_test.py @@ -131,7 +131,7 @@ def test_checkdb_amcheck_only_sanity(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - "ERROR: required parameter not specified: --instance", + "ERROR: Required parameter not specified: --instance", e.message, "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) @@ -397,7 +397,7 @@ def test_checkdb_block_validation_sanity(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - "ERROR: required parameter not specified: PGDATA (-D, --pgdata)", + "ERROR: Required parameter not specified: PGDATA (-D, --pgdata)", e.message, "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) diff --git a/tests/compression_test.py b/tests/compression_test.py index e779f6472..55924b9d2 100644 --- a/tests/compression_test.py +++ b/tests/compression_test.py @@ -443,7 +443,7 @@ def test_compression_wrong_algorithm(self): except ProbackupException as e: self.assertEqual( e.message, - 'ERROR: invalid compress algorithm value "bla-blah"\n', + 'ERROR: Invalid compress algorithm value "bla-blah"\n', '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) diff --git a/tests/init_test.py b/tests/init_test.py index 94b076fef..4e000c78f 100644 --- a/tests/init_test.py +++ b/tests/init_test.py @@ -56,7 +56,8 @@ def test_success(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - "ERROR: Required parameter not specified: PGDATA (-D, --pgdata)", + "ERROR: No postgres data directory specified.\n" + "Please specify it either using environment variable PGDATA or\ncommand line option --pgdata (-D)", e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(e.message, self.cmd)) diff --git a/tests/option_test.py b/tests/option_test.py index af4b12b71..66cc13746 100644 --- a/tests/option_test.py +++ b/tests/option_test.py @@ -33,7 +33,9 @@ def test_without_backup_path_3(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: required parameter not specified: BACKUP_PATH (-B, --backup-path)', + 'ERROR: No backup catalog path specified.\n' + \ + 'Please specify it either using environment variable BACKUP_PATH or\n' + \ + 'command line option --backup-path (-B)', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) @@ -54,7 +56,7 @@ def test_options_4(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: required parameter not specified: --instance', + 'ERROR: Required parameter not specified: --instance', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) @@ -65,7 +67,7 @@ def test_options_4(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: required parameter not specified: BACKUP_MODE (-b, --backup-mode)', + 'ERROR: No backup mode specified.\nPlease specify it either using environment variable BACKUP_MODE or\ncommand line option --backup-mode (-b)', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) @@ -76,7 +78,7 @@ def test_options_4(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: invalid backup-mode "bad"', + 'ERROR: Invalid backup-mode "bad"', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) @@ -102,7 +104,7 @@ def test_options_4(self): repr(self.output), self.cmd)) except 
ProbackupException as e: self.assertIn( - "option requires an argument -- 'i'", + "Option '-i' requires an argument", e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) @@ -114,13 +116,8 @@ def test_options_5(self): base_dir=os.path.join(self.module_name, self.fname, 'node')) output = self.init_pb(backup_dir) - self.assertIn( - "INFO: Backup catalog", - output) + self.assertIn(f"INFO: Backup catalog '{backup_dir}' successfully initialized", output) - self.assertIn( - "successfully inited", - output) self.add_instance(backup_dir, 'node', node) node.slow_start() diff --git a/tests/restore_test.py b/tests/restore_test.py index da3ebffb4..67e99515c 100644 --- a/tests/restore_test.py +++ b/tests/restore_test.py @@ -2995,7 +2995,7 @@ def test_empty_and_mangled_database_map(self): self.output, self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: field "dbOid" is not found in the line 42 of ' + 'ERROR: Field "dbOid" is not found in the line 42 of ' 'the file backup_content.control', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) @@ -3011,7 +3011,7 @@ def test_empty_and_mangled_database_map(self): self.output, self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: field "dbOid" is not found in the line 42 of ' + 'ERROR: Field "dbOid" is not found in the line 42 of ' 'the file backup_content.control', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) diff --git a/tests/set_backup_test.py b/tests/set_backup_test.py index e789d174a..31334cfba 100644 --- a/tests/set_backup_test.py +++ b/tests/set_backup_test.py @@ -41,7 +41,7 @@ def test_set_backup_sanity(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: required parameter not specified: --instance', + 'ERROR: Required parameter not specified: --instance', e.message, "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) From a04f00aacc548ad1ff1ee457bc0e2b8707bba4b3 Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Tue, 28 Mar 2023 17:23:44 +0700 Subject: [PATCH 461/525] Up version --- src/pg_probackup.h | 2 +- tests/expected/option_version.out | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index a5c17d9f8..bcea92804 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -352,7 +352,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.11" +#define PROGRAM_VERSION "2.5.12" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20509 diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index e0d6924b9..0d50cb268 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.5.11 +pg_probackup 2.5.12 From 279c98140ffcc38425e31ad17abe1b980d3154c6 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 15 Apr 2023 14:47:38 +0300 Subject: [PATCH 462/525] PGPRO-552: use uint32 in some places --- src/backup.c | 4 ++-- src/utils/pgut.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/backup.c b/src/backup.c index 4c2454558..41f035a86 100644 --- a/src/backup.c +++ b/src/backup.c @@ -2357,7 +2357,7 @@ 
remove_excluded_files_criterion(void *value, void *exclude_args) { return file->remove_from_list; } -static uint32_t +static uint32 hash_rel_seg(pgFile* file) { uint32 hash = hash_mix32_2(file->relOid, file->segno); @@ -2387,7 +2387,7 @@ rewind_and_mark_cfs_datafiles(parray *files, const char *root, char *relative, s pgFile *prev_file; pgFile *tmp_file; char *cfs_tblspc_path; - uint32_t h; + uint32 h; /* hash table for cfm files */ #define HASHN 128 diff --git a/src/utils/pgut.h b/src/utils/pgut.h index 4fd659b82..1b7b7864c 100644 --- a/src/utils/pgut.h +++ b/src/utils/pgut.h @@ -115,7 +115,7 @@ extern int usleep(unsigned int usec); #define ARG_SIZE_HINT static #endif -static inline uint32_t hash_mix32_2(uint32_t a, uint32_t b) +static inline uint32 hash_mix32_2(uint32 a, uint32 b) { b ^= (a<<7)|(a>>25); a *= 0xdeadbeef; From b7551bd2bf5f0173225111ed946a7e221ea93647 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 18 Apr 2023 18:25:36 +0300 Subject: [PATCH 463/525] PBCKP-91: delete "\r" in windows command output. --- tests/helpers/ptrack_helpers.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index f8044a814..6b665097c 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -982,7 +982,8 @@ def run_pb(self, command, asynchronous=False, gdb=False, old_binary=False, retur else: return self.output except subprocess.CalledProcessError as e: - raise ProbackupException(e.output.decode('utf-8'), self.cmd) + raise ProbackupException(e.output.decode('utf-8').replace("\r",""), + self.cmd) def run_binary(self, command, asynchronous=False, env=None): From d6721662ec76257d9470b1d20d75b7bc6bb1501c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 18 Apr 2023 17:24:34 +0300 Subject: [PATCH 464/525] Use _chsize_s on windows to grow file Looks like "seek after file end and write" doesn't work correctly in Windows. We have to grow file before. Met occasionally on GitHub actions run. Could not reproduce locally. --- src/utils/file.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/utils/file.c b/src/utils/file.c index c4ed9c721..e062a2133 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2717,6 +2717,14 @@ fio_send_file_write(FILE* out, send_file_state* st, char *buf, size_t len) if (len == 0) return true; +#ifdef WIN32 + if (st->read_size > st->write_size && + _chsize_s(fileno(out), st->read_size) != 0) + { + elog(WARNING, "Could not change file size to %lld: %m", st->read_size); + return false; + } +#endif if (st->read_size > st->write_size && fseeko(out, st->read_size, SEEK_SET) != 0) { From 0690f8d10eef4926bbe236c88327be55eba242f2 Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Mon, 24 Apr 2023 18:22:28 +0300 Subject: [PATCH 465/525] [PBCKP-602] Added saving full size for non data files. --- src/data.c | 2 ++ tests/backup_test.py | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/src/data.c b/src/data.c index 490faf9b6..21c41e0b6 100644 --- a/src/data.c +++ b/src/data.c @@ -815,6 +815,8 @@ backup_non_data_file(pgFile *file, pgFile *prev_file, if (EQ_TRADITIONAL_CRC32(file->crc, prev_file->crc)) { file->write_size = BYTES_INVALID; + /* get full size from previous backup for unchanged file */ + file->uncompressed_size = prev_file->uncompressed_size; return; /* ...skip copying file. 
*/ } } diff --git a/tests/backup_test.py b/tests/backup_test.py index 9e911893e..86a2124dc 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -3606,3 +3606,36 @@ def test_regress_issue_585(self): output = self.restore_node(backup_dir, 'node', node) self.assertNotRegex(output, r'WARNING: [^\n]* was stored as .* but looks like') + + def test_2_delta_backups(self): + """https://github.com/postgrespro/pg_probackup/issues/596""" + node = self.make_simple_node('node', + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + # self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL + full_backup_id = self.backup_node(backup_dir, 'node', node, options=["--stream"]) + + # delta backup mode + delta_backup_id1 = self.backup_node( + backup_dir, 'node', node, backup_type="delta", options=["--stream"]) + + delta_backup_id2 = self.backup_node( + backup_dir, 'node', node, backup_type="delta", options=["--stream"]) + + # postgresql.conf and pg_hba.conf shouldn't be copied + conf_file = os.path.join(backup_dir, 'backups', 'node', delta_backup_id1, 'database', 'postgresql.conf') + self.assertFalse( + os.path.exists(conf_file), + "File should not exist: {0}".format(conf_file)) + conf_file = os.path.join(backup_dir, 'backups', 'node', delta_backup_id2, 'database', 'postgresql.conf') + print(conf_file) + self.assertFalse( + os.path.exists(conf_file), + "File should not exist: {0}".format(conf_file)) From 754f02228e84ac791b66b494e13d523e590490b7 Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Wed, 24 May 2023 15:13:01 +0300 Subject: [PATCH 466/525] PBCKP-624 Remove outdated installation instructions for Standard and Enterprise --- LICENSE | 2 +- README.md | 91 ++++++++++++++----------------------------------------- 2 files changed, 24 insertions(+), 69 deletions(-) diff --git a/LICENSE b/LICENSE index 0ba831507..66476e8a9 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2015-2020, Postgres Professional +Copyright (c) 2015-2023, Postgres Professional Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group diff --git a/README.md b/README.md index 7486a6ca6..b804eb1fb 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ `pg_probackup` is a utility to manage backup and recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure. The utility is compatible with: -* PostgreSQL 9.6, 10, 11, 12, 13, 14, 15; +* PostgreSQL 11, 12, 13, 14, 15; As compared to other backup solutions, `pg_probackup` offers the following benefits that can help you implement different backup strategies and deal with large amounts of data: * Incremental backup: page-level incremental backup allows you to save disk space, speed up backup and restore. With three different incremental modes, you can plan the backup strategy in accordance with your data flow. @@ -74,113 +74,68 @@ Installers are available in release **assets**. 
[Latests](https://github.com/pos #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{15,14,13,12,11,10} -sudo apt-get install pg-probackup-{15,14,13,12,11,10}-dbg +sudo apt-get install pg-probackup-{15,14,13,12,11} +sudo apt-get install pg-probackup-{15,14,13,12,11}-dbg #DEB-SRC Packages sudo sh -c 'echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\ /etc/apt/sources.list.d/pg_probackup.list' && sudo apt-get update -sudo apt-get source pg-probackup-{15,14,13,12,11,10} +sudo apt-get source pg-probackup-{15,14,13,12,11} #DEB Astra Linix Orel sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{15,14,13,12,11,10}{-dbg,} +sudo apt-get install pg-probackup-{15,14,13,12,11}{-dbg,} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm -yum install pg_probackup-{15,14,13,12,11,10} -yum install pg_probackup-{15,14,13,12,11,10}-debuginfo +yum install pg_probackup-{15,14,13,12,11} +yum install pg_probackup-{15,14,13,12,11}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-rhel.noarch.rpm -yum install pg_probackup-{15,14,13,12,11,10} -yum install pg_probackup-{15,14,13,12,11,10}-debuginfo +yum install pg_probackup-{15,14,13,12,11} +yum install pg_probackup-{15,14,13,12,11}-debuginfo #RPM Oracle Linux Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-oraclelinux.noarch.rpm -yum install pg_probackup-{15,14,13,12,11,10} -yum install pg_probackup-{15,14,13,12,11,10}-debuginfo +yum install pg_probackup-{15,14,13,12,11} +yum install pg_probackup-{15,14,13,12,11}-debuginfo #SRPM Centos|RHEL|OracleLinux Packages -yumdownloader --source pg_probackup-{15,14,13,12,11,10} +yumdownloader --source pg_probackup-{15,14,13,12,11} #RPM SUSE|SLES Packages zypper install --allow-unsigned-rpm -y https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-suse.noarch.rpm -zypper --gpg-auto-import-keys install -y pg_probackup-{15,14,13,12,11,10} -zypper install pg_probackup-{15,14,13,12,11,10}-debuginfo +zypper --gpg-auto-import-keys install -y pg_probackup-{15,14,13,12,11} +zypper install pg_probackup-{15,14,13,12,11}-debuginfo #SRPM SUSE|SLES Packages -zypper si pg_probackup-{15,14,13,12,11,10} +zypper si pg_probackup-{15,14,13,12,11} #RPM ALT Linux 8 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p8 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{15,14,13,12,11,10} -sudo apt-get install pg_probackup-{15,14,13,12,11,10}-debuginfo +sudo apt-get install pg_probackup-{15,14,13,12,11} +sudo apt-get install 
pg_probackup-{15,14,13,12,11}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p9 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{15,14,13,12,11,10} -sudo apt-get install pg_probackup-{15,14,13,12,11,10}-debuginfo +sudo apt-get install pg_probackup-{15,14,13,12,11} +sudo apt-get install pg_probackup-{15,14,13,12,11}-debuginfo #RPM ALT Linux 10 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p10 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{15,14,13,12,11,10} -sudo apt-get install pg_probackup-{15,14,13,12,11,10}-debuginfo -``` - -#### pg_probackup for PostgresPro Standard and Enterprise -```shell -#DEB Ubuntu|Debian Packages -sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup-forks.list' -sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{std,ent}-{14,13,12,11,10,9.6} -sudo apt-get install pg-probackup-{std,ent}-{14,13,12,11,10,9.6}-dbg - -#DEB Astra Linix Orel -sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' -sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{std,ent}-{12,11,10,9.6}{-dbg,} - - -#RPM Centos Packages -rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-centos.noarch.rpm -yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo - -#RPM RHEL Packages -rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-rhel.noarch.rpm -yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo - -#RPM Oracle Linux Packages -rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-oraclelinux.noarch.rpm -yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo - -#RPM ALT Linux 7 -sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p7 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' -sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo - -#RPM ALT Linux 8 -sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p8 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' -sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo - -#RPM ALT Linux 9 -sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p9 x86_64 forks" > 
/etc/apt/sources.list.d/pg_probackup_forks.list' && sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{15,14,13,12,11} +sudo apt-get install pg_probackup-{15,14,13,12,11}-debuginfo ``` Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.github.io/pg_probackup/#pbk-install-and-setup). +For users of Postgres Pro products, commercial editions of pg_probackup are available for installation from the corresponding Postgres Pro product repository. + ## Building from source ### Linux From c9fc20b898610207a23f29fc2ec535d49c265e18 Mon Sep 17 00:00:00 2001 From: Yuriy Sokolov Date: Thu, 25 May 2023 18:12:11 +0300 Subject: [PATCH 467/525] PBCKP-604: Allow partial incremental restore only with a flag --destroy-all-other-dbs --- src/help.c | 5 ++ src/pg_probackup.c | 3 + src/pg_probackup.h | 8 +- src/restore.c | 25 +++++- tests/expected/option_help.out | 1 + tests/expected/option_help_ru.out | 1 + tests/incr_restore_test.py | 136 +++++++++++++++++++++++++++++- 7 files changed, 170 insertions(+), 9 deletions(-) diff --git a/src/help.c b/src/help.c index 116a0711c..954ba6416 100644 --- a/src/help.c +++ b/src/help.c @@ -175,6 +175,7 @@ help_pg_probackup(void) printf(_(" [-X WALDIR | --waldir=WALDIR]\n")); printf(_(" [-I | --incremental-mode=none|checksum|lsn]\n")); printf(_(" [--db-include | --db-exclude]\n")); + printf(_(" [--destroy-all-other-dbs]\n")); printf(_(" [--remote-proto] [--remote-host]\n")); printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n")); @@ -450,6 +451,7 @@ help_restore(void) printf(_(" [-X WALDIR | --waldir=WALDIR]\n")); printf(_(" [-I | --incremental-mode=none|checksum|lsn]\n")); printf(_(" [--db-include dbname | --db-exclude dbname]\n")); + printf(_(" [--destroy-all-other-dbs]\n")); printf(_(" [--recovery-target-time=time|--recovery-target-xid=xid\n")); printf(_(" |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]\n")); printf(_(" [--recovery-target-timeline=timeline]\n")); @@ -497,6 +499,9 @@ help_restore(void) printf(_("\n Partial restore options:\n")); printf(_(" --db-include dbname restore only specified databases\n")); printf(_(" --db-exclude dbname do not restore specified databases\n")); + printf(_(" --destroy-all-other-dbs\n")); + printf(_(" allows to do partial restore that is prohibited by default,\n")); + printf(_(" because it might remove all other databases.\n")); printf(_("\n Recovery options:\n")); printf(_(" --recovery-target-time=time time stamp up to which recovery will proceed\n")); diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 505dff89b..17beff55a 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -124,6 +124,7 @@ static parray *datname_include_list = NULL; static parray *exclude_absolute_paths_list = NULL; static parray *exclude_relative_paths_list = NULL; static char* gl_waldir_path = NULL; +static bool allow_partial_incremental = false; /* checkdb options */ bool need_amcheck = false; @@ -242,6 +243,7 @@ static ConfigOption cmd_options[] = { 's', 'S', "primary-slot-name",&replication_slot, SOURCE_CMD_STRICT }, { 'f', 'I', "incremental-mode", opt_incr_restore_mode, SOURCE_CMD_STRICT }, { 's', 'X', "waldir", &gl_waldir_path, SOURCE_CMD_STRICT }, + { 'b', 242, "destroy-all-other-dbs", &allow_partial_incremental, SOURCE_CMD_STRICT }, /* checkdb options */ { 'b', 195, "amcheck", 
&need_amcheck, SOURCE_CMD_STRICT }, { 'b', 196, "heapallindexed", &heapallindexed, SOURCE_CMD_STRICT }, @@ -764,6 +766,7 @@ main(int argc, char *argv[]) restore_params->partial_restore_type = NONE; restore_params->primary_conninfo = primary_conninfo; restore_params->incremental_mode = incremental_mode; + restore_params->allow_partial_incremental = allow_partial_incremental; /* handle partial restore parameters */ if (datname_exclude_list && datname_include_list) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index bcea92804..5ee612e6f 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -179,6 +179,7 @@ typedef enum DestDirIncrCompatibility POSTMASTER_IS_RUNNING, SYSTEM_ID_MISMATCH, BACKUP_LABEL_EXISTS, + PARTIAL_INCREMENTAL_FORBIDDEN, DEST_IS_NOT_OK, DEST_OK } DestDirIncrCompatibility; @@ -585,7 +586,8 @@ typedef struct pgRestoreParams /* options for partial restore */ PartialRestoreType partial_restore_type; parray *partial_db_list; - + bool allow_partial_incremental; + char* waldir; } pgRestoreParams; @@ -903,7 +905,9 @@ extern parray *get_backup_filelist(pgBackup *backup, bool strict); extern parray *read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict); extern bool tliIsPartOfHistory(const parray *timelines, TimeLineID tli); extern DestDirIncrCompatibility check_incremental_compatibility(const char *pgdata, uint64 system_identifier, - IncrRestoreMode incremental_mode); + IncrRestoreMode incremental_mode, + parray *partial_db_list, + bool allow_partial_incremental); /* in remote.c */ extern void check_remote_agent_compatibility(int agent_version, diff --git a/src/restore.c b/src/restore.c index bb38e8d7e..5b1585024 100644 --- a/src/restore.c +++ b/src/restore.c @@ -150,6 +150,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (params->incremental_mode != INCR_NONE) { DestDirIncrCompatibility rc; + const char *message = NULL; bool ok_to_go = true; elog(INFO, "Running incremental restore into nonempty directory: \"%s\"", @@ -157,12 +158,15 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg rc = check_incremental_compatibility(instance_config.pgdata, instance_config.system_identifier, - params->incremental_mode); + params->incremental_mode, + params->partial_db_list, + params->allow_partial_incremental); if (rc == POSTMASTER_IS_RUNNING) { /* Even with force flag it is unwise to run * incremental restore over running instance */ + message = "Postmaster is running."; ok_to_go = false; } else if (rc == SYSTEM_ID_MISMATCH) @@ -174,7 +178,10 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (params->incremental_mode != INCR_NONE && params->force) cleanup_pgdata = true; else + { + message = "System ID mismatch."; ok_to_go = false; + } } else if (rc == BACKUP_LABEL_EXISTS) { @@ -187,7 +194,10 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg * to calculate switchpoint. */ if (params->incremental_mode == INCR_LSN) + { + message = "Backup label exists. Cannot use incremental restore in LSN mode."; ok_to_go = false; + } } else if (rc == DEST_IS_NOT_OK) { @@ -196,11 +206,16 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg * so we cannot be sure that postmaster is running or not. * It is better to just error out. 
*/ + message = "We cannot be sure about the database state."; + ok_to_go = false; + } else if (rc == PARTIAL_INCREMENTAL_FORBIDDEN) + { + message = "Partial incremental restore into non-empty PGDATA is forbidden."; ok_to_go = false; } if (!ok_to_go) - elog(ERROR, "Incremental restore is not allowed"); + elog(ERROR, "Incremental restore is not allowed: %s", message); } else elog(ERROR, "Restore destination is not empty: \"%s\"", @@ -2142,7 +2157,9 @@ get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, */ DestDirIncrCompatibility check_incremental_compatibility(const char *pgdata, uint64 system_identifier, - IncrRestoreMode incremental_mode) + IncrRestoreMode incremental_mode, + parray *partial_db_list, + bool allow_partial_incremental) { uint64 system_id_pgdata; bool system_id_match = false; @@ -2226,6 +2243,8 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, if (backup_label_exists) return BACKUP_LABEL_EXISTS; + if (partial_db_list && !allow_partial_incremental) + return PARTIAL_INCREMENTAL_FORBIDDEN; /* some other error condition */ if (!success) return DEST_IS_NOT_OK; diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index 5948d0503..49f79607f 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -92,6 +92,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [-X WALDIR | --waldir=WALDIR] [-I | --incremental-mode=none|checksum|lsn] [--db-include | --db-exclude] + [--destroy-all-other-dbs] [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] diff --git a/tests/expected/option_help_ru.out b/tests/expected/option_help_ru.out index 358c49428..976932b9d 100644 --- a/tests/expected/option_help_ru.out +++ b/tests/expected/option_help_ru.out @@ -92,6 +92,7 @@ pg_probackup - утилита для управления резервным к [-X WALDIR | --waldir=WALDIR] [-I | --incremental-mode=none|checksum|lsn] [--db-include | --db-exclude] + [--destroy-all-other-dbs] [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] diff --git a/tests/incr_restore_test.py b/tests/incr_restore_test.py index 613e4dd36..f17ee95d1 100644 --- a/tests/incr_restore_test.py +++ b/tests/incr_restore_test.py @@ -1962,7 +1962,9 @@ def test_incremental_partial_restore_exclude_checksum(self): node2, options=[ "--db-exclude=db1", "--db-exclude=db5", - "-I", "checksum"]) + "-I", "checksum", + "--destroy-all-other-dbs", + ]) pgdata2 = self.pgdata_content(node2.data_dir) @@ -2068,7 +2070,9 @@ def test_incremental_partial_restore_exclude_lsn(self): node2, options=[ "--db-exclude=db1", "--db-exclude=db5", - "-I", "lsn"]) + "-I", "lsn", + "--destroy-all-other-dbs", + ]) pgdata2 = self.pgdata_content(node2.data_dir) @@ -2188,7 +2192,8 @@ def test_incremental_partial_restore_exclude_tablespace_checksum(self): "--db-exclude=db1", "--db-exclude=db5", "-T", "{0}={1}".format( - node_tablespace, node2_tablespace)]) + node_tablespace, node2_tablespace), + "--destroy-all-other-dbs"]) # we should die here because exception is what we expect to happen self.assertEqual( 1, 0, @@ -2209,7 +2214,9 @@ def test_incremental_partial_restore_exclude_tablespace_checksum(self): "--db-exclude=db1", "--db-exclude=db5", "-T", "{0}={1}".format( - node_tablespace, node2_tablespace)]) + node_tablespace, node2_tablespace), + "--destroy-all-other-dbs", + ]) pgdata2 = self.pgdata_content(node2.data_dir) @@ -2241,6 +2248,127 @@ def 
test_incremental_partial_restore_exclude_tablespace_checksum(self): self.assertNotIn('PANIC', output) + def test_incremental_partial_restore_deny(self): + """ + Do not allow partial incremental restore into non-empty PGDATA + because we can't limit WAL replay to a single database. + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + for i in range(1, 3): + node.safe_psql('postgres', f'CREATE database db{i}') + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + pgdata = self.pgdata_content(node.data_dir) + + try: + self.restore_node(backup_dir, 'node', node, options=["--db-include=db1", '-I', 'LSN']) + self.fail("incremental partial restore is not allowed") + except ProbackupException as e: + self.assertIn("Incremental restore is not allowed: Postmaster is running.", e.message) + + node.safe_psql('db2', 'create table x (id int)') + node.safe_psql('db2', 'insert into x values (42)') + + node.stop() + + try: + self.restore_node(backup_dir, 'node', node, options=["--db-include=db1", '-I', 'LSN']) + self.fail("because incremental partial restore is not allowed") + except ProbackupException as e: + self.assertIn("Incremental restore is not allowed: Partial incremental restore into non-empty PGDATA is forbidden", e.message) + + node.slow_start() + value = node.execute('db2', 'select * from x')[0][0] + self.assertEqual(42, value) + + def test_deny_incremental_partial_restore_exclude_tablespace_checksum(self): + """ + Do not allow partial incremental restore into non-empty PGDATA + because we can't limit WAL replay to a single database.
+ (case of tablespaces) + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'somedata') + + node_tablespace = self.get_tblspace_path(node, 'somedata') + + tbl_oid = node.safe_psql( + 'postgres', + "SELECT oid " + "FROM pg_tablespace " + "WHERE spcname = 'somedata'").rstrip() + + for i in range(1, 10, 1): + node.safe_psql( + 'postgres', + 'CREATE database db{0} tablespace somedata'.format(i)) + + db_list_raw = node.safe_psql( + 'postgres', + 'SELECT to_json(a) ' + 'FROM (SELECT oid, datname FROM pg_database) a').rstrip() + + db_list_splitted = db_list_raw.splitlines() + + db_list = {} + for line in db_list_splitted: + line = json.loads(line) + db_list[line['datname']] = line['oid'] + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + + # node2 + node2 = self.make_simple_node('node2') + node2.cleanup() + node2_tablespace = self.get_tblspace_path(node2, 'somedata') + + # in node2 restore full backup + self.restore_node( + backup_dir, 'node', + node2, options=[ + "-T", f"{node_tablespace}={node2_tablespace}"]) + + # partial incremental restore into node2 + try: + self.restore_node(backup_dir, 'node', node2, + options=["-I", "checksum", + "--db-exclude=db1", + "--db-exclude=db5", + "-T", f"{node_tablespace}={node2_tablespace}"]) + self.fail("remapped tablespace contain old data") + except ProbackupException as e: + pass + + try: + self.restore_node(backup_dir, 'node', node2, + options=[ + "-I", "checksum", "--force", + "--db-exclude=db1", "--db-exclude=db5", + "-T", f"{node_tablespace}={node2_tablespace}"]) + self.fail("incremental partial restore is not allowed") + except ProbackupException as e: + self.assertIn("Incremental restore is not allowed: Partial incremental restore into non-empty PGDATA is forbidden", e.message) + def test_incremental_pg_filenode_map(self): """ https://github.com/postgrespro/pg_probackup/issues/320 From db12a039f6c794d4d3e09ee65e8cb8a5e346658a Mon Sep 17 00:00:00 2001 From: Viktoria Shepard Date: Thu, 25 May 2023 18:12:41 +0300 Subject: [PATCH 468/525] PBCKP-604 doc add option --destroy-all-other-dbs --- doc/pgprobackup.xml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 2cb10e379..297b124f0 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -5850,6 +5850,34 @@ pg_probackup catchup -b catchup_mode + + + Testing and Debugging Options + + This section describes options useful only in a test or development environment. + + + + + + + + By default, pg_probackup exits with an + error if an attempt is made to perform a partial incremental restore + since this destroys databases not included in the restore set. This + flag allows you to suppress the error and proceed with the partial + incremental restore (e.g., to keep a development database snapshot + up-to-date with a production one). This option can be used with the + command. + + + Never use this flag in a production cluster. 
+ + + + + + From a29e378f34d12495f1b64da08f76fb7ad17aab89 Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Mon, 18 Sep 2023 19:48:22 +0300 Subject: [PATCH 469/525] PBCKP-698 allow relative paths in dir_create_dir --- src/dir.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dir.c b/src/dir.c index a16e0f396..7ba831c3b 100644 --- a/src/dir.c +++ b/src/dir.c @@ -155,7 +155,7 @@ dir_create_dir(const char *dir, mode_t mode, bool strict) get_parent_directory(parent); /* Create parent first */ - if (access(parent, F_OK) == -1) + if (strlen(parent) > 0 && access(parent, F_OK) == -1) dir_create_dir(parent, mode, false); /* Create directory */ From 9762426ce90c33ffb9fa3896228fe4e58f878aa9 Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Mon, 18 Sep 2023 18:12:06 +0300 Subject: [PATCH 470/525] PBCKP-732 ignore PGDATA setting in catchup mode as we use --source-pgdata instead --- src/pg_probackup.c | 2 +- tests/catchup_test.py | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 17beff55a..30b4212b4 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -682,7 +682,7 @@ main(int argc, char *argv[]) if (instance_config.pgdata != NULL) canonicalize_path(instance_config.pgdata); if (instance_config.pgdata != NULL && - backup_subcmd != ARCHIVE_GET_CMD && + (backup_subcmd != ARCHIVE_GET_CMD && backup_subcmd != CATCHUP_CMD) && !is_absolute_path(instance_config.pgdata)) elog(ERROR, "-D, --pgdata must be an absolute path"); diff --git a/tests/catchup_test.py b/tests/catchup_test.py index 21bcd7973..cf8388dd2 100644 --- a/tests/catchup_test.py +++ b/tests/catchup_test.py @@ -1585,3 +1585,42 @@ def test_dry_run_catchup_delta(self): # Cleanup src_pg.stop() + + def test_pgdata_is_ignored(self): + """ In catchup we still allow PGDATA to be set either from the command line + or from the env var. This tests that PGDATA is actually ignored and + --source-pgdata is used instead + """ + node = self.make_simple_node('node', + set_replication = True + ) + node.slow_start() + + # do full catchup + dest = self.make_empty_node('dst') + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = node.data_dir, + destination_node = dest, + options = ['-d', 'postgres', '-p', str(node.port), '--stream', '--pgdata=xxx'] + ) + + self.compare_pgdata( + self.pgdata_content(node.data_dir), + self.pgdata_content(dest.data_dir) + ) + + os.environ['PGDATA']='xxx' + + dest2 = self.make_empty_node('dst') + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = node.data_dir, + destination_node = dest2, + options = ['-d', 'postgres', '-p', str(node.port), '--stream'] + ) + + self.compare_pgdata( + self.pgdata_content(node.data_dir), + self.pgdata_content(dest2.data_dir) + ) From eb5ccf91b88a92f86631a11a916bd425da86c717 Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Thu, 21 Sep 2023 17:28:47 +0300 Subject: [PATCH 471/525] PBCKP-751: Fixed for PG 16 build and removed some compilation warnings.
--- po/LINGUAS | 1 + src/data.c | 8 ++++---- src/dir.c | 1 - src/show.c | 10 +++++----- src/stream.c | 8 ++++++-- src/validate.c | 4 +--- 6 files changed, 17 insertions(+), 15 deletions(-) create mode 100644 po/LINGUAS diff --git a/po/LINGUAS b/po/LINGUAS new file mode 100644 index 000000000..562ba4cf0 --- /dev/null +++ b/po/LINGUAS @@ -0,0 +1 @@ +ru diff --git a/src/data.c b/src/data.c index 21c41e0b6..a287218ea 100644 --- a/src/data.c +++ b/src/data.c @@ -142,7 +142,7 @@ page_may_be_compressed(Page page, CompressAlg alg, uint32 backup_version) phdr = (PageHeader) page; /* First check if page header is valid (it seems to be fast enough check) */ - if (!(PageGetPageSize(phdr) == BLCKSZ && + if (!(PageGetPageSize(page) == BLCKSZ && // PageGetPageLayoutVersion(phdr) == PG_PAGE_LAYOUT_VERSION && (phdr->pd_flags & ~PD_VALID_FLAG_BITS) == 0 && phdr->pd_lower >= SizeOfPageHeaderData && @@ -181,7 +181,7 @@ parse_page(Page page, XLogRecPtr *lsn) /* Get lsn from page header */ *lsn = PageXLogRecPtrGet(phdr->pd_lsn); - if (PageGetPageSize(phdr) == BLCKSZ && + if (PageGetPageSize(page) == BLCKSZ && // PageGetPageLayoutVersion(phdr) == PG_PAGE_LAYOUT_VERSION && (phdr->pd_flags & ~PD_VALID_FLAG_BITS) == 0 && phdr->pd_lower >= SizeOfPageHeaderData && @@ -203,10 +203,10 @@ get_header_errormsg(Page page, char **errormsg) PageHeader phdr = (PageHeader) page; *errormsg = pgut_malloc(ERRMSG_MAX_LEN); - if (PageGetPageSize(phdr) != BLCKSZ) + if (PageGetPageSize(page) != BLCKSZ) snprintf(*errormsg, ERRMSG_MAX_LEN, "page header invalid, " "page size %lu is not equal to block size %u", - PageGetPageSize(phdr), BLCKSZ); + PageGetPageSize(page), BLCKSZ); else if (phdr->pd_lower < SizeOfPageHeaderData) snprintf(*errormsg, ERRMSG_MAX_LEN, "page header invalid, " diff --git a/src/dir.c b/src/dir.c index 7ba831c3b..353ed2d43 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1175,7 +1175,6 @@ check_tablespace_mapping(pgBackup *backup, bool incremental, bool force, bool pg { pgFile *link = (pgFile *) parray_get(links, i); const char *linked_path = link->linked; - TablespaceListCell *cell; bool remapped = false; for (cell = tablespace_dirs.head; cell; cell = cell->next) diff --git a/src/show.c b/src/show.c index cc22a2acb..86a122698 100644 --- a/src/show.c +++ b/src/show.c @@ -137,7 +137,7 @@ do_show(CatalogState *catalogState, InstanceState *instanceState, show_instance_start(); for (i = 0; i < parray_num(instances); i++) { - InstanceState *instanceState = parray_get(instances, i); + instanceState = parray_get(instances, i); if (interrupted) elog(ERROR, "Interrupted during show"); @@ -202,22 +202,22 @@ pretty_size(int64 size, char *buf, size_t len) return; } - if (Abs(size) < limit) + if (size < limit) snprintf(buf, len, "%dB", (int) size); else { size >>= 9; - if (Abs(size) < limit2) + if (size < limit2) snprintf(buf, len, "%dkB", (int) half_rounded(size)); else { size >>= 10; - if (Abs(size) < limit2) + if (size < limit2) snprintf(buf, len, "%dMB", (int) half_rounded(size)); else { size >>= 10; - if (Abs(size) < limit2) + if (size < limit2) snprintf(buf, len, "%dGB", (int) half_rounded(size)); else { diff --git a/src/stream.c b/src/stream.c index 73bea6780..77453e997 100644 --- a/src/stream.c +++ b/src/stream.c @@ -307,7 +307,11 @@ StreamLog(void *arg) } #if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 160000 + if (!ctl.walmethod->ops->finish(ctl.walmethod)) +#else if (!ctl.walmethod->finish()) +#endif { interrupted = true; elog(ERROR, "Could not finish writing WAL files: %s", @@ -529,7 +533,7 @@ 
get_history_streaming(ConnectionOptions *conn_opt, TimeLineID tli, parray *backu /* link parent to child */ for (i = 0; i < parray_num(tli_list); i++) { - timelineInfo *tlinfo = (timelineInfo *) parray_get(tli_list, i); + tlinfo = (timelineInfo *) parray_get(tli_list, i); for (j = 0; j < parray_num(tli_list); j++) { @@ -546,7 +550,7 @@ get_history_streaming(ConnectionOptions *conn_opt, TimeLineID tli, parray *backu /* add backups to each timeline info */ for (i = 0; i < parray_num(tli_list); i++) { - timelineInfo *tlinfo = parray_get(tli_list, i); + tlinfo = parray_get(tli_list, i); for (j = 0; j < parray_num(backup_list); j++) { pgBackup *backup = parray_get(backup_list, j); diff --git a/src/validate.c b/src/validate.c index 471351678..0887b2e7a 100644 --- a/src/validate.c +++ b/src/validate.c @@ -401,8 +401,6 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) { char child[MAXPGPATH]; struct stat st; - InstanceState *instanceState; - /* skip entries point current dir or parent dir */ if (strcmp(dent->d_name, ".") == 0 || @@ -420,7 +418,7 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) /* * Initialize instance configuration. */ - instanceState = pgut_new(InstanceState); + instanceState = pgut_new(InstanceState); /* memory leak */ strncpy(instanceState->instance_name, dent->d_name, MAXPGPATH); join_path_components(instanceState->instance_backup_subdir_path, From 4868daae347efaf3bbd46ee31f974a023015f41a Mon Sep 17 00:00:00 2001 From: "z.kasymalieva" Date: Wed, 18 Oct 2023 15:50:14 +0300 Subject: [PATCH 472/525] [PBCKP-770] The line informing that the remote mode parameters are added to PG_PROBACKUP.CONF has been removed; the set_config command must be used instead. --- doc/pgprobackup.xml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 297b124f0..22ed9dfd3 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -569,10 +569,9 @@ pg_probackup add-instance -B backup_dir -D backups/instance_name directory contains the pg_probackup.conf configuration file that controls - pg_probackup settings for this backup instance. If you run this - command with the - remote_options, the specified - parameters will be added to pg_probackup.conf. + pg_probackup settings for this backup instance. To add + remote_options to the configuration file, use the + command. 
For details on how to fine-tune pg_probackup configuration, see From 915d06655445c47e8a6764ed5e32e5e69fe87534 Mon Sep 17 00:00:00 2001 From: Viktoriia Shepard Date: Fri, 27 Oct 2023 19:12:16 +0200 Subject: [PATCH 473/525] fix tests for Pg16 and EE16 --- tests/backup_test.py | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/tests/backup_test.py b/tests/backup_test.py index 86a2124dc..dc60228b5 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -3075,11 +3075,20 @@ def test_missing_replication_permission(self): except ProbackupException as e: # 9.5: ERROR: must be superuser or replication role to run a backup # >=9.6: FATAL: must be superuser or replication role to start walsender - self.assertRegex( - e.message, - "ERROR: must be superuser or replication role to run a backup|FATAL: must be superuser or replication role to start walsender", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + if self.pg_config_version < 160000: + self.assertRegex( + e.message, + "ERROR: must be superuser or replication role to run a backup|" + "FATAL: must be superuser or replication role to start walsender", + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + else: + self.assertRegex( + e.message, + "FATAL: permission denied to start WAL sender\n" + "DETAIL: Only roles with the REPLICATION", + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) # @unittest.skip("skip") def test_missing_replication_permission_1(self): @@ -3228,9 +3237,17 @@ def test_missing_replication_permission_1(self): # 'WARNING: could not connect to database backupdb: connection to server at "localhost" (127.0.0.1), port 29732 failed: FATAL: must be superuser or replication role to start walsender' # OS-dependant messages: # 'WARNING: could not connect to database backupdb: connection to server at "localhost" (::1), port 12101 failed: Connection refused\n\tIs the server running on that host and accepting TCP/IP connections?\nconnection to server at "localhost" (127.0.0.1), port 12101 failed: FATAL: must be superuser or replication role to start walsender' - self.assertRegex( - output, - r'WARNING: could not connect to database backupdb:[\s\S]*?FATAL: must be superuser or replication role to start walsender') + + if self.pg_config_version < 160000: + self.assertRegex( + output, + r'WARNING: could not connect to database backupdb:[\s\S]*?' + r'FATAL: must be superuser or replication role to start walsender') + else: + self.assertRegex( + output, + r'WARNING: could not connect to database backupdb:[\s\S]*?' 
+ r'FATAL: permission denied to start WAL sender') # @unittest.skip("skip") def test_basic_backup_default_transaction_read_only(self): From c7ca6cb9c7a859c199c55c57963c5abd5357b52f Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Sat, 28 Oct 2023 09:05:31 +0700 Subject: [PATCH 474/525] Up version --- src/pg_probackup.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 5ee612e6f..7bbee7cd2 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -353,7 +353,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.12" +#define PROGRAM_VERSION "2.5.13" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20509 From 48efe9086f9a9171b1e51fdf9105edcd667332a4 Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Sat, 28 Oct 2023 19:28:33 +0700 Subject: [PATCH 475/525] Add the 16th version in Readme --- README.md | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index 7486a6ca6..396699805 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ `pg_probackup` is a utility to manage backup and recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure. The utility is compatible with: -* PostgreSQL 9.6, 10, 11, 12, 13, 14, 15; +* PostgreSQL 9.6, 10, 11, 12, 13, 14, 15, 16 As compared to other backup solutions, `pg_probackup` offers the following benefits that can help you implement different backup strategies and deal with large amounts of data: * Incremental backup: page-level incremental backup allows you to save disk space, speed up backup and restore. With three different incremental modes, you can plan the backup strategy in accordance with your data flow. @@ -41,9 +41,9 @@ Regardless of the chosen backup type, all backups taken with `pg_probackup` supp ## ptrack support `PTRACK` backup support provided via following options: -* vanilla PostgreSQL 11, 12, 13, 14, 15 with [ptrack extension](https://github.com/postgrespro/ptrack) -* Postgres Pro Standard 11, 12, 13, 14 -* Postgres Pro Enterprise 11, 12, 13, 14 +* vanilla PostgreSQL 11, 12, 13, 14, 15, 16 with [ptrack extension](https://github.com/postgrespro/ptrack) +* Postgres Pro Standard 11, 12, 13, 14, 15, 16 +* Postgres Pro Enterprise 11, 12, 13, 14, 15, 16 ## Limitations @@ -74,62 +74,62 @@ Installers are available in release **assets**. 
[Latests](https://github.com/pos #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{15,14,13,12,11,10} -sudo apt-get install pg-probackup-{15,14,13,12,11,10}-dbg +sudo apt-get install pg-probackup-{16,15,14,13,12,11,10} +sudo apt-get install pg-probackup-{16,15,14,13,12,11,10}-dbg #DEB-SRC Packages sudo sh -c 'echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\ /etc/apt/sources.list.d/pg_probackup.list' && sudo apt-get update -sudo apt-get source pg-probackup-{15,14,13,12,11,10} +sudo apt-get source pg-probackup-{16,15,14,13,12,11,10} #DEB Astra Linix Orel sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{15,14,13,12,11,10}{-dbg,} +sudo apt-get install pg-probackup-{16,15,14,13,12,11,10}{-dbg,} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm -yum install pg_probackup-{15,14,13,12,11,10} -yum install pg_probackup-{15,14,13,12,11,10}-debuginfo +yum install pg_probackup-{16,15,14,13,12,11,10} +yum install pg_probackup-{16,15,14,13,12,11,10}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-rhel.noarch.rpm -yum install pg_probackup-{15,14,13,12,11,10} -yum install pg_probackup-{15,14,13,12,11,10}-debuginfo +yum install pg_probackup-{16,15,14,13,12,11,10} +yum install pg_probackup-{16,15,14,13,12,11,10}-debuginfo #RPM Oracle Linux Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-oraclelinux.noarch.rpm -yum install pg_probackup-{15,14,13,12,11,10} -yum install pg_probackup-{15,14,13,12,11,10}-debuginfo +yum install pg_probackup-{16,15,14,13,12,11,10} +yum install pg_probackup-{16,15,14,13,12,11,10}-debuginfo #SRPM Centos|RHEL|OracleLinux Packages -yumdownloader --source pg_probackup-{15,14,13,12,11,10} +yumdownloader --source pg_probackup-{16,15,14,13,12,11,10} #RPM SUSE|SLES Packages zypper install --allow-unsigned-rpm -y https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-suse.noarch.rpm -zypper --gpg-auto-import-keys install -y pg_probackup-{15,14,13,12,11,10} -zypper install pg_probackup-{15,14,13,12,11,10}-debuginfo +zypper --gpg-auto-import-keys install -y pg_probackup-{16,15,14,13,12,11,10} +zypper install pg_probackup-{16,15,14,13,12,11,10}-debuginfo #SRPM SUSE|SLES Packages -zypper si pg_probackup-{15,14,13,12,11,10} +zypper si pg_probackup-{16,15,14,13,12,11,10} #RPM ALT Linux 8 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p8 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{15,14,13,12,11,10} -sudo apt-get install pg_probackup-{15,14,13,12,11,10}-debuginfo 
+sudo apt-get install pg_probackup-{16,15,14,13,12,11,10} +sudo apt-get install pg_probackup-{16,15,14,13,12,11,10}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p9 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{15,14,13,12,11,10} -sudo apt-get install pg_probackup-{15,14,13,12,11,10}-debuginfo +sudo apt-get install pg_probackup-{16,15,14,13,12,11,10} +sudo apt-get install pg_probackup-{16,15,14,13,12,11,10}-debuginfo #RPM ALT Linux 10 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p10 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{15,14,13,12,11,10} -sudo apt-get install pg_probackup-{15,14,13,12,11,10}-debuginfo +sudo apt-get install pg_probackup-{16,15,14,13,12,11,10} +sudo apt-get install pg_probackup-{16,15,14,13,12,11,10}-debuginfo ``` #### pg_probackup for PostgresPro Standard and Enterprise From eb160266253f6fa059f59e52a19d9e215018c431 Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Wed, 1 Nov 2023 23:22:51 +0700 Subject: [PATCH 476/525] Add psycopg2 to the testing environment --- .github/workflows/build.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6f99d0f27..c3ad89568 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -72,8 +72,7 @@ jobs: - name: Install Testgres run: | git clone -b no-port-for --single-branch --depth 1 https://github.com/postgrespro/testgres.git - cd testgres - python setup.py install + pip3 install psycopg2 ./testgres # Grant the Github runner user full control of the workspace for initdb to successfully process the data folder - name: Test Probackup From 3207d6c636a04b5849f2ab0892c90237562d0564 Mon Sep 17 00:00:00 2001 From: oleg gurev Date: Wed, 8 Nov 2023 13:44:51 +0300 Subject: [PATCH 477/525] PBCKP-782 doc added note in Remote Mode section --- doc/Readme.md | 3 +++ doc/pgprobackup.xml | 8 ++++++++ 2 files changed, 11 insertions(+) diff --git a/doc/Readme.md b/doc/Readme.md index 756c6aaa0..0e1d64590 100644 --- a/doc/Readme.md +++ b/doc/Readme.md @@ -3,3 +3,6 @@ xmllint --noout --valid probackup.xml xsltproc stylesheet.xsl probackup.xml >pg-probackup.html ``` +> [!NOTE] +>Install ```docbook-xsl``` if you got +>``` "xsl:import : unable to load http://docbook.sourceforge.net/release/xsl/current/xhtml/docbook.xsl"``` \ No newline at end of file diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 22ed9dfd3..70586c478 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -1970,6 +1970,14 @@ pg_probackup restore -B backup_dir --instance + + + In addition to SSH connection, pg_probackup uses + a regular connection to the database to manage the remote operation. + See for details of how to set up + a database connection. 
+ + The typical workflow is as follows: From 70b97d851635ad3fb2fea3d025880c6319276a1c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 13 Nov 2023 15:23:07 +0300 Subject: [PATCH 478/525] declare XID_FMT for PGPRO_EE if not defined --- src/pg_probackup.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 7bbee7cd2..48b9bf884 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -110,6 +110,8 @@ extern const char *PROGRAM_EMAIL; /* 64-bit xid support for PGPRO_EE */ #ifndef PGPRO_EE #define XID_FMT "%u" +#elif !defined(XID_FMT) +#define XID_FMT UINT64_FORMAT #endif #ifndef STDIN_FILENO From de531e62e4e25526a21b9d57652518a034d815fe Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 13 Nov 2023 15:26:23 +0300 Subject: [PATCH 479/525] PBCKP-797: fix race condition by waiting and filling same amount of rows. --- tests/page_test.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/page_test.py b/tests/page_test.py index 786374bdb..99f3ce992 100644 --- a/tests/page_test.py +++ b/tests/page_test.py @@ -6,6 +6,7 @@ import subprocess import gzip import shutil +import time class PageTest(ProbackupTest, unittest.TestCase): @@ -893,7 +894,7 @@ def test_page_backup_with_alien_wal_segment(self): "create table t_heap as select i as id, " "md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1000) i;") + "from generate_series(0,10000) i;") alien_node.safe_psql( "postgres", @@ -905,7 +906,7 @@ def test_page_backup_with_alien_wal_segment(self): "create table t_heap_alien as select i as id, " "md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,100000) i;") + "from generate_series(0,10000) i;") # copy latest wal segment wals_dir = os.path.join(backup_dir, 'wal', 'alien_node') @@ -916,9 +917,9 @@ def test_page_backup_with_alien_wal_segment(self): file = os.path.join(wals_dir, filename) file_destination = os.path.join( os.path.join(backup_dir, 'wal', 'node'), filename) -# file = os.path.join(wals_dir, '000000010000000000000004') - print(file) - print(file_destination) + start = time.time() + while not os.path.exists(file_destination) and time.time() - start < 20: + time.sleep(0.1) os.remove(file_destination) os.rename(file, file_destination) From 3963f39ce670400d2965545b1984ca9bf7cf5c53 Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Wed, 24 May 2023 15:13:01 +0300 Subject: [PATCH 480/525] PBCKP-624 Remove outdated installation instructions for Standard and Enterprise --- LICENSE | 2 +- README.md | 91 ++++++++++++++----------------------------------------- 2 files changed, 24 insertions(+), 69 deletions(-) diff --git a/LICENSE b/LICENSE index 0ba831507..66476e8a9 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2015-2020, Postgres Professional +Copyright (c) 2015-2023, Postgres Professional Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group diff --git a/README.md b/README.md index 396699805..973816c26 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ `pg_probackup` is a utility to manage backup and recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure. 
The utility is compatible with: -* PostgreSQL 9.6, 10, 11, 12, 13, 14, 15, 16 +* PostgreSQL 11, 12, 13, 14, 15; As compared to other backup solutions, `pg_probackup` offers the following benefits that can help you implement different backup strategies and deal with large amounts of data: * Incremental backup: page-level incremental backup allows you to save disk space, speed up backup and restore. With three different incremental modes, you can plan the backup strategy in accordance with your data flow. @@ -74,113 +74,68 @@ Installers are available in release **assets**. [Latests](https://github.com/pos #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{16,15,14,13,12,11,10} -sudo apt-get install pg-probackup-{16,15,14,13,12,11,10}-dbg +sudo apt-get install pg-probackup-{15,14,13,12,11} +sudo apt-get install pg-probackup-{15,14,13,12,11}-dbg #DEB-SRC Packages sudo sh -c 'echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\ /etc/apt/sources.list.d/pg_probackup.list' && sudo apt-get update -sudo apt-get source pg-probackup-{16,15,14,13,12,11,10} +sudo apt-get source pg-probackup-{15,14,13,12,11} #DEB Astra Linix Orel sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{16,15,14,13,12,11,10}{-dbg,} +sudo apt-get install pg-probackup-{15,14,13,12,11}{-dbg,} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm -yum install pg_probackup-{16,15,14,13,12,11,10} -yum install pg_probackup-{16,15,14,13,12,11,10}-debuginfo +yum install pg_probackup-{15,14,13,12,11} +yum install pg_probackup-{15,14,13,12,11}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-rhel.noarch.rpm -yum install pg_probackup-{16,15,14,13,12,11,10} -yum install pg_probackup-{16,15,14,13,12,11,10}-debuginfo +yum install pg_probackup-{15,14,13,12,11} +yum install pg_probackup-{15,14,13,12,11}-debuginfo #RPM Oracle Linux Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-oraclelinux.noarch.rpm -yum install pg_probackup-{16,15,14,13,12,11,10} -yum install pg_probackup-{16,15,14,13,12,11,10}-debuginfo +yum install pg_probackup-{15,14,13,12,11} +yum install pg_probackup-{15,14,13,12,11}-debuginfo #SRPM Centos|RHEL|OracleLinux Packages -yumdownloader --source pg_probackup-{16,15,14,13,12,11,10} +yumdownloader --source pg_probackup-{15,14,13,12,11} #RPM SUSE|SLES Packages zypper install --allow-unsigned-rpm -y https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-suse.noarch.rpm -zypper --gpg-auto-import-keys install -y pg_probackup-{16,15,14,13,12,11,10} -zypper install pg_probackup-{16,15,14,13,12,11,10}-debuginfo +zypper --gpg-auto-import-keys 
install -y pg_probackup-{15,14,13,12,11} +zypper install pg_probackup-{15,14,13,12,11}-debuginfo #SRPM SUSE|SLES Packages -zypper si pg_probackup-{16,15,14,13,12,11,10} +zypper si pg_probackup-{15,14,13,12,11} #RPM ALT Linux 8 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p8 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{16,15,14,13,12,11,10} -sudo apt-get install pg_probackup-{16,15,14,13,12,11,10}-debuginfo +sudo apt-get install pg_probackup-{15,14,13,12,11} +sudo apt-get install pg_probackup-{15,14,13,12,11}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p9 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{16,15,14,13,12,11,10} -sudo apt-get install pg_probackup-{16,15,14,13,12,11,10}-debuginfo +sudo apt-get install pg_probackup-{15,14,13,12,11} +sudo apt-get install pg_probackup-{15,14,13,12,11}-debuginfo #RPM ALT Linux 10 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p10 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{16,15,14,13,12,11,10} -sudo apt-get install pg_probackup-{16,15,14,13,12,11,10}-debuginfo -``` - -#### pg_probackup for PostgresPro Standard and Enterprise -```shell -#DEB Ubuntu|Debian Packages -sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup-forks.list' -sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{std,ent}-{14,13,12,11,10,9.6} -sudo apt-get install pg-probackup-{std,ent}-{14,13,12,11,10,9.6}-dbg - -#DEB Astra Linix Orel -sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' -sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{std,ent}-{12,11,10,9.6}{-dbg,} - - -#RPM Centos Packages -rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-centos.noarch.rpm -yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo - -#RPM RHEL Packages -rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-rhel.noarch.rpm -yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo - -#RPM Oracle Linux Packages -rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-oraclelinux.noarch.rpm -yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo - -#RPM ALT Linux 7 -sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p7 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' -sudo apt-get update -sudo apt-get install 
pg_probackup-{std,ent}-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo - -#RPM ALT Linux 8 -sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p8 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' -sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo - -#RPM ALT Linux 9 -sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p9 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' && sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{15,14,13,12,11} +sudo apt-get install pg_probackup-{15,14,13,12,11}-debuginfo ``` Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.github.io/pg_probackup/#pbk-install-and-setup). +For users of Postgres Pro products, commercial editions of pg_probackup are available for installation from the corresponding Postgres Pro product repository. + ## Building from source ### Linux From 8408753f8d176a04ad25f5a0ad1d317b16db9bf6 Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Sat, 28 Oct 2023 19:28:33 +0700 Subject: [PATCH 481/525] Cherry-pick Add the 16th version in Readme --- README.md | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index 973816c26..c72b65dab 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ `pg_probackup` is a utility to manage backup and recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure. The utility is compatible with: -* PostgreSQL 11, 12, 13, 14, 15; +* PostgreSQL 11, 12, 13, 14, 15, 16 As compared to other backup solutions, `pg_probackup` offers the following benefits that can help you implement different backup strategies and deal with large amounts of data: * Incremental backup: page-level incremental backup allows you to save disk space, speed up backup and restore. With three different incremental modes, you can plan the backup strategy in accordance with your data flow. @@ -74,62 +74,62 @@ Installers are available in release **assets**. 
[Latests](https://github.com/pos #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{15,14,13,12,11} -sudo apt-get install pg-probackup-{15,14,13,12,11}-dbg +sudo apt-get install pg-probackup-{16,15,14,13,12,11} +sudo apt-get install pg-probackup-{16,15,14,13,12,11}-dbg #DEB-SRC Packages sudo sh -c 'echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\ /etc/apt/sources.list.d/pg_probackup.list' && sudo apt-get update -sudo apt-get source pg-probackup-{15,14,13,12,11} +sudo apt-get source pg-probackup-{16,15,14,13,12,11,10} #DEB Astra Linix Orel sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{15,14,13,12,11}{-dbg,} +sudo apt-get install pg-probackup-{16,15,14,13,12,11}{-dbg,} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm -yum install pg_probackup-{15,14,13,12,11} -yum install pg_probackup-{15,14,13,12,11}-debuginfo +yum install pg_probackup-{16,15,14,13,12,11} +yum install pg_probackup-{16,15,14,13,12,11}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-rhel.noarch.rpm -yum install pg_probackup-{15,14,13,12,11} -yum install pg_probackup-{15,14,13,12,11}-debuginfo +yum install pg_probackup-{16,15,14,13,12,11} +yum install pg_probackup-{16,15,14,13,12,11}-debuginfo #RPM Oracle Linux Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-oraclelinux.noarch.rpm -yum install pg_probackup-{15,14,13,12,11} -yum install pg_probackup-{15,14,13,12,11}-debuginfo +yum install pg_probackup-{16,15,14,13,12,11} +yum install pg_probackup-{16,15,14,13,12,11}-debuginfo #SRPM Centos|RHEL|OracleLinux Packages -yumdownloader --source pg_probackup-{15,14,13,12,11} +yumdownloader --source pg_probackup-{16,15,14,13,12,11} #RPM SUSE|SLES Packages zypper install --allow-unsigned-rpm -y https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-suse.noarch.rpm -zypper --gpg-auto-import-keys install -y pg_probackup-{15,14,13,12,11} -zypper install pg_probackup-{15,14,13,12,11}-debuginfo +zypper --gpg-auto-import-keys install -y pg_probackup-{16,15,14,13,12,11} +zypper install pg_probackup-{16,15,14,13,12,11}-debuginfo #SRPM SUSE|SLES Packages -zypper si pg_probackup-{15,14,13,12,11} +zypper si pg_probackup-{16,15,14,13,12,11} #RPM ALT Linux 8 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p8 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{15,14,13,12,11} -sudo apt-get install pg_probackup-{15,14,13,12,11}-debuginfo +sudo apt-get install pg_probackup-{16,15,14,13,12,11} +sudo apt-get install 
pg_probackup-{16,15,14,13,12,11}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p9 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{15,14,13,12,11} -sudo apt-get install pg_probackup-{15,14,13,12,11}-debuginfo +sudo apt-get install pg_probackup-{16,15,14,13,12,11} +sudo apt-get install pg_probackup-{16,15,14,13,12,11}-debuginfo #RPM ALT Linux 10 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p10 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{15,14,13,12,11} -sudo apt-get install pg_probackup-{15,14,13,12,11}-debuginfo +sudo apt-get install pg_probackup-{16,15,14,13,12,11} +sudo apt-get install pg_probackup-{16,15,14,13,12,11}-debuginfo ``` Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.github.io/pg_probackup/#pbk-install-and-setup). From 623b659fe8071bc79006996d6511d90dcd09f9db Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Wed, 22 Nov 2023 11:35:21 +0700 Subject: [PATCH 482/525] Add the installation steps on various systems --- README.md | 64 +------------ doc/pgprobackup.xml | 219 +++++++++++++++++++++++++++++++++++++++++++- doc/stylesheet.css | 3 +- 3 files changed, 218 insertions(+), 68 deletions(-) diff --git a/README.md b/README.md index c72b65dab..1cd518849 100644 --- a/README.md +++ b/README.md @@ -69,68 +69,8 @@ For detailed release plans check [Milestones](https://github.com/postgrespro/pg_ Installers are available in release **assets**. [Latests](https://github.com/postgrespro/pg_probackup/releases/latest). 
### Linux Installation -#### pg_probackup for vanilla PostgreSQL -```shell -#DEB Ubuntu|Debian Packages -sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list' -sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{16,15,14,13,12,11} -sudo apt-get install pg-probackup-{16,15,14,13,12,11}-dbg - -#DEB-SRC Packages -sudo sh -c 'echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\ - /etc/apt/sources.list.d/pg_probackup.list' && sudo apt-get update -sudo apt-get source pg-probackup-{16,15,14,13,12,11,10} - -#DEB Astra Linix Orel -sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' -sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{16,15,14,13,12,11}{-dbg,} - -#RPM Centos Packages -rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm -yum install pg_probackup-{16,15,14,13,12,11} -yum install pg_probackup-{16,15,14,13,12,11}-debuginfo - -#RPM RHEL Packages -rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-rhel.noarch.rpm -yum install pg_probackup-{16,15,14,13,12,11} -yum install pg_probackup-{16,15,14,13,12,11}-debuginfo - -#RPM Oracle Linux Packages -rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-oraclelinux.noarch.rpm -yum install pg_probackup-{16,15,14,13,12,11} -yum install pg_probackup-{16,15,14,13,12,11}-debuginfo - -#SRPM Centos|RHEL|OracleLinux Packages -yumdownloader --source pg_probackup-{16,15,14,13,12,11} - -#RPM SUSE|SLES Packages -zypper install --allow-unsigned-rpm -y https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-suse.noarch.rpm -zypper --gpg-auto-import-keys install -y pg_probackup-{16,15,14,13,12,11} -zypper install pg_probackup-{16,15,14,13,12,11}-debuginfo - -#SRPM SUSE|SLES Packages -zypper si pg_probackup-{16,15,14,13,12,11} - -#RPM ALT Linux 8 -sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p8 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' -sudo apt-get update -sudo apt-get install pg_probackup-{16,15,14,13,12,11} -sudo apt-get install pg_probackup-{16,15,14,13,12,11}-debuginfo - -#RPM ALT Linux 9 -sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p9 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' -sudo apt-get update -sudo apt-get install pg_probackup-{16,15,14,13,12,11} -sudo apt-get install pg_probackup-{16,15,14,13,12,11}-debuginfo - -#RPM ALT Linux 10 -sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p10 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' -sudo apt-get update -sudo apt-get install pg_probackup-{16,15,14,13,12,11} -sudo apt-get install pg_probackup-{16,15,14,13,12,11}-debuginfo -``` + +See the 
[Installation](https://postgrespro.github.io/pg_probackup/#pbk-install) section in the documentation. Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.github.io/pg_probackup/#pbk-install-and-setup). diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 70586c478..2ec4258cb 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -172,7 +172,10 @@ doc/src/sgml/pgprobackup.sgml Overview - Installation and Setup + Installation + + + Setup Command-Line Reference @@ -451,8 +454,215 @@ doc/src/sgml/pgprobackup.sgml - - Installation and Setup + + Installation + + Installation on Debian family systems (Debian, Ubuntu etc.) + + You may need to use apt-get instead of apt on older systems in the commands below. + + + + Add the pg_probackup repository GPG key + +sudo apt install gpg wget +wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | \ +sudo apt-key add - + + + + Setup the binary package repository + +. /etc/os-release +echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb $VERSION_CODENAME main-$VERSION_CODENAME" | \ +sudo tee /etc/apt/sources.list.d/pg_probackup.list + + + + Optionally setup the source package repository for rebuilding the binaries + +echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb $VERSION_CODENAME main-$VERSION_CODENAME" | \ +sudo tee -a /etc/apt/sources.list.d/pg_probackup.list + + + + List the available pg_probackup packages + + + Using apt: + +sudo apt update +apt search pg_probackup + + + + Using apt-get: + +sudo apt-get update +apt-cache search pg_probackup + + + + + + Install or upgrade a pg_probackup version of your choice + +sudo apt install pg-probackup-15 + + + + Optionally install the debug package + +sudo apt install pg-probackup-15-dbg + + + + Optionally install the source package (provided you have set up the source package repository as described above) + +sudo apt install dpkg-dev +sudo apt source pg-probackup-15 + + + + + + Installation on Red Hat family systems (CentOS, Oracle Linux etc.) + + You may need to use yum instead of dnf on older systems in the commands below. + + + + Install the pg_probackup repository + +dnf install https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm + + + + List the available pg_probackup packages + +dnf search pg_probackup + + + + Install or upgrade a pg_probackup version of your choice + +dnf install pg_probackup-15 + + + + Optionally install the debug package + +dnf install pg_probackup-15-debuginfo + + + + Optionally install the source package for rebuilding the binaries + + + Using dnf: + +dnf install 'dnf-command(download)' +dnf download --source pg_probackup-15 + + + + Using yum: + +yumdownloader --source pg_probackup-15 + + + + + + + + Installation on ALT Linux + + You may need to use yum instead of dnf on older systems in the commands below. + + + + Setup the repository + + + On ALT Linux 10: + +. /etc/os-release +echo "rpm http://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p$VERSION_ID x86_64 vanilla" | \ +sudo tee /etc/apt/sources.list.d/pg_probackup.list + + + + On ALT Linux 8 and 9: + +. 
/etc/os-release +echo "rpm http://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-$VERSION_ID x86_64 vanilla" | \ +sudo tee /etc/apt/sources.list.d/pg_probackup.list + + + + + + List the available pg_probackup packages + +sudo apt-get update +apt-cache search pg_probackup + + + + Install or upgrade a pg_probackup version of your choice + +sudo apt-get install pg_probackup-15 + + + + Optionally install the debug package + +sudo apt-get install pg_probackup-15-debuginfo + + + + + + Installation on SUSE Linux + + + Add the pg_probackup repository GPG key + +zypper in -y gpg wget +wget -O GPG-KEY-PG_PROBACKUP https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP +rpm --import GPG-KEY-PG_PROBACKUP + + + + Setup the repository + +zypper in https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-suse.noarch.rpm + + + + List the available pg_probackup packages + +zypper se pg_probackup + + + + Install or upgrade a pg_probackup version of your choice + +zypper in pg_probackup-15 + + + + Optionally install the source package for rebuilding the binaries + +zypper si pg_probackup-15 + + + + + + + Setup Once you have pg_probackup installed, complete the following setup: @@ -1986,8 +2196,7 @@ pg_probackup restore -B backup_dir --instance On your backup host, configure pg_probackup as explained in the section - Installation and - Setup. For the + Setup. For the and commands, make sure to specify remote diff --git a/doc/stylesheet.css b/doc/stylesheet.css index 4d84058f5..31464154b 100644 --- a/doc/stylesheet.css +++ b/doc/stylesheet.css @@ -119,7 +119,8 @@ body { } .book code, kbd, pre, samp { - font-family: monospace,monospace; + font-family: monospace,monospace; + font-size: 90%; } .book .txtCommentsWrap { From 7c29b63e2f5308fd5d77b45c9d1d28f1b16e286c Mon Sep 17 00:00:00 2001 From: Elena Indrupskaya Date: Mon, 28 Nov 2022 15:52:13 +0300 Subject: [PATCH 483/525] [DOC] [PGPRO-7104] Remove outdated options and PostgreSQL versions from documentation [skip travis] --- doc/pgprobackup.xml | 172 ++------------------------------------------ 1 file changed, 4 insertions(+), 168 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 2ec4258cb..3dadc0aad 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -164,7 +164,7 @@ doc/src/sgml/pgprobackup.sgml recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure. - pg_probackup supports PostgreSQL 9.5 or higher. + pg_probackup supports PostgreSQL 10 or higher. @@ -416,7 +416,7 @@ doc/src/sgml/pgprobackup.sgml - On Unix systems, for PostgreSQL 10 or lower, + On Unix systems, for PostgreSQL 10, a backup can be made only by the same OS user that has started the PostgreSQL server. 
For example, if PostgreSQL server is started by user postgres, the backup command must also be run @@ -821,49 +821,6 @@ pg_probackup add-instance -B backup_dir -D used for connection to the PostgreSQL server: - - For PostgreSQL 9.5: - - -BEGIN; -CREATE ROLE backup WITH LOGIN; -GRANT USAGE ON SCHEMA pg_catalog TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; -COMMIT; - - - For PostgreSQL 9.6: - - -BEGIN; -CREATE ROLE backup WITH LOGIN; -GRANT USAGE ON SCHEMA pg_catalog TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO backup; -COMMIT; - - - For PostgreSQL versions 10 — 14: - BEGIN; CREATE ROLE backup WITH LOGIN; @@ -1110,7 +1067,7 @@ archive_command = '"install_dir/pg_probackup" archive Setting up Backup from Standby - For PostgreSQL 9.6 or higher, pg_probackup can take backups from + pg_probackup can take backups from a standby server. This requires the following additional setup: @@ -5013,8 +4970,7 @@ pg_probackup catchup -b catchup_mode Specifies the LSN of the write-ahead log location up to which - recovery will proceed. Can be used only when restoring - a database cluster of major version 10 or higher. + recovery will proceed. @@ -5976,124 +5932,6 @@ pg_probackup catchup -b catchup_mode - - Replica Options - - This section describes the options related to taking a backup - from standby. - - - - Starting from pg_probackup 2.0.24, backups can be - taken from standby without connecting to the master server, - so these options are no longer required. In lower versions, - pg_probackup had to connect to the master to determine - recovery time — the earliest moment for which you can - restore a consistent state of the database cluster. - - - - - - - - - Deprecated. Specifies the name of the database on the master - server to connect to. The connection is used only for managing - the backup process, so you can connect to any existing - database. Can be set in the pg_probackup.conf using the - command. 
- - - Default: postgres, the default PostgreSQL database - - - - - - - - - Deprecated. Specifies the host name of the system on which the - master server is running. - - - - - - - - - Deprecated. Specifies the TCP port or the local Unix domain - socket file extension on which the master server is listening - for connections. - - - Default: 5432, the PostgreSQL default port - - - - - - - - - Deprecated. User name to connect as. - - - Default: postgres, - the PostgreSQL default user name - - - - - - - - - - Deprecated. Wait time for WAL segment streaming via - replication, in seconds. By default, pg_probackup waits 300 - seconds. You can also define this parameter in the - pg_probackup.conf configuration file using the - command. - - - Default: 300 sec - - - - - - - - - Testing and Debugging Options - - This section describes options useful only in a test or development environment. - - - - - - - - By default, pg_probackup exits with an - error if an attempt is made to perform a partial incremental restore - since this destroys databases not included in the restore set. This - flag allows you to suppress the error and proceed with the partial - incremental restore (e.g., to keep a development database snapshot - up-to-date with a production one). This option can be used with the - command. - - - Never use this flag in a production cluster. - - - - - - @@ -6305,8 +6143,6 @@ xlog-seg-size = 16777216 pgdatabase = backupdb pghost = postgres_host pguser = backup -# Replica parameters -replica-timeout = 5min # Archive parameters archive-timeout = 5min # Logging parameters From 363024d5f83b3381d8bb80d1b1bbd5a907d4d091 Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Wed, 7 Dec 2022 13:35:25 +0300 Subject: [PATCH 484/525] Documentation hot fix --- doc/pgprobackup.xml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 3dadc0aad..590f18d62 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -821,6 +821,9 @@ pg_probackup add-instance -B backup_dir -D used for connection to the PostgreSQL server: + + For PostgreSQL versions 10 — 14: + BEGIN; CREATE ROLE backup WITH LOGIN; From 72a9605217ab1437d3e8ee36b93f54ebd4ec2913 Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Thu, 23 Nov 2023 11:08:30 +0700 Subject: [PATCH 485/525] Fix a typo --- doc/pgprobackup.xml | 3 --- 1 file changed, 3 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 590f18d62..5610cd150 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -577,9 +577,6 @@ yumdownloader --source pg_probackup-15 Installation on ALT Linux - - You may need to use yum instead of dnf on older systems in the commands below. - Setup the repository From b52b4d9fcbbb5d414d4c2d50771e5abd7e025cbd Mon Sep 17 00:00:00 2001 From: Daria Lepikhova Date: Fri, 24 Nov 2023 13:56:27 +0700 Subject: [PATCH 486/525] PBCKP-816: Remove version 10 from pg_probackup docs --- README.md | 2 +- doc/pgprobackup.xml | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 1cd518849..6f7ba5df9 100644 --- a/README.md +++ b/README.md @@ -94,7 +94,7 @@ cd && git clone https://github.com/postgrespro/ ### Windows Currently pg_probackup can be build using only MSVC 2013. -Build PostgreSQL using [pgwininstall](https://github.com/postgrespro/pgwininstall) or [PostgreSQL instruction](https://www.postgresql.org/docs/10/install-windows-full.html) with MSVC 2013. 
+Build PostgreSQL using [pgwininstall](https://github.com/postgrespro/pgwininstall) or [PostgreSQL instruction](https://www.postgresql.org/docs/current/install-windows-full.html) with MSVC 2013. If zlib support is needed, src/tools/msvc/config.pl must contain path to directory with compiled zlib. [Example](https://gist.githubusercontent.com/gsmol/80989f976ce9584824ae3b1bfb00bd87/raw/240032950d4ac4801a79625dd00c8f5d4ed1180c/gistfile1.txt) ```shell diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 5610cd150..875250566 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -164,7 +164,7 @@ doc/src/sgml/pgprobackup.sgml recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure. - pg_probackup supports PostgreSQL 10 or higher. + pg_probackup supports PostgreSQL 11 or higher. @@ -416,7 +416,7 @@ doc/src/sgml/pgprobackup.sgml - On Unix systems, for PostgreSQL 10, + On Unix systems, for PostgreSQL 11, a backup can be made only by the same OS user that has started the PostgreSQL server. For example, if PostgreSQL server is started by user postgres, the backup command must also be run @@ -819,7 +819,7 @@ pg_probackup add-instance -B backup_dir -D to the PostgreSQL server: - For PostgreSQL versions 10 — 14: + For PostgreSQL versions 11 — 14: BEGIN; @@ -1807,7 +1807,7 @@ pg_probackup restore -B backup_dir --instance primary_conninfo parameter; you have to add the password manually or use the --primary-conninfo option, if required. - For PostgreSQL 11 or lower, + For PostgreSQL 11, recovery settings are written into the recovery.conf file. Starting from PostgreSQL 12, pg_probackup writes these settings into From b5fca40c1a8d059304ae0854cc64b69ac6588b08 Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Fri, 24 Nov 2023 16:21:08 +0700 Subject: [PATCH 487/525] PBCKP-816 Store the GPG key in the recommended way --- doc/pgprobackup.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 875250566..31f0c5292 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -466,8 +466,8 @@ doc/src/sgml/pgprobackup.sgml Add the pg_probackup repository GPG key sudo apt install gpg wget -wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | \ -sudo apt-key add - +wget -qO - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | \ +sudo tee /etc/apt/trusted.gpg.d/pg_probackup.asc From f6f5bfa2916d155f459f5f771c9c6051b95b9c68 Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Fri, 24 Nov 2023 18:09:01 +0700 Subject: [PATCH 488/525] PBCKP-816 Update a link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 6f7ba5df9..2279b97a4 100644 --- a/README.md +++ b/README.md @@ -72,7 +72,7 @@ Installers are available in release **assets**. [Latests](https://github.com/pos See the [Installation](https://postgrespro.github.io/pg_probackup/#pbk-install) section in the documentation. -Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.github.io/pg_probackup/#pbk-install-and-setup). +Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.github.io/pg_probackup/#pbk-setup). 
For users of Postgres Pro products, commercial editions of pg_probackup are available for installation from the corresponding Postgres Pro product repository.

From 41f5baae72e7615a6bad6325476ec6498327cb9e Mon Sep 17 00:00:00 2001
From: Alexey Savchkov
Date: Sun, 26 Nov 2023 21:42:06 +0700
Subject: [PATCH 489/525] Switch to the new GPG key in the Debian installation

---
 doc/pgprobackup.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml
index 31f0c5292..1f764a432 100644
--- a/doc/pgprobackup.xml
+++ b/doc/pgprobackup.xml
@@ -466,7 +466,7 @@ doc/src/sgml/pgprobackup.sgml
      Add the pg_probackup repository GPG key
 
 sudo apt install gpg wget
-wget -qO - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | \
+wget -qO - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG-PROBACKUP | \
 sudo tee /etc/apt/trusted.gpg.d/pg_probackup.asc
 
 

From 1ee26f9912e83353ccf0db0c0055ee3a131b4bde Mon Sep 17 00:00:00 2001
From: Nikolay Zakharov
Date: Wed, 13 Dec 2023 15:23:57 +0300
Subject: [PATCH 490/525] Fix bug: program terminated if file does not exist (#839)

---
 src/utils/file.c | 33 ++++++++++++++++++++++-----------
 1 file changed, 22 insertions(+), 11 deletions(-)

diff --git a/src/utils/file.c b/src/utils/file.c
index e062a2133..d39d3e320 100644
--- a/src/utils/file.c
+++ b/src/utils/file.c
@@ -1159,24 +1159,35 @@ fio_stat(char const* path, struct stat* st, bool follow_symlink, fio_location lo
 bool
 fio_is_same_file(char const* filename1, char const* filename2, bool follow_symlink, fio_location location)
 {
+	char *abs_name1 = make_absolute_path(filename1);
+	char *abs_name2 = make_absolute_path(filename2);
+	bool result = strcmp(abs_name1, abs_name2) == 0;
+
 #ifndef WIN32
-	struct stat stat1, stat2;
+	if (!result)
+	{
+		struct stat stat1, stat2;
 
-	if (fio_stat(filename1, &stat1, follow_symlink, location) < 0)
-		elog(ERROR, "Can't stat file \"%s\": %s", filename1, strerror(errno));
+		if (fio_stat(filename1, &stat1, follow_symlink, location) < 0)
+		{
+			if (errno == ENOENT)
+				return false;
+			elog(ERROR, "Can't stat file \"%s\": %s", filename1, strerror(errno));
+		}
 
-	if (fio_stat(filename2, &stat2, follow_symlink, location) < 0)
-		elog(ERROR, "Can't stat file \"%s\": %s", filename2, strerror(errno));
+		if (fio_stat(filename2, &stat2, follow_symlink, location) < 0)
+		{
+			if (errno == ENOENT)
+				return false;
+			elog(ERROR, "Can't stat file \"%s\": %s", filename2, strerror(errno));
+		}
 
-	return stat1.st_ino == stat2.st_ino && stat1.st_dev == stat2.st_dev;
-#else
-	char *abs_name1 = make_absolute_path(filename1);
-	char *abs_name2 = make_absolute_path(filename2);
-	bool result = strcmp(abs_name1, abs_name2) == 0;
+		result = (stat1.st_ino == stat2.st_ino && stat1.st_dev == stat2.st_dev);
+	}
+#endif
 	free(abs_name2);
 	free(abs_name1);
 	return result;
-#endif
 }
 
 /*

From d26df12019d6a00e27645d7eb84be71b2b968138 Mon Sep 17 00:00:00 2001
From: Victor Spirin
Date: Wed, 6 Dec 2023 20:32:35 +0300
Subject: [PATCH 491/525] PBCKP-819: Fixed the recovery-target-timeline command
 line parameter. It may contain the 'current' or 'latest' keywords.

recovery-target can also be 'latest' for compatibility with previous versions
---
 src/pg_probackup.c    |   8 ++--
 src/pg_probackup.h    |   3 +-
 src/restore.c         |  38 +++++++++++++--
 tests/restore_test.py | 109 +++++++++++++++++++++++++++++++++++++++++-
 4 files changed, 147 insertions(+), 11 deletions(-)

diff --git a/src/pg_probackup.c b/src/pg_probackup.c
index 30b4212b4..6653898e4 100644
--- a/src/pg_probackup.c
+++ b/src/pg_probackup.c
@@ -98,7 +98,7 @@ static char *target_time = NULL;
 static char *target_xid = NULL;
recovery-target can be 'latest' too for compatibility with previous versions --- src/pg_probackup.c | 8 ++-- src/pg_probackup.h | 3 +- src/restore.c | 38 +++++++++++++-- tests/restore_test.py | 109 +++++++++++++++++++++++++++++++++++++++++- 4 files changed, 147 insertions(+), 11 deletions(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 30b4212b4..6653898e4 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -98,7 +98,7 @@ static char *target_time = NULL; static char *target_xid = NULL; static char *target_lsn = NULL; static char *target_inclusive = NULL; -static TimeLineID target_tli; +static char *target_tli_string; /* timeline number, "current" or "latest"*/ static char *target_stop; static bool target_immediate; static char *target_name = NULL; @@ -227,7 +227,7 @@ static ConfigOption cmd_options[] = { 's', 137, "recovery-target-xid", &target_xid, SOURCE_CMD_STRICT }, { 's', 144, "recovery-target-lsn", &target_lsn, SOURCE_CMD_STRICT }, { 's', 138, "recovery-target-inclusive", &target_inclusive, SOURCE_CMD_STRICT }, - { 'u', 139, "recovery-target-timeline", &target_tli, SOURCE_CMD_STRICT }, + { 's', 139, "recovery-target-timeline", &target_tli_string, SOURCE_CMD_STRICT }, { 's', 157, "recovery-target", &target_stop, SOURCE_CMD_STRICT }, { 'f', 'T', "tablespace-mapping", opt_tablespace_map, SOURCE_CMD_STRICT }, { 'f', 155, "external-mapping", opt_externaldir_map, SOURCE_CMD_STRICT }, @@ -285,7 +285,7 @@ static ConfigOption cmd_options[] = { 's', 136, "time", &target_time, SOURCE_CMD_STRICT }, { 's', 137, "xid", &target_xid, SOURCE_CMD_STRICT }, { 's', 138, "inclusive", &target_inclusive, SOURCE_CMD_STRICT }, - { 'u', 139, "timeline", &target_tli, SOURCE_CMD_STRICT }, + { 's', 139, "timeline", &target_tli_string, SOURCE_CMD_STRICT }, { 's', 144, "lsn", &target_lsn, SOURCE_CMD_STRICT }, { 'b', 140, "immediate", &target_immediate, SOURCE_CMD_STRICT }, @@ -739,7 +739,7 @@ main(int argc, char *argv[]) */ recovery_target_options = parseRecoveryTargetOptions(target_time, target_xid, - target_inclusive, target_tli, target_lsn, + target_inclusive, target_tli_string, target_lsn, (target_stop != NULL) ? target_stop : (target_immediate) ? 
"immediate" : NULL, target_name, target_action); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 48b9bf884..8246c517d 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -564,6 +564,7 @@ typedef struct pgRecoveryTarget const char *target_stop; const char *target_name; const char *target_action; + const char *target_tli_string; /* timeline number, "current" or "latest" from recovery_target_timeline option*/ } pgRecoveryTarget; /* Options needed for restore and validate commands */ @@ -893,7 +894,7 @@ extern bool satisfy_recovery_target(const pgBackup *backup, const pgRecoveryTarget *rt); extern pgRecoveryTarget *parseRecoveryTargetOptions( const char *target_time, const char *target_xid, - const char *target_inclusive, TimeLineID target_tli, const char* target_lsn, + const char *target_inclusive, const char *target_tli_string, const char* target_lsn, const char *target_stop, const char *target_name, const char *target_action); diff --git a/src/restore.c b/src/restore.c index 5b1585024..707dca0c8 100644 --- a/src/restore.c +++ b/src/restore.c @@ -1332,8 +1332,10 @@ create_recovery_conf(InstanceState *instanceState, time_t backup_id, } /* restore-target='latest' support */ - target_latest = rt->target_stop != NULL && - strcmp(rt->target_stop, "latest") == 0; + target_latest = (rt->target_tli_string != NULL && + strcmp(rt->target_tli_string, "latest") == 0) || + (rt->target_stop != NULL && + strcmp(rt->target_stop, "latest") == 0); target_immediate = rt->target_stop != NULL && strcmp(rt->target_stop, "immediate") == 0; @@ -1359,6 +1361,13 @@ create_recovery_conf(InstanceState *instanceState, time_t backup_id, rt->xid_string || rt->lsn_string || rt->target_name || target_immediate || target_latest || restore_command_provided) params->recovery_settings_mode = PITR_REQUESTED; + /* + * The recovery-target-timeline option can be 'latest' for streaming backups. + * This operation requires a WAL archive for PITR. + */ + if (rt->target_tli && backup->stream && params->recovery_settings_mode != PITR_REQUESTED) + elog(WARNING, "The '--recovery-target-timeline' option applied for STREAM backup. " + "The timeline number will be ignored."); elog(LOG, "----------------------------------------"); @@ -1438,14 +1447,20 @@ print_recovery_settings(InstanceState *instanceState, FILE *fp, pgBackup *backup fio_fprintf(fp, "recovery_target_timeline = '%u'\n", rt->target_tli); else { + if (rt->target_tli_string) + fio_fprintf(fp, "recovery_target_timeline = '%s'\n", rt->target_tli_string); + else if (rt->target_stop && (strcmp(rt->target_stop, "latest") == 0)) + fio_fprintf(fp, "recovery_target_timeline = 'latest'\n"); #if PG_VERSION_NUM >= 120000 - + else + { /* * In PG12 default recovery target timeline was changed to 'latest', which * is extremely risky. Explicitly preserve old behavior of recovering to current * timneline for PG12. 
*/ fio_fprintf(fp, "recovery_target_timeline = 'current'\n"); + } #endif } @@ -1877,7 +1892,7 @@ pgRecoveryTarget * parseRecoveryTargetOptions(const char *target_time, const char *target_xid, const char *target_inclusive, - TimeLineID target_tli, + const char *target_tli_string, const char *target_lsn, const char *target_stop, const char *target_name, @@ -1950,7 +1965,20 @@ parseRecoveryTargetOptions(const char *target_time, target_inclusive); } - rt->target_tli = target_tli; + rt->target_tli_string = target_tli_string; + rt->target_tli = 0; + /* target_tli can contains timeline number, "current" or "latest" */ + if(target_tli_string && strcmp(target_tli_string, "current") != 0 && strcmp(target_tli_string, "latest") != 0) + { + errno = 0; + rt->target_tli = strtoul(target_tli_string, NULL, 10); + if (errno == EINVAL || errno == ERANGE || !rt->target_tli) + { + elog(ERROR, "Invalid value for '--recovery-target-timeline' option '%s'", + target_tli_string); + } + } + if (target_stop) { if ((strcmp(target_stop, "immediate") != 0) diff --git a/tests/restore_test.py b/tests/restore_test.py index 67e99515c..df836aada 100644 --- a/tests/restore_test.py +++ b/tests/restore_test.py @@ -1916,7 +1916,9 @@ def test_restore_target_immediate_archive(self): with open(recovery_conf, 'r') as f: self.assertIn("recovery_target = 'immediate'", f.read()) - # @unittest.skip("skip") + # Skipped, because default recovery_target_timeline is 'current' + # Before PBCKP-598 the --recovery-target=latest' option did not work and this test allways passed + @unittest.skip("skip") def test_restore_target_latest_archive(self): """ make sure that recovery_target 'latest' @@ -3818,3 +3820,108 @@ def test_restore_with_waldir(self): wal_path=os.path.join(node.data_dir, "pg_xlog") self.assertEqual(os.path.islink(wal_path), True) + + # @unittest.skip("skip") + def test_restore_to_latest_timeline(self): + """recovery to latest timeline""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + + node.pgbench_init(scale=2) + + before1 = node.table_checksum("pgbench_branches") + backup_id = self.backup_node(backup_dir, 'node', node) + + node.stop() + node.cleanup() + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, options=["-j", "4"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + + + node.slow_start() + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '2', '--no-vacuum']) + pgbench.wait() + pgbench.stdout.close() + + before2 = node.table_checksum("pgbench_branches") + self.backup_node(backup_dir, 'node', node) + + node.stop() + node.cleanup() + # restore from first backup + restore_result = self.restore_node(backup_dir, 'node', node, + options=[ + "-j", "4", "--recovery-target-timeline=latest", "-i", backup_id] + ) + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), restore_result, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + # check recovery_target_timeline option in the recovery_conf + recovery_target_timeline = self.get_recovery_conf(node)["recovery_target_timeline"] + 
self.assertEqual(recovery_target_timeline, "latest") + # check recovery-target=latest option for compatibility with previous versions + node.cleanup() + restore_result = self.restore_node(backup_dir, 'node', node, + options=[ + "-j", "4", "--recovery-target=latest", "-i", backup_id] + ) + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), restore_result, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + # check recovery_target_timeline option in the recovery_conf + recovery_target_timeline = self.get_recovery_conf(node)["recovery_target_timeline"] + self.assertEqual(recovery_target_timeline, "latest") + + # start postgres and promote wal files to latest timeline + node.slow_start() + + # check for the latest updates + after = node.table_checksum("pgbench_branches") + self.assertEqual(before2, after) + + # checking recovery_target_timeline=current is the default option + if self.pg_config_version >= self.version_to_num('12.0'): + node.stop() + node.cleanup() + + # restore from first backup + restore_result = self.restore_node(backup_dir, 'node', node, + options=[ + "-j", "4", "-i", backup_id] + ) + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), restore_result, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + # check recovery_target_timeline option in the recovery_conf + recovery_target_timeline = self.get_recovery_conf(node)["recovery_target_timeline"] + self.assertEqual(recovery_target_timeline, "current") + + # start postgres with current timeline + node.slow_start() + + # check for the current updates + after = node.table_checksum("pgbench_branches") + self.assertEqual(before1, after) From 52e47fe19659f6f5be59fc36acac53aba0e4033d Mon Sep 17 00:00:00 2001 From: Oleg Gurev Date: Sun, 3 Dec 2023 23:59:35 +0300 Subject: [PATCH 492/525] [PBCKP-218] Incremental restore and missing pg_control (issue #304) - pg_control file backup after all other files in backup - pg_control file restore last in full restore - rename pg_control to pg_control.pbk.bak at start of non-full restore - remove pg_control.pbk.bak in the end of successfull non-full restore - use pg_control.pbk.bak after failed non-full restore - added tests for full and incremental restore Tags: backup, catchup, restore --- src/backup.c | 58 +++++++++++--- src/catchup.c | 46 ++++++++++- src/dir.c | 2 +- src/pg_probackup.h | 3 + src/restore.c | 83 +++++++++++++++++++- src/util.c | 20 +++++ tests/helpers/ptrack_helpers.py | 2 +- tests/incr_restore_test.py | 88 ++++++++++++++++++++- tests/restore_test.py | 135 +++++++++++++++++--------------- 9 files changed, 354 insertions(+), 83 deletions(-) diff --git a/src/backup.c b/src/backup.c index 41f035a86..78c3512e9 100644 --- a/src/backup.c +++ b/src/backup.c @@ -122,6 +122,8 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, char pretty_time[20]; char pretty_bytes[20]; + pgFile *src_pg_control_file = NULL; + elog(INFO, "Database backup start"); if(current.external_dir_str) { @@ -424,6 +426,24 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, } + /* + * find pg_control file + * We'll copy it last + */ + { + int control_file_elem_index; + pgFile search_key; + MemSet(&search_key, 0, sizeof(pgFile)); + /* pgFileCompareRelPathWithExternal uses only .rel_path and .external_dir_num for comparision */ + search_key.rel_path = XLOG_CONTROL_FILE; + search_key.external_dir_num = 0; + control_file_elem_index = 
parray_bsearch_index(backup_files_list, &search_key, pgFileCompareRelPathWithExternal); + + if (control_file_elem_index < 0) + elog(ERROR, "File \"%s\" not found in PGDATA %s", XLOG_CONTROL_FILE, current.database_dir); + src_pg_control_file = (pgFile *)parray_get(backup_files_list, control_file_elem_index); + } + /* setup thread locks */ pfilearray_clear_locks(backup_files_list); @@ -483,6 +503,26 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, backup_isok = false; } + /* copy pg_control at very end */ + if (backup_isok) + { + + elog(progress ? INFO : LOG, "Progress: Backup file \"%s\"", + src_pg_control_file->rel_path); + + char from_fullpath[MAXPGPATH]; + char to_fullpath[MAXPGPATH]; + join_path_components(from_fullpath, instance_config.pgdata, src_pg_control_file->rel_path); + join_path_components(to_fullpath, current.database_dir, src_pg_control_file->rel_path); + + backup_non_data_file(src_pg_control_file, NULL, + from_fullpath, to_fullpath, + current.backup_mode, current.parent_backup, + true); + } + + + time(&end_time); pretty_time_interval(difftime(end_time, start_time), pretty_time, lengthof(pretty_time)); @@ -510,17 +550,8 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, { pgFile *pg_control = NULL; - for (i = 0; i < parray_num(backup_files_list); i++) - { - pgFile *tmp_file = (pgFile *) parray_get(backup_files_list, i); + pg_control = src_pg_control_file; - if (tmp_file->external_dir_num == 0 && - (strcmp(tmp_file->rel_path, XLOG_CONTROL_FILE) == 0)) - { - pg_control = tmp_file; - break; - } - } if (!pg_control) elog(ERROR, "Failed to find file \"%s\" in backup filelist.", @@ -2076,6 +2107,13 @@ backup_files(void *arg) /* We have already copied all directories */ if (S_ISDIR(file->mode)) continue; + /* + * Don't copy the pg_control file now, we'll copy it last + */ + if(file->external_dir_num == 0 && pg_strcasecmp(file->rel_path, XLOG_CONTROL_FILE) == 0) + { + continue; + } if (arguments->thread_num == 1) { diff --git a/src/catchup.c b/src/catchup.c index 427542dda..00752b194 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -171,10 +171,13 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, if (current.backup_mode != BACKUP_MODE_FULL) { - dest_id = get_system_identifier(dest_pgdata, FIO_LOCAL_HOST, false); + ControlFileData dst_control; + get_control_file_or_back_file(dest_pgdata, FIO_LOCAL_HOST, &dst_control); + dest_id = dst_control.system_identifier; + if (source_conn_id != dest_id) - elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu", - source_conn_id, dest_pgdata, dest_id); + elog(ERROR, "Database identifiers mismatch: we connected to DB id %llu, but in \"%s\" we found id %llu", + (long long)source_conn_id, dest_pgdata, (long long)dest_id); } } @@ -640,6 +643,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, ssize_t transfered_walfiles_bytes = 0; char pretty_source_bytes[20]; + char dest_pg_control_fullpath[MAXPGPATH]; + char dest_pg_control_bak_fullpath[MAXPGPATH]; + source_conn = catchup_init_state(&source_node_info, source_pgdata, dest_pgdata); catchup_preflight_checks(&source_node_info, source_conn, source_pgdata, dest_pgdata); @@ -935,6 +941,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, Assert(file->external_dir_num == 0); if (pg_strcasecmp(file->name, RELMAPPER_FILENAME) == 0) redundant = true; + /* global/pg_control.pbk.bak is always keeped, because it's needed for restart failed 
incremental restore */ + if (pg_strcasecmp(file->rel_path, XLOG_CONTROL_BAK_FILE) == 0) + redundant = false; /* if file does not exists in destination list, then we can safely unlink it */ if (redundant) @@ -966,6 +975,28 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (dest_filelist) parray_qsort(dest_filelist, pgFileCompareRelPathWithExternal); + join_path_components(dest_pg_control_fullpath, dest_pgdata, XLOG_CONTROL_FILE); + join_path_components(dest_pg_control_bak_fullpath, dest_pgdata, XLOG_CONTROL_BAK_FILE); + /* + * rename (if it exist) dest control file before restoring + * if it doesn't exist, that mean, that we already restoring in a previously failed + * pgdata, where XLOG_CONTROL_BAK_FILE exist + */ + if (current.backup_mode != BACKUP_MODE_FULL && !dry_run) + { + if (!fio_access(dest_pg_control_fullpath, F_OK, FIO_LOCAL_HOST)) + { + pgFile *dst_control; + dst_control = pgFileNew(dest_pg_control_bak_fullpath, XLOG_CONTROL_BAK_FILE, + true,0, FIO_BACKUP_HOST); + + if(!fio_access(dest_pg_control_bak_fullpath, F_OK, FIO_LOCAL_HOST)) + fio_delete(dst_control->mode, dest_pg_control_bak_fullpath, FIO_LOCAL_HOST); + fio_rename(dest_pg_control_fullpath, dest_pg_control_bak_fullpath, FIO_LOCAL_HOST); + pgFileFree(dst_control); + } + } + /* run copy threads */ elog(INFO, "Start transferring data files"); time(&start_time); @@ -985,6 +1016,15 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, copy_pgcontrol_file(from_fullpath, FIO_DB_HOST, to_fullpath, FIO_LOCAL_HOST, source_pg_control_file); transfered_datafiles_bytes += source_pg_control_file->size; + + /* Now backup control file can be deled */ + if (current.backup_mode != BACKUP_MODE_FULL && !fio_access(dest_pg_control_bak_fullpath, F_OK, FIO_LOCAL_HOST)){ + pgFile *dst_control; + dst_control = pgFileNew(dest_pg_control_bak_fullpath, XLOG_CONTROL_BAK_FILE, + true,0, FIO_BACKUP_HOST); + fio_delete(dst_control->mode, dest_pg_control_bak_fullpath, FIO_LOCAL_HOST); + pgFileFree(dst_control); + } } if (!catchup_isok && !dry_run) diff --git a/src/dir.c b/src/dir.c index 353ed2d43..4b1bc2816 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1867,4 +1867,4 @@ set_forkname(pgFile *file) file->segno = segno; file->is_datafile = file->forkName == none; return true; -} +} \ No newline at end of file diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 8246c517d..61dd2ce0e 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -91,6 +91,7 @@ extern const char *PROGRAM_EMAIL; #define DATABASE_MAP "database_map" #define HEADER_MAP "page_header_map" #define HEADER_MAP_TMP "page_header_map_tmp" +#define XLOG_CONTROL_BAK_FILE XLOG_CONTROL_FILE".pbk.bak" /* default replication slot names */ #define DEFAULT_TEMP_SLOT_NAME "pg_probackup_slot"; @@ -1209,6 +1210,8 @@ extern uint32 get_xlog_seg_size(const char *pgdata_path); extern void get_redo(const char *pgdata_path, fio_location pgdata_location, RedoParams *redo); extern void set_min_recovery_point(pgFile *file, const char *backup_path, XLogRecPtr stop_backup_lsn); +extern void get_control_file_or_back_file(const char *pgdata_path, fio_location location, + ControlFileData *control); extern void copy_pgcontrol_file(const char *from_fullpath, fio_location from_location, const char *to_fullpath, fio_location to_location, pgFile *file); diff --git a/src/restore.c b/src/restore.c index 707dca0c8..535faebfb 100644 --- a/src/restore.c +++ b/src/restore.c @@ -39,6 +39,8 @@ typedef struct int ret; } restore_files_arg; +static bool 
control_downloaded = false; +static ControlFileData instance_control; static void print_recovery_settings(InstanceState *instanceState, FILE *fp, pgBackup *backup, @@ -501,6 +503,9 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (redo.checksum_version == 0) elog(ERROR, "Incremental restore in 'lsn' mode require " "data_checksums to be enabled in destination data directory"); + if (!control_downloaded) + get_control_file_or_back_file(instance_config.pgdata, FIO_DB_HOST, + &instance_control); timelines = read_timeline_history(instanceState->instance_wal_subdir_path, redo.tli, false); @@ -719,6 +724,10 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, parray *pgdata_files = NULL; parray *dest_files = NULL; parray *external_dirs = NULL; + pgFile *dest_pg_control_file = NULL; + char dest_pg_control_fullpath[MAXPGPATH]; + char dest_pg_control_bak_fullpath[MAXPGPATH]; + /* arrays with meta info for multi threaded backup */ pthread_t *threads; restore_files_arg *threads_args; @@ -922,6 +931,11 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, pg_strcasecmp(file->name, RELMAPPER_FILENAME) == 0) redundant = true; + /* global/pg_control.pbk.bak are always keeped, because it's needed for restart failed incremental restore */ + if (file->external_dir_num == 0 && + pg_strcasecmp(file->rel_path, XLOG_CONTROL_BAK_FILE) == 0) + redundant = false; + /* do not delete the useful internal directories */ if (S_ISDIR(file->mode) && !redundant) continue; @@ -974,6 +988,42 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, dest_bytes = dest_backup->pgdata_bytes; pretty_size(dest_bytes, pretty_dest_bytes, lengthof(pretty_dest_bytes)); + /* + * [Issue #313] + * find pg_control file (in already sorted earlier dest_files, see parray_qsort(backup->files...)) + * and exclude it from list for future special processing + */ + { + int control_file_elem_index; + pgFile search_key; + MemSet(&search_key, 0, sizeof(pgFile)); + /* pgFileCompareRelPathWithExternal uses only .rel_path and .external_dir_num for comparision */ + search_key.rel_path = XLOG_CONTROL_FILE; + search_key.external_dir_num = 0; + control_file_elem_index = parray_bsearch_index(dest_files, &search_key, pgFileCompareRelPathWithExternal); + + if (control_file_elem_index < 0) + elog(ERROR, "File \"%s\" not found in backup %s", XLOG_CONTROL_FILE, base36enc(dest_backup->start_time)); + dest_pg_control_file = (pgFile *) parray_get(dest_files, control_file_elem_index); + parray_remove(dest_files, control_file_elem_index); + + join_path_components(dest_pg_control_fullpath, pgdata_path, XLOG_CONTROL_FILE); + join_path_components(dest_pg_control_bak_fullpath, pgdata_path, XLOG_CONTROL_BAK_FILE); + /* + * rename (if it exist) dest control file before restoring + * if it doesn't exist, that mean, that we already restoring in a previously failed + * pgdata, where XLOG_CONTROL_BAK_FILE exist + */ + if (params->incremental_mode != INCR_NONE) + { + if (fio_access(dest_pg_control_fullpath,F_OK,FIO_DB_HOST) == 0){ + if (fio_rename(dest_pg_control_fullpath, dest_pg_control_bak_fullpath, FIO_DB_HOST) < 0) + elog(WARNING, "Cannot rename file \"%s\" to \"%s\": %s", + dest_pg_control_fullpath, dest_pg_control_bak_fullpath, strerror(errno)); + } + } + } + elog(INFO, "Start restoring backup files. 
PGDATA size: %s", pretty_dest_bytes); time(&start_time); thread_interrupted = false; @@ -1014,6 +1064,32 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, total_bytes += threads_args[i].restored_bytes; } + /* [Issue #313] copy pg_control at very end */ + if (restore_isok) + { + FILE *out = NULL; + elog(progress ? INFO : LOG, "Progress: Restore file \"%s\"", + dest_pg_control_file->rel_path); + + out = fio_fopen(dest_pg_control_fullpath, PG_BINARY_R "+", FIO_DB_HOST); + + total_bytes += restore_non_data_file(parent_chain, + dest_backup, + dest_pg_control_file, + out, + dest_pg_control_fullpath, false); + fio_fclose(out); + /* Now backup control file can be deleted */ + if (params->incremental_mode != INCR_NONE) + { + pgFile *dst_control; + dst_control = pgFileNew(dest_pg_control_bak_fullpath, XLOG_CONTROL_BAK_FILE, + true,0, FIO_BACKUP_HOST); + fio_delete(dst_control->mode, dest_pg_control_bak_fullpath, FIO_LOCAL_HOST); + pgFileFree(dst_control); + } + } + time(&end_time); pretty_time_interval(difftime(end_time, start_time), pretty_time, lengthof(pretty_time)); @@ -1098,6 +1174,8 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, parray_free(pgdata_files); } + if(dest_pg_control_file) pgFileFree(dest_pg_control_file); + for (i = parray_num(parent_chain) - 1; i >= 0; i--) { pgBackup *backup = (pgBackup *) parray_get(parent_chain, i); @@ -2230,7 +2308,10 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, */ elog(LOG, "Trying to read pg_control file in destination directory"); - system_id_pgdata = get_system_identifier(pgdata, FIO_DB_HOST, false); + get_control_file_or_back_file(pgdata, FIO_DB_HOST, &instance_control); + control_downloaded = true; + + system_id_pgdata = instance_control.system_identifier; if (system_id_pgdata == instance_config.system_identifier) system_id_match = true; diff --git a/src/util.c b/src/util.c index 1407f03cc..3c0a33453 100644 --- a/src/util.c +++ b/src/util.c @@ -190,6 +190,26 @@ get_current_timeline_from_control(const char *pgdata_path, fio_location location return ControlFile.checkPointCopy.ThisTimeLineID; } +void +get_control_file_or_back_file(const char *pgdata_path, fio_location location, ControlFileData *control) +{ + char *buffer; + size_t size; + + /* First fetch file... */ + buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, true, location); + + if (!buffer || size == 0){ + /* Error read XLOG_CONTROL_FILE or file is truncated, trying read backup */ + buffer = slurpFile(pgdata_path, XLOG_CONTROL_BAK_FILE, &size, true, location); + if (!buffer) + elog(ERROR, "Could not read %s and %s files\n", XLOG_CONTROL_FILE, XLOG_CONTROL_BAK_FILE); /* Maybe it should be PANIC? */ + } + digestControlFile(control, buffer, size); + pg_free(buffer); +} + + /* * Get last check point record ptr from pg_tonrol. 
*/ diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 6b665097c..da8ece15e 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1783,7 +1783,7 @@ def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): 'ptrack_control', 'ptrack_init', 'pg_control', 'probackup_recovery.conf', 'recovery.signal', 'standby.signal', 'ptrack.map', 'ptrack.map.mmap', - 'ptrack.map.tmp' + 'ptrack.map.tmp', 'recovery.done','backup_label.old' ] if exclude_dirs: diff --git a/tests/incr_restore_test.py b/tests/incr_restore_test.py index f17ee95d1..eea0e313b 100644 --- a/tests/incr_restore_test.py +++ b/tests/incr_restore_test.py @@ -9,8 +9,9 @@ import hashlib import shutil import json -from testgres import QueryException - +from testgres import QueryException, StartNodeException +import stat +from stat import S_ISDIR class IncrRestoreTest(ProbackupTest, unittest.TestCase): @@ -2426,3 +2427,86 @@ def test_incremental_pg_filenode_map(self): 'select 1') # check that MinRecPoint and BackupStartLsn are correctly used in case of --incrementa-lsn + + # @unittest.skip("skip") + def test_incr_restore_issue_313(self): + """ + Check that failed incremental restore can be restarted + """ + self._check_gdb_flag_or_skip_test + node = self.make_simple_node('node', + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale = 50) + + full_backup_id = self.backup_node(backup_dir, 'node', node, backup_type='full') + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + pgbench.stdout.close() + + last_backup_id = self.backup_node(backup_dir, 'node', node, backup_type='delta') + + pgdata = self.pgdata_content(node.data_dir) + node.cleanup() + + self.restore_node(backup_dir, 'node', node, backup_id=full_backup_id) + + count = 0 + filelist = self.get_backup_filelist(backup_dir, 'node', last_backup_id) + for file in filelist: + # count only nondata files + if int(filelist[file]['is_datafile']) == 0 and \ + not stat.S_ISDIR(int(filelist[file]['mode'])) and \ + not filelist[file]['size'] == '0' and \ + file != 'database_map': + count += 1 + + gdb = self.restore_node(backup_dir, 'node', node, gdb=True, + backup_id=last_backup_id, options=['--progress', '--incremental-mode=checksum']) + gdb.verbose = False + gdb.set_breakpoint('restore_non_data_file') + gdb.run_until_break() + gdb.continue_execution_until_break(count - 1) + gdb.quit() + + bak_file = os.path.join(node.data_dir, 'global', 'pg_control.pbk.bak') + self.assertTrue( + os.path.exists(bak_file), + "pg_control bak File should not exist: {0}".format(bak_file)) + + try: + node.slow_start() + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because backup is not fully restored") + except StartNodeException as e: + self.assertIn( + 'Cannot start node', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + with open(os.path.join(node.logs_dir, 'postgresql.log'), 'r') as f: + if self.pg_config_version >= 120000: + self.assertIn( + "PANIC: could not read file \"global/pg_control\"", + f.read()) + else: + self.assertIn( + "PANIC: could not read from control 
file", + f.read()) + self.restore_node(backup_dir, 'node', node, + backup_id=last_backup_id, options=['--progress', '--incremental-mode=checksum']) + node.slow_start() + self.compare_pgdata(pgdata, self.pgdata_content(node.data_dir)) diff --git a/tests/restore_test.py b/tests/restore_test.py index df836aada..b6664252e 100644 --- a/tests/restore_test.py +++ b/tests/restore_test.py @@ -3,11 +3,11 @@ from .helpers.ptrack_helpers import ProbackupTest, ProbackupException import subprocess import sys -from time import sleep from datetime import datetime, timedelta, timezone import hashlib import shutil import json +import stat from shutil import copyfile from testgres import QueryException, StartNodeException from stat import S_ISDIR @@ -3709,66 +3709,6 @@ def test_concurrent_restore(self): self.compare_pgdata(pgdata1, pgdata2) self.compare_pgdata(pgdata2, pgdata3) - # skip this test until https://github.com/postgrespro/pg_probackup/pull/399 - @unittest.skip("skip") - def test_restore_issue_313(self): - """ - Check that partially restored PostgreSQL instance cannot be started - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) - node.cleanup() - - count = 0 - filelist = self.get_backup_filelist(backup_dir, 'node', backup_id) - for file in filelist: - # count only nondata files - if int(filelist[file]['is_datafile']) == 0 and int(filelist[file]['size']) > 0: - count += 1 - - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) - node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored) - - gdb = self.restore_node(backup_dir, 'node', node, gdb=True, options=['--progress']) - gdb.verbose = False - gdb.set_breakpoint('restore_non_data_file') - gdb.run_until_break() - gdb.continue_execution_until_break(count - 2) - gdb.quit() - - # emulate the user or HA taking care of PG configuration - for fname in os.listdir(node_restored.data_dir): - if fname.endswith('.conf'): - os.rename( - os.path.join(node_restored.data_dir, fname), - os.path.join(node.data_dir, fname)) - - try: - node.slow_start() - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because backup is not fully restored") - except StartNodeException as e: - self.assertIn( - 'Cannot start node', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - # @unittest.skip("skip") def test_restore_with_waldir(self): """recovery using tablespace-mapping option and page backup""" @@ -3833,8 +3773,6 @@ def test_restore_to_latest_timeline(self): self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) node.slow_start() - - node.pgbench_init(scale=2) before1 = node.table_checksum("pgbench_branches") @@ -3850,8 +3788,6 @@ def test_restore_to_latest_timeline(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(self.output), self.cmd)) - - node.slow_start() pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT, @@ -3925,3 +3861,72 @@ def test_restore_to_latest_timeline(self): # check 
for the current updates after = node.table_checksum("pgbench_branches") self.assertEqual(before1, after) + + def test_restore_issue_313(self): + """ + Check that partially restored PostgreSQL instance cannot be started + """ + self._check_gdb_flag_or_skip_test + node = self.make_simple_node('node', + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + node.cleanup() + + count = 0 + filelist = self.get_backup_filelist(backup_dir, 'node', backup_id) + for file in filelist: + # count only nondata files + if int(filelist[file]['is_datafile']) == 0 and \ + not stat.S_ISDIR(int(filelist[file]['mode'])) and \ + not filelist[file]['size'] == '0' and \ + file != 'database_map': + count += 1 + + node_restored = self.make_simple_node('node_restored') + node_restored.cleanup() + self.restore_node(backup_dir, 'node', node_restored) + + gdb = self.restore_node(backup_dir, 'node', node, gdb=True, options=['--progress']) + gdb.verbose = False + gdb.set_breakpoint('restore_non_data_file') + gdb.run_until_break() + gdb.continue_execution_until_break(count - 1) + gdb.quit() + + # emulate the user or HA taking care of PG configuration + for fname in os.listdir(node_restored.data_dir): + if fname.endswith('.conf'): + os.rename( + os.path.join(node_restored.data_dir, fname), + os.path.join(node.data_dir, fname)) + + try: + node.slow_start() + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because backup is not fully restored") + except StartNodeException as e: + self.assertIn( + 'Cannot start node', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + with open(os.path.join(node.logs_dir, 'postgresql.log'), 'r') as f: + if self.pg_config_version >= 120000: + self.assertIn( + "PANIC: could not read file \"global/pg_control\"", + f.read()) + else: + self.assertIn( + "PANIC: could not read from control file", + f.read()) From 343ed029a4277e0217da692dffe1a6748c80eaaa Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 09:20:42 +0300 Subject: [PATCH 493/525] test_recovery_target_lsn_backup_victim - looks like it should pass and not fail --- tests/false_positive_test.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/false_positive_test.py b/tests/false_positive_test.py index fbb785c60..ea82cb18f 100644 --- a/tests/false_positive_test.py +++ b/tests/false_positive_test.py @@ -203,13 +203,16 @@ def test_recovery_target_time_backup_victim(self): backup_dir, 'node', options=['--recovery-target-time={0}'.format(target_time)]) - @unittest.expectedFailure + # @unittest.expectedFailure # @unittest.skip("skip") def test_recovery_target_lsn_backup_victim(self): """ Check that for validation to recovery target probackup chooses valid backup https://github.com/postgrespro/pg_probackup/issues/104 + + @y.sokolov: looks like this test should pass. 
+ So I commented 'expectedFailure' """ backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( From 46c8a3351dafaa48b6ed05efffd26b641d0ac48b Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Wed, 6 Dec 2023 11:59:39 +0300 Subject: [PATCH 494/525] Rewrite How-To into a Quick Start --- doc/pgprobackup.xml | 623 +++++++++++++++++++++++--------------------- 1 file changed, 332 insertions(+), 291 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 1f764a432..466f474f5 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -171,6 +171,9 @@ doc/src/sgml/pgprobackup.sgml Overview + + Quick Start + Installation @@ -453,7 +456,267 @@ doc/src/sgml/pgprobackup.sgml - + + Quick Start + + To quickly get started with pg_probackup, complete the steps below. This will set up FULL and DELTA backups in the remote mode and demonstrate some + basic pg_probackup operations. In the following, these terms are used: + + + + + backupPostgreSQL + role used to connect to the PostgreSQL + cluster. + + + + + backupdb — database used used to connect to the + PostgreSQL cluster. + + + + + backup_host — host with the backup catalog. + + + + + backup — user on + backup_host running all pg_probackup + operations. + + + + + /mnt/backups — directory on + backup_host where the backup catalog is stored. + + + + + postgres_host — host with the + PostgreSQL cluster. + + + + + postgres — user on + postgres_host under which + PostgreSQL cluster processes are running. + + + + + /var/lib/postgresql/16/main — + PostgreSQL data directory on + postgres_host. + + + + + Steps to perform: + + + Install <application>pg_probackup</application> on both <literal>backup_host</literal> and <literal>postgres_host</literal>. + + + <link linkend="pbk-setup-ssh">Set up an SSH connection</link> from <literal>backup_host</literal> to <literal>postgres_host</literal>. + + + <link linkend="pbk-configuring-the-database-cluster">Configure</link> your database cluster for <link linkend="pbk-setting-up-stream-backups">STREAM backups</link>. 
+ + + Initialize the backup catalog: + +backup_user@backup_host:~$ pg_probackup-16 init -B /mnt/backups +INFO: Backup catalog '/mnt/backups' successfully initialized + + + + Add a backup instance called <literal>mydb</literal> to the backup catalog: + +backup_user@backup_host:~$ pg_probackup-16 add-instance \ + -B /mnt/backups \ + -D /var/lib/postgresql/16/main \ + --instance=mydb \ + --remote-host=postgres_host \ + --remote-user=postgres +INFO: Instance 'mydb' successfully initialized + + + + Make a FULL backup: + +backup_user@backup_host:~$ pg_probackup-16 backup \ + -B /mnt/backups \ + -b FULL \ + --instance=mydb \ + --stream \ + --remote-host=postgres_host \ + --remote-user=postgres \ + -U backup \ + -d backupdb +INFO: Backup start, pg_probackup version: 2.5.13, instance: mydb, backup ID: S6OBFN, backup mode: FULL, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1 +INFO: Database backup start +INFO: wait for pg_backup_start() +INFO: Wait for WAL segment /mnt/backups/backups/mydb/S6OBFN/database/pg_wal/000000010000000000000002 to be streamed +INFO: PGDATA size: 29MB +INFO: Current Start LSN: 0/2000060, TLI: 1 +INFO: Start transferring data files +INFO: Data files are transferred, time elapsed: 1s +INFO: wait for pg_stop_backup() +INFO: pg_stop backup() successfully executed +INFO: stop_lsn: 0/2003CB0 +INFO: Getting the Recovery Time from WAL +INFO: Syncing backup files to disk +INFO: Backup files are synced, time elapsed: 0 +INFO: Validating backup S6OBFN +INFO: Backup S6OBFN data files are valid +INFO: Backup S6OBFN resident size: 45MB +INFO: Backup S6OBFN completed + + + + List the backups of the instance: + +backup_user@backup_host:~$ pg_probackup-16 show -B /mnt/backups --instance=mydb +================================================================================================================================ + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status +================================================================================================================================ + mydb 16 S6OBFN 2024-01-03 06:59:49+00 FULL STREAM 1/0 10s 29MB 16MB 1.00 0/2000060 0/2003CB0 OK + + + + Make an incremental backup in the DELTA mode: + +backup_user@backup_host:~$ pg_probackup-16 backup \ + -B /mnt/backups \ + -b delta \ + --instance=mydb \ + --stream \ + --remote-host=postgres_host \ + --remote-user=postgres \ + -U backup \ + -d backupdb +INFO: Backup start, pg_probackup version: 2.5.13, instance: mydb, backup ID: S6OBLG, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1 +INFO: Database backup start +INFO: wait for pg_backup_start() +INFO: Parent backup: S6OBFN +INFO: Wait for WAL segment /mnt/backups/backups/mydb/S6OBLG/database/pg_wal/000000010000000000000004 to be streamed +INFO: PGDATA size: 29MB +INFO: Current Start LSN: 0/4000028, TLI: 1 +INFO: Parent Start LSN: 0/2000060, TLI: 1 +INFO: Start transferring data files +INFO: Data files are transferred, time elapsed: 1s +INFO: wait for pg_stop_backup() +INFO: pg_stop backup() successfully executed +INFO: stop_lsn: 0/4000168 +INFO: Getting the Recovery Time from WAL +INFO: Syncing backup files to disk +INFO: Backup files are synced, time elapsed: 0 +INFO: Validating backup S6OBLG +INFO: Backup S6OBLG data files are valid +INFO: Backup S6OBLG resident size: 32MB +INFO: Backup S6OBLG completed + + + + Add or modify some parameters in the <application>pg_probackup</application> + configuration file, so that 
you do not have to specify them each time on the command line: + +backup_user@backup_host:~$ pg_probackup-16 set-config \ + -B /mnt/backups \ + --instance=mydb \ + --remote-host=postgres_host \ + --remote-user=postgres \ + -U backup \ + -d backupdb + + + + Check the configuration of the instance: + +backup_user@backup_host:~$ pg_probackup-16 show-config -B /mnt/backups --instance=mydb +# Backup instance information +pgdata = /var/lib/postgresql/16/main +system-identifier = 7319761899046784808 +xlog-seg-size = 16777216 +# Connection parameters +pgdatabase = backupdb +pghost = postgres_host +pguser = backup +# Replica parameters +replica-timeout = 5min +# Archive parameters +archive-timeout = 5min +# Logging parameters +log-level-console = INFO +log-level-file = OFF +log-format-console = PLAIN +log-format-file = PLAIN +log-filename = pg_probackup.log +log-rotation-size = 0TB +log-rotation-age = 0d +# Retention parameters +retention-redundancy = 0 +retention-window = 0 +wal-depth = 0 +# Compression parameters +compress-algorithm = none +compress-level = 1 +# Remote access parameters +remote-proto = ssh +remote-host = postgres_host +remote-user = postgres + + + Note that the parameters not modified via set-config retain their default values. + + + + Make another incremental backup in the DELTA mode, omitting + the parameters stored in the configuration file earlier: + +backup_user@backup_host:~$ pg_probackup-16 backup -B /mnt/backups --instance=mydb -b delta --stream +INFO: Backup start, pg_probackup version: 2.5.13, instance: mydb, backup ID: S6OBQO, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1 +INFO: Database backup start +INFO: wait for pg_backup_start() +INFO: Parent backup: S6OBLG +INFO: Wait for WAL segment /mnt/backups/backups/mydb/S6OBQO/database/pg_wal/000000010000000000000006 to be streamed +INFO: PGDATA size: 29MB +INFO: Current Start LSN: 0/6000028, TLI: 1 +INFO: Parent Start LSN: 0/4000028, TLI: 1 +INFO: Start transferring data files +INFO: Data files are transferred, time elapsed: 1s +INFO: wait for pg_stop_backup() +INFO: pg_stop backup() successfully executed +INFO: stop_lsn: 0/6000168 +INFO: Getting the Recovery Time from WAL +INFO: Syncing backup files to disk +INFO: Backup files are synced, time elapsed: 0 +INFO: Validating backup S6OBQO +INFO: Backup S6OBQO data files are valid +INFO: Backup S6OBQO resident size: 32MB +INFO: Backup S6OBQO completed + + + + List the backups of the instance again: + +backup_user@backup_host:~$ pg_probackup-16 show -B /mnt/backups --instance=mydb +================================================================================================================================== + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status +================================================================================================================================== + mydb 16 S6OBQO 2024-01-03 07:06:26+00 DELTA STREAM 1/1 6s 111kB 32MB 1.00 0/6000028 0/6000168 OK + mydb 16 S6OBLG 2024-01-03 07:03:18+00 DELTA STREAM 1/1 10s 127kB 32MB 1.00 0/4000028 0/4000168 OK + mydb 16 S6OBFN 2024-01-03 06:59:49+00 FULL STREAM 1/0 10s 29MB 16MB 1.00 0/2000060 0/2003CB0 OK + + + + + Installation @@ -744,7 +1007,7 @@ pg_probackup init -B backup_dir pg_probackup add-instance -B backup_dir -D data_dir --instance instance_name [remote_options] - where: + Where: @@ -812,11 +1075,19 @@ pg_probackup add-instance -B backup_dir -D backup role is used as an example. 
+ + For security reasons, it is recommended to run the configuration SQL queries below + in a separate database. + + +postgres=# CREATE DATABASE backupdb; +postgres=# \c backupdb + To perform a , the following permissions for role backup are required only in the database used for - connection to the PostgreSQL server: + connection to the PostgreSQL server. For PostgreSQL versions 11 — 14: @@ -908,7 +1179,18 @@ COMMIT; - Grant the REPLICATION privilege to the backup role: + If the backup role does not exist, create it with + the REPLICATION privilege when + Configuring the + Database Cluster: + + +CREATE ROLE backup WITH LOGIN REPLICATION; + + + + + If the backup role already exists, grant it with the REPLICATION privilege: ALTER ROLE backup WITH REPLICATION; @@ -1203,7 +1485,7 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; If you are going to use pg_probackup in remote mode via SSH, complete the following steps: - + Install pg_probackup on both systems: @@ -1213,7 +1495,7 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; - For communication between the hosts set up the passwordless + For communication between the hosts set up a passwordless SSH connection between backup user on backup_host and postgres user on @@ -1222,54 +1504,64 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; [backup@backup_host] ssh-copy-id postgres@db_host + + Where: + + + + + backup_host is the system with + backup catalog. + + + + + db_host is the system with PostgreSQL + cluster. + + + + + backup is the OS user on + backup_host used to run pg_probackup. + + + + + postgres is the user on + postgres_host under which + PostgreSQL cluster processes are running. + For PostgreSQL 11 or higher a + more secure approach can be used thanks to + allow-group-access feature. + + + If you are going to rely on continuous - WAL archiving, set up passwordless SSH - connection between postgres user on - db_host and backup + WAL archiving, set up a passwordless SSH + connection between the postgres user on + db_host and the backup user on backup_host: [postgres@db_host] ssh-copy-id backup@backup_host - - - where: - - - - - backup_host is the system with - backup catalog. - - - - - db_host is the system with PostgreSQL - cluster. - - - - - backup is the OS user on - backup_host used to run pg_probackup. - - - postgres is the OS user on - db_host used to start the PostgreSQL - cluster. For PostgreSQL 11 or higher a - more secure approach can be used thanks to - allow-group-access - feature. + Make sure pg_probackup on postgres_host + can be located when a connection via SSH is made. For example, for Bash, you can + modify PATH in ~/.bashrc of the backup user. + Alternatively, for pg_probackup commands, specify the path to the directory + containing the pg_probackup binary on postgres_host via + the --remote-path option. - + pg_probackup in the remote mode via SSH works as follows: @@ -1763,7 +2055,7 @@ pg_probackup validate -B backup_dir --instance backup_dir --instance instance_name -i backup_id - where: + Where: @@ -5935,257 +6227,6 @@ pg_probackup catchup -b catchup_mode - - How-To - - All examples below assume the remote mode of operations via - SSH. If you are planning to run backup and - restore operation locally, skip the - Setup passwordless SSH connection step - and omit all options. - - - Examples are based on Ubuntu 18.04, - PostgreSQL 11, and pg_probackup - 2.2.0. - - - - - backupPostgreSQL - role used for connection to PostgreSQL - cluster. 
- - - - - backupdb — database used for connection - to PostgreSQL cluster. - - - - - backup_host — host with backup catalog. - - - - - backupman — user on - backup_host running all pg_probackup - operations. - - - - - /mnt/backups — directory on - backup_host where backup catalog is stored. - - - - - postgres_host — host with PostgreSQL - cluster. - - - - - postgres — user on - postgres_host that has started the PostgreSQL cluster. - - - - - /var/lib/postgresql/11/mainPostgreSQL - data directory on postgres_host. - - - - - Minimal Setup - - This scenario illustrates setting up standalone FULL and DELTA backups. - - - - Set up passwordless SSH connection from - <literal>backup_host</literal> to - <literal>postgres_host</literal>: - -[backupman@backup_host] ssh-copy-id postgres@postgres_host - - - - Configure your <productname>PostgreSQL</productname> cluster. - - For security purposes, it is recommended to use a separate - database for backup operations. - - -postgres=# -CREATE DATABASE backupdb; - - - Connect to the backupdb database, create the - probackup role, and grant the following - permissions to this role: - - -backupdb=# -BEGIN; -CREATE ROLE backup WITH LOGIN REPLICATION; -GRANT USAGE ON SCHEMA pg_catalog TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO backup; -COMMIT; - - - - Initialize the backup catalog: - -[backupman@backup_host]$ pg_probackup-11 init -B /mnt/backups -INFO: Backup catalog '/mnt/backups' successfully inited - - - - Add instance <literal>pg-11</literal> to the backup catalog: - -[backupman@backup_host]$ pg_probackup-11 add-instance -B /mnt/backups --instance pg-11 --remote-host=postgres_host --remote-user=postgres -D /var/lib/postgresql/11/main -INFO: Instance 'node' successfully inited - - - - Take a FULL backup: - -[backupman@backup_host] pg_probackup-11 backup -B /mnt/backups --instance pg-11 -b FULL --stream --remote-host=postgres_host --remote-user=postgres -U backup -d backupdb -INFO: Backup start, pg_probackup version: 2.2.0, instance: node, backup ID: PZ7YK2, backup mode: FULL, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1 -INFO: Start transferring data files -INFO: Data files are transferred -INFO: wait for pg_stop_backup() -INFO: pg_stop backup() successfully executed -INFO: Validating backup PZ7YK2 -INFO: Backup PZ7YK2 data files are valid -INFO: Backup PZ7YK2 resident size: 196MB -INFO: Backup PZ7YK2 completed - - - - Let's take a look at the backup catalog: - -[backupman@backup_host] pg_probackup-11 show -B /mnt/backups --instance pg-11 - -BACKUP INSTANCE 'pg-11' 
-================================================================================================================================== - Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status -================================================================================================================================== - node 11 PZ7YK2 2019-10-11 19:45:45+03 FULL STREAM 1/0 11s 180MB 16MB 1.00 0/3C000028 0/3C000198 OK - - - - Take an incremental backup in the DELTA mode: - -[backupman@backup_host] pg_probackup-11 backup -B /mnt/backups --instance pg-11 -b delta --stream --remote-host=postgres_host --remote-user=postgres -U backup -d backupdb -INFO: Backup start, pg_probackup version: 2.2.0, instance: node, backup ID: PZ7YMP, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1 -INFO: Parent backup: PZ7YK2 -INFO: Start transferring data files -INFO: Data files are transferred -INFO: wait for pg_stop_backup() -INFO: pg_stop backup() successfully executed -INFO: Validating backup PZ7YMP -INFO: Backup PZ7YMP data files are valid -INFO: Backup PZ7YMP resident size: 32MB -INFO: Backup PZ7YMP completed - - - - Let's add some parameters to <application>pg_probackup</application> - configuration file, so that you can omit them from the command line: - -[backupman@backup_host] pg_probackup-11 set-config -B /mnt/backups --instance pg-11 --remote-host=postgres_host --remote-user=postgres -U backup -d backupdb - - - - Take another incremental backup in the DELTA mode, omitting - some of the previous parameters: - -[backupman@backup_host] pg_probackup-11 backup -B /mnt/backups --instance pg-11 -b delta --stream -INFO: Backup start, pg_probackup version: 2.2.0, instance: node, backup ID: PZ7YR5, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1 -INFO: Parent backup: PZ7YMP -INFO: Start transferring data files -INFO: Data files are transferred -INFO: wait for pg_stop_backup() -INFO: pg_stop backup() successfully executed -INFO: Validating backup PZ7YR5 -INFO: Backup PZ7YR5 data files are valid -INFO: Backup PZ7YR5 resident size: 32MB -INFO: Backup PZ7YR5 completed - - - - Let's take a look at the instance configuration: - -[backupman@backup_host] pg_probackup-11 show-config -B /mnt/backups --instance pg-11 - -# Backup instance information -pgdata = /var/lib/postgresql/11/main -system-identifier = 6746586934060931492 -xlog-seg-size = 16777216 -# Connection parameters -pgdatabase = backupdb -pghost = postgres_host -pguser = backup -# Archive parameters -archive-timeout = 5min -# Logging parameters -log-level-console = INFO -log-level-file = OFF -log-format-console = PLAIN -log-format-file = PLAIN -log-filename = pg_probackup.log -log-rotation-size = 0 -log-rotation-age = 0 -# Retention parameters -retention-redundancy = 0 -retention-window = 0 -wal-depth = 0 -# Compression parameters -compress-algorithm = none -compress-level = 1 -# Remote access parameters -remote-proto = ssh -remote-host = postgres_host - - - Note that we are getting the default values for other options - that were not overwritten by the set-config command. 
- - - - Let's take a look at the backup catalog: - -[backupman@backup_host] pg_probackup-11 show -B /mnt/backups --instance pg-11 - -==================================================================================================================================== - Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status -==================================================================================================================================== - node 11 PZ7YR5 2019-10-11 19:49:56+03 DELTA STREAM 1/1 10s 112kB 32MB 1.00 0/41000028 0/41000160 OK - node 11 PZ7YMP 2019-10-11 19:47:16+03 DELTA STREAM 1/1 10s 376kB 32MB 1.00 0/3E000028 0/3F0000B8 OK - node 11 PZ7YK2 2019-10-11 19:45:45+03 FULL STREAM 1/0 11s 180MB 16MB 1.00 0/3C000028 0/3C000198 OK - - - - - - Versioning From 36b9761fa1d92c277c5a6630c43defb3e6a4af58 Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Fri, 8 Dec 2023 11:44:18 +0700 Subject: [PATCH 495/525] PBCKP-817 Update documentation examples to 2.7.0 --- doc/pgprobackup.xml | 41 +++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 466f474f5..fb2a8f599 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -483,7 +483,7 @@ doc/src/sgml/pgprobackup.sgml - backup — user on + backup_user — user on backup_host running all pg_probackup operations. @@ -1472,12 +1472,12 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; Configuring the Remote Mode - pg_probackup supports the remote mode that allows to perform - backup, restore and WAL archiving operations remotely. In this - mode, the backup catalog is stored on a local system, while - PostgreSQL instance to backup and/or to restore is located on a - remote system. Currently the only supported remote protocol is - SSH. + pg_probackup supports the remote mode that + allows to perform backup, restore and WAL archiving operations remotely. + In this mode, the backup catalog is stored on a local system, while + PostgreSQL instance to backup and/or to restore + is located on a remote system. Currently the only supported remote + protocol is SSH. Set up SSH @@ -1490,19 +1490,19 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; Install pg_probackup on both systems: backup_host and - db_host. + postgres_host. For communication between the hosts set up a passwordless - SSH connection between backup user on - backup_host and + SSH connection between the backup_user user on + backup_host and the postgres user on - db_host: + postgres_host: -[backup@backup_host] ssh-copy-id postgres@db_host +[backup_user@backup_host] ssh-copy-id postgres@postgres_host Where: @@ -1516,13 +1516,13 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; - db_host is the system with PostgreSQL + postgres_host is the system with the PostgreSQL cluster. - backup is the OS user on + backup_user is the OS user on backup_host used to run pg_probackup. @@ -1544,18 +1544,19 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; continuous WAL archiving, set up a passwordless SSH connection between the postgres user on - db_host and the backup + postgres_host and the backup user on backup_host: -[postgres@db_host] ssh-copy-id backup@backup_host +[postgres@postgres_host] ssh-copy-id backup_user@backup_host Make sure pg_probackup on postgres_host can be located when a connection via SSH is made. For example, for Bash, you can - modify PATH in ~/.bashrc of the backup user. 
+ modify PATH in ~/.bashrc of the postgres user + (above the line in bashrc which exits the script for non-interactive shells). Alternatively, for pg_probackup commands, specify the path to the directory containing the pg_probackup binary on postgres_host via the --remote-path option. @@ -1611,10 +1612,10 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; The main process is usually started on backup_host and connects to - db_host, but in case of + postgres_host, but in case of archive-push and archive-get commands the main process - is started on db_host and connects to + is started on postgres_host and connects to backup_host. @@ -1635,7 +1636,7 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; Compression is always done on - db_host, while decompression is always done on + postgres_host, while decompression is always done on backup_host. From 9587b75b1fbde7231388af2ff0e7c6d86722e55b Mon Sep 17 00:00:00 2001 From: Elena Indrupskaya Date: Wed, 13 Dec 2023 06:33:15 +0300 Subject: [PATCH 496/525] A few documentation edits --- doc/pgprobackup.xml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index fb2a8f599..547b76b0a 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -2434,7 +2434,8 @@ pg_probackup restore -B backup_dir --instance In addition to SSH connection, pg_probackup uses a regular connection to the database to manage the remote operation. - See for details of how to set up + See the section Configuring + the Database Cluster for details of how to set up a database connection. @@ -3941,7 +3942,7 @@ pg_probackup delete -B backup_dir --instance To prepare for cloning/synchronizing a PostgreSQL instance, - set up the source instance server as follows: + set up the source server as follows: @@ -3964,7 +3965,7 @@ pg_probackup delete -B backup_dir --instance Before cloning/synchronizing a PostgreSQL instance, ensure that the source - instance server is running and accepting connections. To clone/sync a PostgreSQL instance, + server is running and accepting connections. To clone/sync a PostgreSQL instance, on the server with the destination instance, you can run the command as follows: @@ -4007,7 +4008,7 @@ pg_probackup catchup -b catchup_mode --source-pgdata= By specifying the option, you can set STREAM WAL delivery mode of copying, which will include all the necessary WAL files by streaming them from - the instance server via replication protocol. + the server via replication protocol. You can use connection_options to specify @@ -4998,7 +4999,7 @@ pg_probackup catchup -b catchup_mode Copies the instance in STREAM WAL delivery mode, including all the necessary WAL files by streaming them from - the instance server via replication protocol. + the server via replication protocol. From c9439b65e26b3bd8210e2903c23f23976ec4e284 Mon Sep 17 00:00:00 2001 From: Elena Indrupskaya Date: Mon, 18 Dec 2023 15:37:22 +0300 Subject: [PATCH 497/525] [DOC] Fix syntax errorn in doc step elements --- doc/pgprobackup.xml | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 547b76b0a..7e0787c8e 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -519,23 +519,23 @@ doc/src/sgml/pgprobackup.sgml Steps to perform: - Install <application>pg_probackup</application> on both <literal>backup_host</literal> and <literal>postgres_host</literal>. + Install pg_probackup on both backup_host and postgres_host. 
- <link linkend="pbk-setup-ssh">Set up an SSH connection</link> from <literal>backup_host</literal> to <literal>postgres_host</literal>. + Set up an SSH connection from backup_host to postgres_host. - <link linkend="pbk-configuring-the-database-cluster">Configure</link> your database cluster for <link linkend="pbk-setting-up-stream-backups">STREAM backups</link>. + Configure your database cluster for STREAM backups. - Initialize the backup catalog: + Initialize the backup catalog: backup_user@backup_host:~$ pg_probackup-16 init -B /mnt/backups INFO: Backup catalog '/mnt/backups' successfully initialized - Add a backup instance called <literal>mydb</literal> to the backup catalog: + Add a backup instance called mydb to the backup catalog: backup_user@backup_host:~$ pg_probackup-16 add-instance \ -B /mnt/backups \ @@ -547,7 +547,7 @@ INFO: Instance 'mydb' successfully initialized - Make a FULL backup: + Make a FULL backup: backup_user@backup_host:~$ pg_probackup-16 backup \ -B /mnt/backups \ @@ -579,7 +579,7 @@ INFO: Backup S6OBFN completed - List the backups of the instance: + List the backups of the instance: backup_user@backup_host:~$ pg_probackup-16 show -B /mnt/backups --instance=mydb ================================================================================================================================ @@ -589,7 +589,7 @@ backup_user@backup_host:~$ pg_probackup-16 show -B /mnt/backups --instance=mydb - Make an incremental backup in the DELTA mode: + Make an incremental backup in the DELTA mode: backup_user@backup_host:~$ pg_probackup-16 backup \ -B /mnt/backups \ @@ -623,8 +623,8 @@ INFO: Backup S6OBLG completed - Add or modify some parameters in the <application>pg_probackup</application> - configuration file, so that you do not have to specify them each time on the command line: + Add or modify some parameters in the pg_probackup + configuration file, so that you do not have to specify them each time on the command line: backup_user@backup_host:~$ pg_probackup-16 set-config \ -B /mnt/backups \ @@ -636,7 +636,7 @@ backup_user@backup_host:~$ pg_probackup-16 set-config \ - Check the configuration of the instance: + Check the configuration of the instance: backup_user@backup_host:~$ pg_probackup-16 show-config -B /mnt/backups --instance=mydb # Backup instance information @@ -676,8 +676,8 @@ remote-user = postgres - Make another incremental backup in the DELTA mode, omitting - the parameters stored in the configuration file earlier: + Make another incremental backup in the DELTA mode, omitting + the parameters stored in the configuration file earlier: backup_user@backup_host:~$ pg_probackup-16 backup -B /mnt/backups --instance=mydb -b delta --stream INFO: Backup start, pg_probackup version: 2.5.13, instance: mydb, backup ID: S6OBQO, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1 @@ -703,7 +703,7 @@ INFO: Backup S6OBQO completed - List the backups of the instance again: + List the backups of the instance again: backup_user@backup_host:~$ pg_probackup-16 show -B /mnt/backups --instance=mydb ================================================================================================================================== From f8c46efb5a700167bcb31cfcb3b41cfe50687ce6 Mon Sep 17 00:00:00 2001 From: Elena Indrupskaya Date: Tue, 26 Dec 2023 14:52:48 +0300 Subject: [PATCH 498/525] [PBCKP-865] Fix minor but grammar --- doc/pgprobackup.xml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/pgprobackup.xml 
b/doc/pgprobackup.xml index 7e0787c8e..14f14040e 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -472,7 +472,7 @@ doc/src/sgml/pgprobackup.sgml - backupdb — database used used to connect to the + backupdb — database used to connect to the PostgreSQL cluster. @@ -1473,7 +1473,7 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; Configuring the Remote Mode pg_probackup supports the remote mode that - allows to perform backup, restore and WAL archiving operations remotely. + allows you to perform backup, restore and WAL archiving operations remotely. In this mode, the backup catalog is stored on a local system, while PostgreSQL instance to backup and/or to restore is located on a remote system. Currently the only supported remote @@ -1556,7 +1556,7 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; Make sure pg_probackup on postgres_host can be located when a connection via SSH is made. For example, for Bash, you can modify PATH in ~/.bashrc of the postgres user - (above the line in bashrc which exits the script for non-interactive shells). + (above the line in bashrc that exits the script for non-interactive shells). Alternatively, for pg_probackup commands, specify the path to the directory containing the pg_probackup binary on postgres_host via the --remote-path option. @@ -2188,7 +2188,7 @@ pg_probackup restore -B backup_dir --instance LSN — read the pg_control in the - data directory to obtain redo LSN and redo TLI, which allows + data directory to obtain redo LSN and redo TLI, which allows you to determine a point in history(shiftpoint), where data directory state shifted from target backup chain history. If shiftpoint is not within reach of backup chain history, then restore is aborted. @@ -2417,7 +2417,7 @@ pg_probackup restore -B backup_dir --instance Using <application>pg_probackup</application> in the Remote Mode - pg_probackup supports the remote mode that allows to perform + pg_probackup supports the remote mode that allows you to perform backup and restore operations remotely via SSH. In this mode, the backup catalog is stored on a local system, while PostgreSQL instance to be backed @@ -3782,7 +3782,7 @@ pg_probackup merge -B backup_dir --instance pg_probackup in the remote mode. 
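Editor's aside on the LSN incremental restore mode touched by the hunk above (not part of this patch): the shiftpoint is derived from the redo LSN and timeline ID recorded in pg_control of the destination data directory. The Python sketch below only illustrates where those two values come from, by shelling out to the standard pg_controldata tool; the data directory path is a placeholder.

# Illustrative sketch: read the redo LSN and TLI that the LSN incremental mode
# compares against the backup chain history. The data directory is a placeholder.
import subprocess

def get_redo_point(pgdata="/var/lib/postgresql/16/main"):
    out = subprocess.run(["pg_controldata", pgdata],
                         check=True, capture_output=True, text=True).stdout
    redo_lsn = redo_tli = None
    for line in out.splitlines():
        if line.startswith("Latest checkpoint's REDO location:"):
            redo_lsn = line.split(":", 1)[1].strip()
        elif line.startswith("Latest checkpoint's TimeLineID:"):
            redo_tli = int(line.split(":", 1)[1].strip())
    return redo_lsn, redo_tli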
From b4035fd23d0fde7b517d2738c2506eb842f245d1 Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Wed, 27 Dec 2023 05:50:42 +0300 Subject: [PATCH 499/525] PBCKP-817 Add an example of the restore command --- doc/pgprobackup.xml | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 14f14040e..7d5ab5ccf 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -565,7 +565,7 @@ INFO: Wait for WAL segment /mnt/backups/backups/mydb/S6OBFN/database/pg_wal/0000 INFO: PGDATA size: 29MB INFO: Current Start LSN: 0/2000060, TLI: 1 INFO: Start transferring data files -INFO: Data files are transferred, time elapsed: 1s +INFO: Data files are transferred, time elapsed: 0 INFO: wait for pg_stop_backup() INFO: pg_stop backup() successfully executed INFO: stop_lsn: 0/2003CB0 @@ -689,7 +689,7 @@ INFO: PGDATA size: 29MB INFO: Current Start LSN: 0/6000028, TLI: 1 INFO: Parent Start LSN: 0/4000028, TLI: 1 INFO: Start transferring data files -INFO: Data files are transferred, time elapsed: 1s +INFO: Data files are transferred, time elapsed: 0 INFO: wait for pg_stop_backup() INFO: pg_stop backup() successfully executed INFO: stop_lsn: 0/6000168 @@ -714,6 +714,27 @@ backup_user@backup_host:~$ pg_probackup-16 show -B /mnt/backups --instance=mydb mydb 16 S6OBFN 2024-01-03 06:59:49+00 FULL STREAM 1/0 10s 29MB 16MB 1.00 0/2000060 0/2003CB0 OK + + Restore the data from the latest available backup to an arbitrary location: + +backup_user@backup_host:~$ pg_probackup-16 restore -B /mnt/backups -D /var/lib/postgresql/16/staging --instance=mydb +INFO: Validating parents for backup S6OBQO +INFO: Validating backup S6OBFN +INFO: Backup S6OBFN data files are valid +INFO: Validating backup S6OBLG +INFO: Backup S6OBLG data files are valid +INFO: Validating backup S6OBQO +INFO: Backup S6OBQO data files are valid +INFO: Backup S6OBQO WAL segments are valid +INFO: Backup S6OBQO is valid. +INFO: Restoring the database from backup at 2024-01-03 07:06:24+00 +INFO: Start restoring backup files. PGDATA size: 61MB +INFO: Backup files are restored. Transfered bytes: 61MB, time elapsed: 1s +INFO: Restore incremental ratio (less is better): 100% (61MB/61MB) +INFO: Syncing restored files to disk +INFO: Restored backup files are synced, time elapsed: 0 +INFO: Restore of backup S6OBQO completed. + From 9a91ea78dd7a5694bc2688616a3028df259209cc Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Wed, 3 Jan 2024 17:12:40 +0700 Subject: [PATCH 500/525] Add a link to the installation section in the Quick start --- doc/pgprobackup.xml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 7d5ab5ccf..26dfde6e1 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -519,7 +519,7 @@ doc/src/sgml/pgprobackup.sgml Steps to perform: - Install pg_probackup on both backup_host and postgres_host. + Install pg_probackup on both backup_host and postgres_host. Set up an SSH connection from backup_host to postgres_host. @@ -734,6 +734,7 @@ INFO: Restore incremental ratio (less is better): 100% (61MB/61MB) INFO: Syncing restored files to disk INFO: Restored backup files are synced, time elapsed: 0 INFO: Restore of backup S6OBQO completed. 
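Editor's aside on the restore example added above (not part of this patch): the same command can be driven from a script, relying on the non-zero exit code pg_probackup reports on failure. The wrapper below is only a sketch; the paths, binary name and instance name mirror the example.

# Illustrative wrapper around the restore command from the example above.
import subprocess

def restore_latest(backup_dir="/mnt/backups", instance="mydb",
                   target_dir="/var/lib/postgresql/16/staging"):
    # check=True raises CalledProcessError if the restore exits with an error
    subprocess.run(
        ["pg_probackup-16", "restore",
         "-B", backup_dir, "-D", target_dir, "--instance=" + instance],
        check=True)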
+ From 58752c5a755e29ed690ce6dea9e96eb7419d6fe4 Mon Sep 17 00:00:00 2001 From: oleg gurev Date: Fri, 26 Jan 2024 11:02:37 +0300 Subject: [PATCH 501/525] [PBCKP-198] Added tablespaces into show command output - Solve Issue #431 - Output all tablespaces in backup in JSON output - Output all tablespaces in backup in PLAIN output --- src/show.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++ tests/show_test.py | 36 +++++++++++++++++++++++++++++++ 2 files changed, 90 insertions(+) diff --git a/src/show.c b/src/show.c index 86a122698..810262df6 100644 --- a/src/show.c +++ b/src/show.c @@ -67,6 +67,7 @@ static void show_archive_plain(const char *instance_name, uint32 xlog_seg_size, parray *timelines_list, bool show_name); static void show_archive_json(const char *instance_name, uint32 xlog_seg_size, parray *tli_list); +static bool backup_has_tablespace_map(pgBackup *backup); static PQExpBufferData show_buf; static bool first_instance = true; @@ -479,6 +480,32 @@ print_backup_json_object(PQExpBuffer buf, pgBackup *backup) appendPQExpBuffer(buf, "%u", backup->content_crc); } + /* print tablespaces list */ + if (backup_has_tablespace_map(backup)) + { + parray *links = parray_new(); + + json_add_key(buf, "tablespace_map", json_level); + json_add(buf, JT_BEGIN_ARRAY, &json_level); + + read_tablespace_map(links, backup->root_dir); + parray_qsort(links, pgFileCompareLinked); + + for (size_t i = 0; i < parray_num(links); i++){ + pgFile *link = (pgFile *) parray_get(links, i); + if (i) + appendPQExpBufferChar(buf, ','); + json_add(buf, JT_BEGIN_OBJECT, &json_level); + json_add_value(buf, "oid", link->name, json_level, true); + json_add_value(buf, "path", link->linked, json_level, true); + json_add(buf, JT_END_OBJECT, &json_level); + } + /* End of tablespaces */ + json_add(buf, JT_END_ARRAY, &json_level); + parray_walk(links, pgFileFree); + parray_free(links); + } + json_add(buf, JT_END_OBJECT, &json_level); } @@ -521,7 +548,27 @@ show_backup(InstanceState *instanceState, time_t requested_backup_id) } if (show_format == SHOW_PLAIN) + { pgBackupWriteControl(stdout, backup, false); + + /* print tablespaces list */ + if (backup_has_tablespace_map(backup)) + { + parray *links = parray_new(); + + fio_fprintf(stdout, "\ntablespace_map = '"); + + read_tablespace_map(links, backup->root_dir); + parray_qsort(links, pgFileCompareLinked); + + for (size_t i = 0; i < parray_num(links); i++){ + pgFile *link = (pgFile *) parray_get(links, i); + fio_fprintf(stdout, "%s %s%s", link->name, link->linked, (i < parray_num(links) - 1) ? 
"; " : "'\n"); + } + parray_walk(links, pgFileFree); + parray_free(links); + } + } else elog(ERROR, "Invalid show format %d", (int) show_format); @@ -1174,3 +1221,10 @@ show_archive_json(const char *instance_name, uint32 xlog_seg_size, first_instance = false; } + +static bool backup_has_tablespace_map(pgBackup *backup) +{ + char map_path[MAXPGPATH]; + join_path_components(map_path, backup->database_dir, PG_TABLESPACE_MAP_FILE); + return fileExists(map_path, FIO_BACKUP_HOST); +} diff --git a/tests/show_test.py b/tests/show_test.py index c4b96499d..27b6fab96 100644 --- a/tests/show_test.py +++ b/tests/show_test.py @@ -507,3 +507,39 @@ def test_color_with_no_terminal(self): '[0m', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) + + # @unittest.skip("skip") + def test_tablespace_print_issue_431(self): + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Create tablespace + tblspc_path = os.path.join(node.base_dir, "tblspc") + os.makedirs(tblspc_path) + with node.connect("postgres") as con: + con.connection.autocommit = True + con.execute("CREATE TABLESPACE tblspc LOCATION '%s'" % tblspc_path) + con.connection.autocommit = False + con.execute("CREATE TABLE test (id int) TABLESPACE tblspc") + con.execute("INSERT INTO test VALUES (1)") + con.commit() + + full_backup_id = self.backup_node(backup_dir, 'node', node) + self.assertIn("OK", self.show_pb(backup_dir,'node', as_text=True)) + # Check that tablespace info exists. JSON + self.assertIn("tablespace_map", self.show_pb(backup_dir, 'node', as_text=True)) + self.assertIn("oid", self.show_pb(backup_dir, 'node', as_text=True)) + self.assertIn("path", self.show_pb(backup_dir, 'node', as_text=True)) + self.assertIn(tblspc_path, self.show_pb(backup_dir, 'node', as_text=True)) + # Check that tablespace info exists. PLAIN + self.assertIn("tablespace_map", self.show_pb(backup_dir, 'node', backup_id=full_backup_id, as_text=True, as_json=False)) + self.assertIn(tblspc_path, self.show_pb(backup_dir, 'node', backup_id=full_backup_id, as_text=True, as_json=False)) + # Check that tablespace info NOT exists if backup id not provided. 
PLAIN + self.assertNotIn("tablespace_map", self.show_pb(backup_dir, 'node', as_text=True, as_json=False)) From 09236c6583a89bce6231abdd38d1b4e3b5062d0b Mon Sep 17 00:00:00 2001 From: Oleg Gurev Date: Tue, 19 Dec 2023 14:56:09 +0300 Subject: [PATCH 502/525] [PBCKP-874] Addeded "Logging options" section to help.c for - add-instance - archive-push - archive-get - catchup commands --- src/help.c | 96 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) diff --git a/src/help.c b/src/help.c index 954ba6416..b3ba02160 100644 --- a/src/help.c +++ b/src/help.c @@ -983,6 +983,30 @@ help_add_instance(void) printf(_(" --remote-user=username user name for ssh connection (default: current user)\n")); printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n\n")); + + printf(_("\n Logging options:\n")); + printf(_(" --log-level-console=log-level-console\n")); + printf(_(" level for console logging (default: info)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-level-file=log-level-file\n")); + printf(_(" level for file logging (default: off)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-filename=log-filename\n")); + printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); + printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); + printf(_(" --error-log-filename=error-log-filename\n")); + printf(_(" filename for error logging (default: none)\n")); + printf(_(" --log-directory=log-directory\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" --log-rotation-size=log-rotation-size\n")); + printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); + printf(_(" --log-rotation-age=log-rotation-age\n")); + printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n")); } static void @@ -1030,6 +1054,30 @@ help_archive_push(void) printf(_(" --compress-level=compress-level\n")); printf(_(" level of compression [0-9] (default: 1)\n")); + printf(_("\n Logging options:\n")); + printf(_(" --log-level-console=log-level-console\n")); + printf(_(" level for console logging (default: info)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-level-file=log-level-file\n")); + printf(_(" level for file logging (default: off)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-filename=log-filename\n")); + printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); + printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); + printf(_(" --error-log-filename=error-log-filename\n")); + printf(_(" filename for error logging (default: none)\n")); + printf(_(" 
--log-directory=log-directory\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" --log-rotation-size=log-rotation-size\n")); + printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); + printf(_(" --log-rotation-age=log-rotation-age\n")); + printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n")); + printf(_("\n Remote options:\n")); printf(_(" --remote-proto=protocol remote protocol to use\n")); printf(_(" available options: 'ssh', 'none' (default: ssh)\n")); @@ -1065,6 +1113,30 @@ help_archive_get(void) printf(_(" --prefetch-dir=path location of the store area for prefetched WAL files\n")); printf(_(" --no-validate-wal skip validation of prefetched WAL file before using it\n")); + printf(_("\n Logging options:\n")); + printf(_(" --log-level-console=log-level-console\n")); + printf(_(" level for console logging (default: info)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-level-file=log-level-file\n")); + printf(_(" level for file logging (default: off)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-filename=log-filename\n")); + printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); + printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); + printf(_(" --error-log-filename=error-log-filename\n")); + printf(_(" filename for error logging (default: none)\n")); + printf(_(" --log-directory=log-directory\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" --log-rotation-size=log-rotation-size\n")); + printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); + printf(_(" --log-rotation-age=log-rotation-age\n")); + printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n")); + printf(_("\n Remote options:\n")); printf(_(" --remote-proto=protocol remote protocol to use\n")); printf(_(" available options: 'ssh', 'none' (default: ssh)\n")); @@ -1131,6 +1203,30 @@ help_catchup(void) printf(_(" -w, --no-password never prompt for password\n")); printf(_(" -W, --password force password prompt\n\n")); + printf(_("\n Logging options:\n")); + printf(_(" --log-level-console=log-level-console\n")); + printf(_(" level for console logging (default: info)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-level-file=log-level-file\n")); + printf(_(" level for file logging (default: off)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-filename=log-filename\n")); + printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); + printf(_(" support 
strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); + printf(_(" --error-log-filename=error-log-filename\n")); + printf(_(" filename for error logging (default: none)\n")); + printf(_(" --log-directory=log-directory\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" --log-rotation-size=log-rotation-size\n")); + printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); + printf(_(" --log-rotation-age=log-rotation-age\n")); + printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n")); + printf(_("\n Remote options:\n")); printf(_(" --remote-proto=protocol remote protocol to use\n")); printf(_(" available options: 'ssh', 'none' (default: ssh)\n")); From 43318d696be4dcec37eeb433ea4086ec2127a24b Mon Sep 17 00:00:00 2001 From: Oleg Gurev Date: Fri, 12 Jan 2024 17:37:51 +0300 Subject: [PATCH 503/525] [PBCKP-874] Addeded default unit output for config-show - config-show command now has --default-units parameter - If it is provided, all time and memory - configuration options will be printed in their default units - test added, test_help_1 corrected --- src/configure.c | 7 ++++-- src/help.c | 4 ++- src/pg_probackup.c | 5 +++- src/pg_probackup.h | 2 +- src/utils/configuration.c | 41 +++++++++++++++++++++---------- src/utils/configuration.h | 1 + tests/expected/option_help.out | 1 + tests/expected/option_help_ru.out | 1 + tests/option_test.py | 19 ++++++++++++++ 9 files changed, 63 insertions(+), 18 deletions(-) diff --git a/src/configure.c b/src/configure.c index f7befb0c5..4f6774d55 100644 --- a/src/configure.c +++ b/src/configure.c @@ -269,7 +269,7 @@ static const char *current_group = NULL; * Show configure options including default values. */ void -do_show_config(void) +do_show_config(bool show_default_units) { int i; @@ -277,10 +277,13 @@ do_show_config(void) for (i = 0; instance_options[i].type; i++) { + if (show_default_units && strchr("bBiIuU", instance_options[i].type) && instance_options[i].get_value == *option_get_value) + instance_options[i].flags |= GET_VAL_IN_DEFAULT_UNITS; /* Set flag */ if (show_format == SHOW_PLAIN) show_configure_plain(&instance_options[i]); else show_configure_json(&instance_options[i]); + instance_options[i].flags &= ~(GET_VAL_IN_DEFAULT_UNITS); /* Reset flag. It was resetted in option_get_value(). 
Probably this reset isn't needed */ } show_configure_end(); @@ -801,6 +804,6 @@ show_configure_json(ConfigOption *opt) return; json_add_value(&show_buf, opt->lname, value, json_level, - true); + !(opt->flags & GET_VAL_IN_DEFAULT_UNITS)); pfree(value); } diff --git a/src/help.c b/src/help.c index b3ba02160..0ccae938f 100644 --- a/src/help.c +++ b/src/help.c @@ -121,6 +121,7 @@ help_pg_probackup(void) printf(_("\n %s show-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME); printf(_(" [--format=format]\n")); + printf(_(" [--default-units]\n")); printf(_(" [--help]\n")); printf(_("\n %s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME); @@ -953,7 +954,8 @@ help_show_config(void) printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" --instance=instance_name name of the instance\n")); - printf(_(" --format=format show format=PLAIN|JSON\n\n")); + printf(_(" --format=format show format=PLAIN|JSON\n")); + printf(_(" --default-units show memory and time values in default units\n\n")); } static void diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 6653898e4..9b896d8bc 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -164,6 +164,7 @@ bool no_validate_wal = false; /* show options */ ShowFormat show_format = SHOW_PLAIN; bool show_archive = false; +static bool show_default_units = false; /* set-backup options */ int64 ttl = -1; @@ -275,6 +276,8 @@ static ConfigOption cmd_options[] = /* show options */ { 'f', 165, "format", opt_show_format, SOURCE_CMD_STRICT }, { 'b', 166, "archive", &show_archive, SOURCE_CMD_STRICT }, + /* show-config options */ + { 'b', 167, "default-units", &show_default_units,SOURCE_CMD_STRICT }, /* set-backup options */ { 'I', 170, "ttl", &ttl, SOURCE_CMD_STRICT, SOURCE_DEFAULT, 0, OPTION_UNIT_S, option_get_value}, { 's', 171, "expire-time", &expire_time_string, SOURCE_CMD_STRICT }, @@ -1049,7 +1052,7 @@ main(int argc, char *argv[]) do_merge(instanceState, current.backup_id, no_validate, no_sync); break; case SHOW_CONFIG_CMD: - do_show_config(); + do_show_config(show_default_units); break; case SET_CONFIG_CMD: do_set_config(instanceState, false); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 61dd2ce0e..bfb551ace 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -939,7 +939,7 @@ extern void do_archive_get(InstanceState *instanceState, InstanceConfig *instanc char *wal_file_name, int batch_size, bool validate_wal); /* in configure.c */ -extern void do_show_config(void); +extern void do_show_config(bool show_default_units); extern void do_set_config(InstanceState *instanceState, bool missing_ok); extern void init_config(InstanceConfig *config, const char *instance_name); extern InstanceConfig *readInstanceConfigFile(InstanceState *instanceState); diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 921555350..6b2382996 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -678,6 +678,8 @@ config_set_opt(ConfigOption options[], void *var, OptionSource source) /* * Return value of the function in the string representation. Result is * allocated string. 
+ * We can set GET_VAL_IN_DEFAULT_UNITS flag in opt->flags + * before call option_get_value() to get option value in default units */ char * option_get_value(ConfigOption *opt) @@ -692,20 +694,33 @@ option_get_value(ConfigOption *opt) */ if (opt->flags & OPTION_UNIT) { - if (opt->type == 'i') - convert_from_base_unit(*((int32 *) opt->var), - opt->flags & OPTION_UNIT, &value, &unit); - else if (opt->type == 'I') - convert_from_base_unit(*((int64 *) opt->var), - opt->flags & OPTION_UNIT, &value, &unit); - else if (opt->type == 'u') - convert_from_base_unit_u(*((uint32 *) opt->var), - opt->flags & OPTION_UNIT, &value_u, &unit); - else if (opt->type == 'U') - convert_from_base_unit_u(*((uint64 *) opt->var), - opt->flags & OPTION_UNIT, &value_u, &unit); + if (opt->flags & GET_VAL_IN_DEFAULT_UNITS){ + if (opt->type == 'i') + value = *((int32 *) opt->var); + else if (opt->type == 'I') + value = *((int64 *) opt->var); + else if (opt->type == 'u') + value_u = *((uint32 *) opt->var); + else if (opt->type == 'U') + value_u = *((uint64 *) opt->var); + unit = ""; + } + else + { + if (opt->type == 'i') + convert_from_base_unit(*((int32 *) opt->var), + opt->flags & OPTION_UNIT, &value, &unit); + else if (opt->type == 'I') + convert_from_base_unit(*((int64 *) opt->var), + opt->flags & OPTION_UNIT, &value, &unit); + else if (opt->type == 'u') + convert_from_base_unit_u(*((uint32 *) opt->var), + opt->flags & OPTION_UNIT, &value_u, &unit); + else if (opt->type == 'U') + convert_from_base_unit_u(*((uint64 *) opt->var), + opt->flags & OPTION_UNIT, &value_u, &unit); + } } - /* Get string representation itself */ switch (opt->type) { diff --git a/src/utils/configuration.h b/src/utils/configuration.h index 2c6ea3eec..f3bda65de 100644 --- a/src/utils/configuration.h +++ b/src/utils/configuration.h @@ -100,6 +100,7 @@ struct ConfigOption #define OPTION_UNIT_TIME 0xF0000 /* mask for time-related units */ #define OPTION_UNIT (OPTION_UNIT_MEMORY | OPTION_UNIT_TIME) +#define GET_VAL_IN_DEFAULT_UNITS 0x80000000 /* bitflag to get memory and time values in default units*/ extern ProbackupSubcmd parse_subcmd(char const * const subcmd_str); extern char const *get_subcmd_name(ProbackupSubcmd const subcmd); diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index 49f79607f..618a0d156 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -39,6 +39,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. 
pg_probackup show-config -B backup-path --instance=instance_name [--format=format] + [--default-units] [--help] pg_probackup backup -B backup-path -b backup-mode --instance=instance_name diff --git a/tests/expected/option_help_ru.out b/tests/expected/option_help_ru.out index 976932b9d..005c74ebb 100644 --- a/tests/expected/option_help_ru.out +++ b/tests/expected/option_help_ru.out @@ -39,6 +39,7 @@ pg_probackup - утилита для управления резервным к pg_probackup show-config -B backup-path --instance=instance_name [--format=format] + [--default-units] [--help] pg_probackup backup -B backup-path -b backup-mode --instance=instance_name diff --git a/tests/option_test.py b/tests/option_test.py index 66cc13746..9a829aaef 100644 --- a/tests/option_test.py +++ b/tests/option_test.py @@ -230,6 +230,25 @@ def test_help_6(self): self.skipTest( 'You need configure PostgreSQL with --enabled-nls option for this test') + # @unittest.skip("skip") + def test_options_default_units(self): + """check --default-units option""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node')) + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + # check that --default-units option works correctly + output = self.run_pb(["show-config", "--backup-path", backup_dir, "--instance=node"]) + self.assertIn(container=output, member="archive-timeout = 5min") + output = self.run_pb(["show-config", "--backup-path", backup_dir, "--instance=node", "--default-units"]) + self.assertIn(container=output, member="archive-timeout = 300") + self.assertNotIn(container=output, member="archive-timeout = 300s") + # check that we have now quotes ("") in json output + output = self.run_pb(["show-config", "--backup-path", backup_dir, "--instance=node", "--default-units", "--format=json"]) + self.assertIn(container=output, member='"archive-timeout": 300,') + self.assertIn(container=output, member='"retention-redundancy": 0,') + self.assertNotIn(container=output, member='"archive-timeout": "300",') def check_locale(locale_name): ret=True From 031d3ebef77f293d4ac491d485e08d22e9fc7e63 Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Fri, 2 Feb 2024 12:07:24 +0700 Subject: [PATCH 504/525] Put listitem contents in --- doc/pgprobackup.xml | 108 +++++++++++++++++++++++++++++++++----------- 1 file changed, 81 insertions(+), 27 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 26dfde6e1..74389f9e0 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -748,7 +748,9 @@ INFO: Restore of backup S6OBQO completed. - Add the pg_probackup repository GPG key + + Add the pg_probackup repository GPG key + sudo apt install gpg wget wget -qO - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG-PROBACKUP | \ @@ -756,7 +758,9 @@ sudo tee /etc/apt/trusted.gpg.d/pg_probackup.asc - Setup the binary package repository + + Setup the binary package repository + . 
/etc/os-release echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb $VERSION_CODENAME main-$VERSION_CODENAME" | \ @@ -764,24 +768,32 @@ sudo tee /etc/apt/sources.list.d/pg_probackup.list - Optionally setup the source package repository for rebuilding the binaries + + Optionally setup the source package repository for rebuilding the binaries + echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb $VERSION_CODENAME main-$VERSION_CODENAME" | \ sudo tee -a /etc/apt/sources.list.d/pg_probackup.list - List the available pg_probackup packages + + List the available pg_probackup packages + - Using apt: + + Using apt: + sudo apt update apt search pg_probackup - Using apt-get: + + Using apt-get: + sudo apt-get update apt-cache search pg_probackup @@ -790,19 +802,25 @@ apt-cache search pg_probackup - Install or upgrade a pg_probackup version of your choice + + Install or upgrade a pg_probackup version of your choice + sudo apt install pg-probackup-15 - Optionally install the debug package + + Optionally install the debug package + sudo apt install pg-probackup-15-dbg - Optionally install the source package (provided you have set up the source package repository as described above) + + Optionally install the source package (provided you have set up the source package repository as described above) + sudo apt install dpkg-dev sudo apt source pg-probackup-15 @@ -817,41 +835,55 @@ sudo apt source pg-probackup-15 - Install the pg_probackup repository + + Install the pg_probackup repository + dnf install https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm - List the available pg_probackup packages + + List the available pg_probackup packages + dnf search pg_probackup - Install or upgrade a pg_probackup version of your choice + + Install or upgrade a pg_probackup version of your choice + dnf install pg_probackup-15 - Optionally install the debug package + + Optionally install the debug package + dnf install pg_probackup-15-debuginfo - Optionally install the source package for rebuilding the binaries + + Optionally install the source package for rebuilding the binaries + - Using dnf: + + Using dnf: + dnf install 'dnf-command(download)' dnf download --source pg_probackup-15 - Using yum: + + Using yum: + yumdownloader --source pg_probackup-15 @@ -864,10 +896,14 @@ yumdownloader --source pg_probackup-15 Installation on ALT Linux - Setup the repository + + Setup the repository + - On ALT Linux 10: + + On ALT Linux 10: + . /etc/os-release echo "rpm http://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p$VERSION_ID x86_64 vanilla" | \ @@ -875,7 +911,9 @@ sudo tee /etc/apt/sources.list.d/pg_probackup.list - On ALT Linux 8 and 9: + + On ALT Linux 8 and 9: + . 
/etc/os-release echo "rpm http://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-$VERSION_ID x86_64 vanilla" | \ @@ -885,20 +923,26 @@ sudo tee /etc/apt/sources.list.d/pg_probackup.list - List the available pg_probackup packages + + List the available pg_probackup packages + sudo apt-get update apt-cache search pg_probackup - Install or upgrade a pg_probackup version of your choice + + Install or upgrade a pg_probackup version of your choice + sudo apt-get install pg_probackup-15 - Optionally install the debug package + + Optionally install the debug package + sudo apt-get install pg_probackup-15-debuginfo @@ -909,7 +953,9 @@ sudo apt-get install pg_probackup-15-debuginfo Installation on SUSE Linux - Add the pg_probackup repository GPG key + + Add the pg_probackup repository GPG key + zypper in -y gpg wget wget -O GPG-KEY-PG_PROBACKUP https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP @@ -917,25 +963,33 @@ rpm --import GPG-KEY-PG_PROBACKUP - Setup the repository + + Setup the repository + zypper in https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-suse.noarch.rpm - List the available pg_probackup packages + + List the available pg_probackup packages + zypper se pg_probackup - Install or upgrade a pg_probackup version of your choice + + Install or upgrade a pg_probackup version of your choice + zypper in pg_probackup-15 - Optionally install the source package for rebuilding the binaries + + Optionally install the source package for rebuilding the binaries + zypper si pg_probackup-15 From 287e7fc89f7d84fc9c3ea07f89798a64224c6b85 Mon Sep 17 00:00:00 2001 From: Oleg Gurev Date: Fri, 9 Feb 2024 15:50:58 +0300 Subject: [PATCH 505/525] Add_requirements.txt testgres can be installed with pip install -r pg_probackup/tests/requirements.txt --- tests/requirements.txt | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 tests/requirements.txt diff --git a/tests/requirements.txt b/tests/requirements.txt new file mode 100644 index 000000000..62efb0e68 --- /dev/null +++ b/tests/requirements.txt @@ -0,0 +1,13 @@ +# Testgres can be installed in the following ways: +# 1. From a pip package (recommended) +# testgres==1.8.5 +# 2. From a specific Git branch, tag or commit +# git+https://github.com/postgrespro/testgres.git@ +# 3. 
From a local directory +# /path/to/local/directory/testgres +git+https://github.com/postgrespro/testgres.git@master#egg=testgres-pg_probackup2&subdirectory=testgres/plugins/pg_probackup2 +allure-pytest +deprecation +pexpect +pytest +pytest-xdist From 17037baea211bf2148a09e1c3b9284847647d33b Mon Sep 17 00:00:00 2001 From: oleg gurev Date: Wed, 14 Feb 2024 10:51:47 +0300 Subject: [PATCH 506/525] [PBCKP-913] Fix WAL switching with huge XLogRecord - Backport of PBCKP-859 bugfix - increase current segment number when reader has already read it before - avoid error if reader has to switch WAL again - add python test for PAGE backup with huge XLog record --- src/parsexlog.c | 8 ++++++++ tests/page_test.py | 47 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+) diff --git a/src/parsexlog.c b/src/parsexlog.c index 7c4b5b349..7df169fbf 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -1588,9 +1588,14 @@ SwitchThreadToNextWal(XLogReaderState *xlogreader, xlog_thread_arg *arg) reader_data = (XLogReaderData *) xlogreader->private_data; reader_data->need_switch = false; +start: /* Critical section */ pthread_lock(&wal_segment_mutex); Assert(segno_next); + + if (reader_data->xlogsegno > segno_next) + segno_next = reader_data->xlogsegno; + reader_data->xlogsegno = segno_next; segnum_read++; segno_next++; @@ -1604,6 +1609,7 @@ SwitchThreadToNextWal(XLogReaderState *xlogreader, xlog_thread_arg *arg) GetXLogRecPtr(reader_data->xlogsegno, 0, wal_seg_size, arg->startpoint); /* We need to close previously opened file if it wasn't closed earlier */ CleanupXLogPageRead(xlogreader); + xlogreader->currRecPtr = InvalidXLogRecPtr; /* Skip over the page header and contrecord if any */ found = XLogFindNextRecord(xlogreader, arg->startpoint); @@ -1613,6 +1619,8 @@ SwitchThreadToNextWal(XLogReaderState *xlogreader, xlog_thread_arg *arg) */ if (XLogRecPtrIsInvalid(found)) { + if (reader_data->need_switch) + goto start; /* * Check if we need to stop reading. We stop if other thread found a * target segment. diff --git a/tests/page_test.py b/tests/page_test.py index 99f3ce992..a66d6d413 100644 --- a/tests/page_test.py +++ b/tests/page_test.py @@ -1415,3 +1415,50 @@ def test_page_pg_resetxlog(self): # # pgdata_restored = self.pgdata_content(node_restored.data_dir) # self.compare_pgdata(pgdata, pgdata_restored) + + def test_page_huge_xlog_record(self): + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_locks_per_transaction': '1000', + 'work_mem': '100MB', + 'temp_buffers': '100MB', + 'wal_buffers': '128MB', + 'wal_level' : 'logical', + }) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=3) + + # Do full backup + self.backup_node(backup_dir, 'node', node, backup_type='full') + show_backup = self.show_pb(backup_dir,'node')[0] + + self.assertEqual(show_backup['status'], "OK") + self.assertEqual(show_backup['backup-mode'], "FULL") + + # Originally client had the problem at the transaction that (supposedly) + # deletes a lot of temporary tables (probably it was client disconnect). + # It generated ~40MB COMMIT WAL record. + # + # `pg_logical_emit_message` is much simpler and faster way to generate + # such huge record. 
+ node.safe_psql( + "postgres", + "select pg_logical_emit_message(False, 'z', repeat('o', 60*1000*1000))") + + # Do page backup + self.backup_node(backup_dir, 'node', node, backup_type='page') + + show_backup = self.show_pb(backup_dir,'node')[1] + self.assertEqual(show_backup['status'], "OK") + self.assertEqual(show_backup['backup-mode'], "PAGE") From 90a4a4f4b32128ee728c530b1fabba608b3d51eb Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Wed, 14 Feb 2024 12:57:52 +0700 Subject: [PATCH 507/525] Replace BACKUP_PATH in the source files --- doc/pgprobackup.xml | 10 +++++----- po/ru.po | 2 +- src/archive.c | 4 ++-- src/catalog.c | 2 +- src/help.c | 22 +++++++++++----------- src/pg_probackup.c | 6 +++--- src/pg_probackup.h | 8 ++++---- src/pg_probackup_state.h | 6 +++--- tests/option_test.py | 2 +- 9 files changed, 31 insertions(+), 31 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 74389f9e0..49e74e626 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -1128,7 +1128,7 @@ pg_probackup add-instance -B backup_dir -D backup_dir directory and at least read-only access to data_dir directory. If you specify the path to the backup catalog in the - BACKUP_PATH environment variable, you can + BACKUP_DIR environment variable, you can omit the corresponding option when running pg_probackup commands. @@ -5212,14 +5212,14 @@ pg_probackup catchup -b catchup_mode -BACKUP_PATH +BACKUP_DIR Specifies the absolute path to the backup catalog. Backup catalog is a directory where all backup files and meta information are stored. Since this option is required for most of the pg_probackup commands, you are recommended to specify - it once in the BACKUP_PATH environment variable. In this case, + it once in the BACKUP_DIR environment variable. In this case, you do not need to use this option each time on the command line. @@ -5679,7 +5679,7 @@ pg_probackup catchup -b catchup_mode lazily, when the first log message is written. - Default: $BACKUP_PATH/log/ + Default: $BACKUP_DIR/log/ @@ -5762,7 +5762,7 @@ pg_probackup catchup -b catchup_mode reached, the log file is rotated once a pg_probackup command is launched, except help and version commands. The time of the last log file creation is stored in - $BACKUP_PATH/log/log_rotation. The zero value disables + $BACKUP_DIR/log/log_rotation. The zero value disables time-based rotation. Supported units: ms, s, min, h, d (min by default). diff --git a/po/ru.po b/po/ru.po index 1263675c2..30f50f797 100644 --- a/po/ru.po +++ b/po/ru.po @@ -811,7 +811,7 @@ msgstr "" #: src/help.c:360 src/help.c:521 src/help.c:588 src/help.c:635 src/help.c:715 #: src/help.c:761 src/help.c:833 #, c-format -msgid " directory for file logging (default: BACKUP_PATH/log)\n" +msgid " directory for file logging (default: BACKUP_DIR/log)\n" msgstr "" #: src/help.c:361 src/help.c:522 src/help.c:589 src/help.c:636 src/help.c:716 diff --git a/src/archive.c b/src/archive.c index 7d753c8b3..e97a1ade8 100644 --- a/src/archive.c +++ b/src/archive.c @@ -113,7 +113,7 @@ static parray *setup_push_filelist(const char *archive_status_dir, * set archive_command to * 'pg_probackup archive-push -B /home/anastasia/backup --wal-file-name %f', * to move backups into arclog_path. 
- * Where archlog_path is $BACKUP_PATH/wal/instance_name + * Where archlog_path is $BACKUP_DIR/wal/instance_name */ void do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *pg_xlog_dir, @@ -1126,7 +1126,7 @@ do_archive_get(InstanceState *instanceState, InstanceConfig *instance, const cha join_path_components(absolute_wal_file_path, current_dir, wal_file_path); /* full filepath to WAL file in archive directory. - * $BACKUP_PATH/wal/instance_name/000000010000000000000001 */ + * $BACKUP_DIR/wal/instance_name/000000010000000000000001 */ join_path_components(backup_wal_file_path, instanceState->instance_wal_subdir_path, wal_file_name); INSTR_TIME_SET_CURRENT(start_time); diff --git a/src/catalog.c b/src/catalog.c index b29090789..4da406af3 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1437,7 +1437,7 @@ get_multi_timeline_parent(parray *backup_list, parray *tli_list, } /* - * Create backup directory in $BACKUP_PATH + * Create backup directory in $BACKUP_DIR * (with proposed backup->backup_id) * and initialize this directory. * If creation of directory fails, then diff --git a/src/help.c b/src/help.c index 0ccae938f..46acab886 100644 --- a/src/help.c +++ b/src/help.c @@ -372,7 +372,7 @@ help_backup(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -548,7 +548,7 @@ help_restore(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -621,7 +621,7 @@ help_validate(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -674,7 +674,7 @@ help_checkdb(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -760,7 +760,7 @@ help_delete(void) printf(_(" 
--error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -814,7 +814,7 @@ help_merge(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -890,7 +890,7 @@ help_set_config(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -1002,7 +1002,7 @@ help_add_instance(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -1072,7 +1072,7 @@ help_archive_push(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -1131,7 +1131,7 @@ help_archive_get(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -1221,7 +1221,7 @@ help_catchup(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); 
- printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 9b896d8bc..09817fdde 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -468,10 +468,10 @@ main(int argc, char *argv[]) if (backup_path == NULL) { /* - * If command line argument is not set, try to read BACKUP_PATH + * If command line argument is not set, try to read BACKUP_DIR * from environment variable */ - backup_path = getenv("BACKUP_PATH"); + backup_path = getenv("BACKUP_DIR"); } if (backup_path != NULL) @@ -498,7 +498,7 @@ main(int argc, char *argv[]) backup_subcmd != CATCHUP_CMD) elog(ERROR, "No backup catalog path specified.\n" - "Please specify it either using environment variable BACKUP_PATH or\n" + "Please specify it either using environment variable BACKUP_DIR or\n" "command line option --backup-path (-B)"); /* ===== catalogState (END) ======*/ diff --git a/src/pg_probackup.h b/src/pg_probackup.h index bfb551ace..7b884c90b 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -837,13 +837,13 @@ typedef struct InstanceState CatalogState *catalog_state; char instance_name[MAXPGPATH]; //previously global var instance_name - /* $BACKUP_PATH/backups/instance_name */ + /* $BACKUP_DIR/backups/instance_name */ char instance_backup_subdir_path[MAXPGPATH]; - /* $BACKUP_PATH/backups/instance_name/BACKUP_CATALOG_CONF_FILE */ + /* $BACKUP_DIR/backups/instance_name/BACKUP_CATALOG_CONF_FILE */ char instance_config_path[MAXPGPATH]; - - /* $BACKUP_PATH/backups/instance_name */ + + /* $BACKUP_DIR/backups/instance_name */ char instance_wal_subdir_path[MAXPGPATH]; // previously global var arclog_path /* TODO: Make it more specific */ diff --git a/src/pg_probackup_state.h b/src/pg_probackup_state.h index 56d852537..1d1ff88d0 100644 --- a/src/pg_probackup_state.h +++ b/src/pg_probackup_state.h @@ -13,11 +13,11 @@ typedef struct CatalogState { - /* $BACKUP_PATH */ + /* $BACKUP_DIR */ char catalog_path[MAXPGPATH]; //previously global var backup_path - /* $BACKUP_PATH/backups */ + /* $BACKUP_DIR/backups */ char backup_subdir_path[MAXPGPATH]; - /* $BACKUP_PATH/wal */ + /* $BACKUP_DIR/wal */ char wal_subdir_path[MAXPGPATH]; // previously global var arclog_path } CatalogState; diff --git a/tests/option_test.py b/tests/option_test.py index 9a829aaef..636c74327 100644 --- a/tests/option_test.py +++ b/tests/option_test.py @@ -34,7 +34,7 @@ def test_without_backup_path_3(self): except ProbackupException as e: self.assertIn( 'ERROR: No backup catalog path specified.\n' + \ - 'Please specify it either using environment variable BACKUP_PATH or\n' + \ + 'Please specify it either using environment variable BACKUP_DIR or\n' + \ 'command line option --backup-path (-B)', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) From ab05badc9d91b5aeec64c32d3d524747fbcd0a5b Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Tue, 13 Feb 2024 19:14:47 +0700 Subject: [PATCH 508/525] Change backup-path to backup-dir --- src/help.c | 124 +++++++++++++++--------------- tests/expected/option_help.out | 36 ++++----- tests/expected/option_help_ru.out | 36 ++++----- 3 files changed, 98 insertions(+), 98 deletions(-) diff --git a/src/help.c b/src/help.c index 
46acab886..7eced19bc 100644 --- a/src/help.c +++ b/src/help.c @@ -87,9 +87,9 @@ help_pg_probackup(void) printf(_("\n %s version\n"), PROGRAM_NAME); - printf(_("\n %s init -B backup-path\n"), PROGRAM_NAME); + printf(_("\n %s init -B backup-dir\n"), PROGRAM_NAME); - printf(_("\n %s set-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s set-config -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path]\n")); printf(_(" [--external-dirs=external-directories-paths]\n")); printf(_(" [--log-level-console=log-level-console]\n")); @@ -114,17 +114,17 @@ help_pg_probackup(void) printf(_(" [--archive-port=port] [--archive-user=username]\n")); printf(_(" [--help]\n")); - printf(_("\n %s set-backup -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s set-backup -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" -i backup-id [--ttl=interval] [--expire-time=timestamp]\n")); printf(_(" [--note=text]\n")); printf(_(" [--help]\n")); - printf(_("\n %s show-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s show-config -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [--format=format]\n")); printf(_(" [--default-units]\n")); printf(_(" [--help]\n")); - printf(_("\n %s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s backup -B backup-dir -b backup-mode --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [-C]\n")); printf(_(" [--stream [-S slot-name] [--temp-slot]]\n")); printf(_(" [--backup-pg-log] [-j num-threads] [--progress]\n")); @@ -157,7 +157,7 @@ help_pg_probackup(void) printf(_(" [--help]\n")); - printf(_("\n %s restore -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s restore -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [-i backup-id] [-j num-threads]\n")); printf(_(" [--recovery-target-time=time|--recovery-target-xid=xid\n")); printf(_(" |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]\n")); @@ -184,7 +184,7 @@ help_pg_probackup(void) printf(_(" [--archive-port=port] [--archive-user=username]\n")); printf(_(" [--help]\n")); - printf(_("\n %s validate -B backup-path [--instance=instance_name]\n"), PROGRAM_NAME); + printf(_("\n %s validate -B backup-dir [--instance=instance-name]\n"), PROGRAM_NAME); printf(_(" [-i backup-id] [--progress] [-j num-threads]\n")); printf(_(" [--recovery-target-time=time|--recovery-target-xid=xid\n")); printf(_(" |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]\n")); @@ -193,18 +193,18 @@ help_pg_probackup(void) printf(_(" [--skip-block-validation]\n")); printf(_(" [--help]\n")); - printf(_("\n %s checkdb [-B backup-path] [--instance=instance_name]\n"), PROGRAM_NAME); + printf(_("\n %s checkdb [-B backup-dir] [--instance=instance-name]\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [--progress] [-j num-threads]\n")); printf(_(" [--amcheck] [--skip-block-validation]\n")); printf(_(" [--heapallindexed] [--checkunique]\n")); printf(_(" [--help]\n")); - printf(_("\n %s show -B backup-path\n"), PROGRAM_NAME); - printf(_(" [--instance=instance_name [-i backup-id]]\n")); + printf(_("\n %s show -B backup-dir\n"), PROGRAM_NAME); + printf(_(" [--instance=instance-name [-i backup-id]]\n")); printf(_(" [--format=format] [--archive]\n")); printf(_(" [--no-color] [--help]\n")); - printf(_("\n %s delete -B backup-path --instance=instance_name\n"), 
PROGRAM_NAME); + printf(_("\n %s delete -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-j num-threads] [--progress]\n")); printf(_(" [--retention-redundancy=retention-redundancy]\n")); printf(_(" [--retention-window=retention-window]\n")); @@ -214,24 +214,24 @@ help_pg_probackup(void) printf(_(" [--dry-run] [--no-validate] [--no-sync]\n")); printf(_(" [--help]\n")); - printf(_("\n %s merge -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s merge -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" -i backup-id [--progress] [-j num-threads]\n")); printf(_(" [--no-validate] [--no-sync]\n")); printf(_(" [--help]\n")); - printf(_("\n %s add-instance -B backup-path -D pgdata-path\n"), PROGRAM_NAME); - printf(_(" --instance=instance_name\n")); + printf(_("\n %s add-instance -B backup-dir -D pgdata-path\n"), PROGRAM_NAME); + printf(_(" --instance=instance-name\n")); printf(_(" [--external-dirs=external-directories-paths]\n")); printf(_(" [--remote-proto] [--remote-host]\n")); printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n")); printf(_(" [--help]\n")); - printf(_("\n %s del-instance -B backup-path\n"), PROGRAM_NAME); - printf(_(" --instance=instance_name\n")); + printf(_("\n %s del-instance -B backup-dir\n"), PROGRAM_NAME); + printf(_(" --instance=instance-name\n")); printf(_(" [--help]\n")); - printf(_("\n %s archive-push -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s archive-push -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" --wal-file-name=wal-file-name\n")); printf(_(" [--wal-file-path=wal-file-path]\n")); printf(_(" [-j num-threads] [--batch-size=batch_size]\n")); @@ -245,7 +245,7 @@ help_pg_probackup(void) printf(_(" [--ssh-options]\n")); printf(_(" [--help]\n")); - printf(_("\n %s archive-get -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s archive-get -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" --wal-file-path=wal-file-path\n")); printf(_(" --wal-file-name=wal-file-name\n")); printf(_(" [-j num-threads] [--batch-size=batch_size]\n")); @@ -295,14 +295,14 @@ help_internal(void) static void help_init(void) { - printf(_("\n%s init -B backup-path\n\n"), PROGRAM_NAME); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n\n")); + printf(_("\n%s init -B backup-dir\n\n"), PROGRAM_NAME); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n\n")); } static void help_backup(void) { - printf(_("\n%s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s backup -B backup-dir -b backup-mode --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [-C]\n")); printf(_(" [--stream [-S slot-name] [--temp-slot]]\n")); printf(_(" [--backup-pg-log] [-j num-threads] [--progress]\n")); @@ -333,9 +333,9 @@ help_backup(void) printf(_(" [--ssh-options]\n")); printf(_(" [--ttl=interval] [--expire-time=timestamp] [--note=text]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); printf(_(" -b, --backup-mode=backup-mode backup mode=FULL|PAGE|DELTA|PTRACK\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -D, --pgdata=pgdata-path location of the database storage 
area\n")); printf(_(" -C, --smooth-checkpoint do smooth checkpoint before backup\n")); printf(_(" --stream stream the transaction log and include it in the backup\n")); @@ -442,7 +442,7 @@ help_backup(void) static void help_restore(void) { - printf(_("\n%s restore -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s restore -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [-i backup-id] [-j num-threads]\n")); printf(_(" [--progress] [--force] [--no-sync]\n")); printf(_(" [--no-validate] [--skip-block-validation]\n")); @@ -469,8 +469,8 @@ help_restore(void) printf(_(" [--archive-host=hostname] [--archive-port=port]\n")); printf(_(" [--archive-user=username]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -D, --pgdata=pgdata-path location of the database storage area\n")); printf(_(" -i, --backup-id=backup-id backup to restore\n")); @@ -577,7 +577,7 @@ help_restore(void) static void help_validate(void) { - printf(_("\n%s validate -B backup-path [--instance=instance_name]\n"), PROGRAM_NAME); + printf(_("\n%s validate -B backup-dir [--instance=instance-name]\n"), PROGRAM_NAME); printf(_(" [-i backup-id] [--progress] [-j num-threads]\n")); printf(_(" [--recovery-target-time=time|--recovery-target-xid=xid\n")); printf(_(" |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]\n")); @@ -585,8 +585,8 @@ help_validate(void) printf(_(" [--recovery-target-name=target-name]\n")); printf(_(" [--skip-block-validation]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -i, --backup-id=backup-id backup to validate\n")); printf(_(" --progress show progress\n")); @@ -634,13 +634,13 @@ help_validate(void) static void help_checkdb(void) { - printf(_("\n%s checkdb [-B backup-path] [--instance=instance_name]\n"), PROGRAM_NAME); + printf(_("\n%s checkdb [-B backup-dir] [--instance=instance-name]\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [-j num-threads] [--progress]\n")); printf(_(" [--amcheck] [--skip-block-validation]\n")); printf(_(" [--heapallindexed] [--checkunique]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -D, --pgdata=pgdata-path location of the database storage area\n")); printf(_(" --progress show progress\n")); @@ -695,12 +695,12 @@ help_checkdb(void) static void help_show(void) { - printf(_("\n%s show -B backup-path\n"), PROGRAM_NAME); - printf(_(" [--instance=instance_name [-i backup-id]]\n")); + printf(_("\n%s show -B backup-dir\n"), PROGRAM_NAME); + printf(_(" [--instance=instance-name [-i backup-id]]\n")); printf(_(" [--format=format] [--archive]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name show info about specific instance\n")); + 
printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name show info about specific instance\n")); printf(_(" -i, --backup-id=backup-id show info about specific backups\n")); printf(_(" --archive show WAL archive information\n")); printf(_(" --format=format show format=PLAIN|JSON\n")); @@ -710,7 +710,7 @@ help_show(void) static void help_delete(void) { - printf(_("\n%s delete -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s delete -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-i backup-id | --delete-expired | --merge-expired] [--delete-wal]\n")); printf(_(" [-j num-threads] [--progress]\n")); printf(_(" [--retention-redundancy=retention-redundancy]\n")); @@ -718,8 +718,8 @@ help_delete(void) printf(_(" [--wal-depth=wal-depth]\n")); printf(_(" [--no-validate] [--no-sync]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -i, --backup-id=backup-id backup to delete\n")); printf(_(" -j, --threads=NUM number of parallel threads\n")); printf(_(" --progress show progress\n")); @@ -773,7 +773,7 @@ help_delete(void) static void help_merge(void) { - printf(_("\n%s merge -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s merge -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" -i backup-id [-j num-threads] [--progress]\n")); printf(_(" [--no-validate] [--no-sync]\n")); printf(_(" [--log-level-console=log-level-console]\n")); @@ -786,8 +786,8 @@ help_merge(void) printf(_(" [--log-rotation-size=log-rotation-size]\n")); printf(_(" [--log-rotation-age=log-rotation-age]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -i, --backup-id=backup-id backup to merge\n")); printf(_(" -j, --threads=NUM number of parallel threads\n")); @@ -827,7 +827,7 @@ help_merge(void) static void help_set_backup(void) { - printf(_("\n%s set-backup -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s set-backup -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" -i backup-id\n")); printf(_(" [--ttl=interval] [--expire-time=time] [--note=text]\n\n")); @@ -843,7 +843,7 @@ help_set_backup(void) static void help_set_config(void) { - printf(_("\n%s set-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s set-config -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path]\n")); printf(_(" [-E external-directories-paths]\n")); printf(_(" [--restore-command=cmdline]\n")); @@ -866,8 +866,8 @@ help_set_config(void) printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -D, --pgdata=pgdata-path location of 
the database storage area\n")); printf(_(" -E --external-dirs=external-directories-paths\n")); printf(_(" backup some directories not from pgdata \n")); @@ -949,11 +949,11 @@ help_set_config(void) static void help_show_config(void) { - printf(_("\n%s show-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s show-config -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [--format=format]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" --format=format show format=PLAIN|JSON\n")); printf(_(" --default-units show memory and time values in default units\n\n")); } @@ -961,16 +961,16 @@ help_show_config(void) static void help_add_instance(void) { - printf(_("\n%s add-instance -B backup-path -D pgdata-path\n"), PROGRAM_NAME); - printf(_(" --instance=instance_name\n")); + printf(_("\n%s add-instance -B backup-dir -D pgdata-path\n"), PROGRAM_NAME); + printf(_(" --instance=instance-name\n")); printf(_(" [-E external-directory-path]\n")); printf(_(" [--remote-proto] [--remote-host]\n")); printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); printf(_(" -D, --pgdata=pgdata-path location of the database storage area\n")); - printf(_(" --instance=instance_name name of the new instance\n")); + printf(_(" --instance=instance-name name of the new instance\n")); printf(_(" -E --external-dirs=external-directories-paths\n")); printf(_(" backup some directories not from pgdata \n")); @@ -1014,16 +1014,16 @@ help_add_instance(void) static void help_del_instance(void) { - printf(_("\n%s del-instance -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s del-instance -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance to delete\n\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance to delete\n\n")); } static void help_archive_push(void) { - printf(_("\n%s archive-push -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s archive-push -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" --wal-file-name=wal-file-name\n")); printf(_(" [--wal-file-path=wal-file-path]\n")); printf(_(" [-j num-threads] [--batch-size=batch_size]\n")); @@ -1036,8 +1036,8 @@ help_archive_push(void) printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance to delete\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance to delete\n")); printf(_(" --wal-file-name=wal-file-name\n")); printf(_(" name of the file to copy into WAL archive\n")); printf(_(" --wal-file-path=wal-file-path\n")); @@ -1095,7 +1095,7 @@ help_archive_push(void) static 
void help_archive_get(void) { - printf(_("\n%s archive-get -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s archive-get -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" --wal-file-name=wal-file-name\n")); printf(_(" [--wal-file-path=wal-file-path]\n")); printf(_(" [-j num-threads] [--batch-size=batch_size]\n")); @@ -1104,8 +1104,8 @@ help_archive_get(void) printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance to delete\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance to delete\n")); printf(_(" --wal-file-path=wal-file-path\n")); printf(_(" relative destination path name of the WAL file on the server\n")); printf(_(" --wal-file-name=wal-file-name\n")); diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index 618a0d156..985ba7fec 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -5,9 +5,9 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. pg_probackup version - pg_probackup init -B backup-path + pg_probackup init -B backup-dir - pg_probackup set-config -B backup-path --instance=instance_name + pg_probackup set-config -B backup-dir --instance=instance-name [-D pgdata-path] [--external-dirs=external-directories-paths] [--log-level-console=log-level-console] @@ -32,17 +32,17 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--archive-port=port] [--archive-user=username] [--help] - pg_probackup set-backup -B backup-path --instance=instance_name + pg_probackup set-backup -B backup-dir --instance=instance-name -i backup-id [--ttl=interval] [--expire-time=timestamp] [--note=text] [--help] - pg_probackup show-config -B backup-path --instance=instance_name + pg_probackup show-config -B backup-dir --instance=instance-name [--format=format] [--default-units] [--help] - pg_probackup backup -B backup-path -b backup-mode --instance=instance_name + pg_probackup backup -B backup-dir -b backup-mode --instance=instance-name [-D pgdata-path] [-C] [--stream [-S slot-name] [--temp-slot]] [--backup-pg-log] [-j num-threads] [--progress] @@ -74,7 +74,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--ttl=interval] [--expire-time=timestamp] [--note=text] [--help] - pg_probackup restore -B backup-path --instance=instance_name + pg_probackup restore -B backup-dir --instance=instance-name [-D pgdata-path] [-i backup-id] [-j num-threads] [--recovery-target-time=time|--recovery-target-xid=xid |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] @@ -101,7 +101,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--archive-port=port] [--archive-user=username] [--help] - pg_probackup validate -B backup-path [--instance=instance_name] + pg_probackup validate -B backup-dir [--instance=instance-name] [-i backup-id] [--progress] [-j num-threads] [--recovery-target-time=time|--recovery-target-xid=xid |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] @@ -110,18 +110,18 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. 
[--skip-block-validation] [--help] - pg_probackup checkdb [-B backup-path] [--instance=instance_name] + pg_probackup checkdb [-B backup-dir] [--instance=instance-name] [-D pgdata-path] [--progress] [-j num-threads] [--amcheck] [--skip-block-validation] [--heapallindexed] [--checkunique] [--help] - pg_probackup show -B backup-path - [--instance=instance_name [-i backup-id]] + pg_probackup show -B backup-dir + [--instance=instance-name [-i backup-id]] [--format=format] [--archive] [--no-color] [--help] - pg_probackup delete -B backup-path --instance=instance_name + pg_probackup delete -B backup-dir --instance=instance-name [-j num-threads] [--progress] [--retention-redundancy=retention-redundancy] [--retention-window=retention-window] @@ -131,24 +131,24 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--dry-run] [--no-validate] [--no-sync] [--help] - pg_probackup merge -B backup-path --instance=instance_name + pg_probackup merge -B backup-dir --instance=instance-name -i backup-id [--progress] [-j num-threads] [--no-validate] [--no-sync] [--help] - pg_probackup add-instance -B backup-path -D pgdata-path - --instance=instance_name + pg_probackup add-instance -B backup-dir -D pgdata-path + --instance=instance-name [--external-dirs=external-directories-paths] [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] [--help] - pg_probackup del-instance -B backup-path - --instance=instance_name + pg_probackup del-instance -B backup-dir + --instance=instance-name [--help] - pg_probackup archive-push -B backup-path --instance=instance_name + pg_probackup archive-push -B backup-dir --instance=instance-name --wal-file-name=wal-file-name [--wal-file-path=wal-file-path] [-j num-threads] [--batch-size=batch_size] @@ -162,7 +162,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. 
[--ssh-options] [--help] - pg_probackup archive-get -B backup-path --instance=instance_name + pg_probackup archive-get -B backup-dir --instance=instance-name --wal-file-path=wal-file-path --wal-file-name=wal-file-name [-j num-threads] [--batch-size=batch_size] diff --git a/tests/expected/option_help_ru.out b/tests/expected/option_help_ru.out index 005c74ebb..2fe516bdc 100644 --- a/tests/expected/option_help_ru.out +++ b/tests/expected/option_help_ru.out @@ -5,9 +5,9 @@ pg_probackup - утилита для управления резервным к pg_probackup version - pg_probackup init -B backup-path + pg_probackup init -B backup-dir - pg_probackup set-config -B backup-path --instance=instance_name + pg_probackup set-config -B backup-dir --instance=instance-name [-D pgdata-path] [--external-dirs=external-directories-paths] [--log-level-console=log-level-console] @@ -32,17 +32,17 @@ pg_probackup - утилита для управления резервным к [--archive-port=port] [--archive-user=username] [--help] - pg_probackup set-backup -B backup-path --instance=instance_name + pg_probackup set-backup -B backup-dir --instance=instance-name -i backup-id [--ttl=interval] [--expire-time=timestamp] [--note=text] [--help] - pg_probackup show-config -B backup-path --instance=instance_name + pg_probackup show-config -B backup-dir --instance=instance-name [--format=format] [--default-units] [--help] - pg_probackup backup -B backup-path -b backup-mode --instance=instance_name + pg_probackup backup -B backup-dir -b backup-mode --instance=instance-name [-D pgdata-path] [-C] [--stream [-S slot-name] [--temp-slot]] [--backup-pg-log] [-j num-threads] [--progress] @@ -74,7 +74,7 @@ pg_probackup - утилита для управления резервным к [--ttl=interval] [--expire-time=timestamp] [--note=text] [--help] - pg_probackup restore -B backup-path --instance=instance_name + pg_probackup restore -B backup-dir --instance=instance-name [-D pgdata-path] [-i backup-id] [-j num-threads] [--recovery-target-time=time|--recovery-target-xid=xid |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] @@ -101,7 +101,7 @@ pg_probackup - утилита для управления резервным к [--archive-port=port] [--archive-user=username] [--help] - pg_probackup validate -B backup-path [--instance=instance_name] + pg_probackup validate -B backup-dir [--instance=instance-name] [-i backup-id] [--progress] [-j num-threads] [--recovery-target-time=time|--recovery-target-xid=xid |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] @@ -110,18 +110,18 @@ pg_probackup - утилита для управления резервным к [--skip-block-validation] [--help] - pg_probackup checkdb [-B backup-path] [--instance=instance_name] + pg_probackup checkdb [-B backup-dir] [--instance=instance-name] [-D pgdata-path] [--progress] [-j num-threads] [--amcheck] [--skip-block-validation] [--heapallindexed] [--checkunique] [--help] - pg_probackup show -B backup-path - [--instance=instance_name [-i backup-id]] + pg_probackup show -B backup-dir + [--instance=instance-name [-i backup-id]] [--format=format] [--archive] [--no-color] [--help] - pg_probackup delete -B backup-path --instance=instance_name + pg_probackup delete -B backup-dir --instance=instance-name [-j num-threads] [--progress] [--retention-redundancy=retention-redundancy] [--retention-window=retention-window] @@ -131,24 +131,24 @@ pg_probackup - утилита для управления резервным к [--dry-run] [--no-validate] [--no-sync] [--help] - pg_probackup merge -B backup-path --instance=instance_name + pg_probackup merge -B backup-dir --instance=instance-name -i backup-id 
[--progress] [-j num-threads] [--no-validate] [--no-sync] [--help] - pg_probackup add-instance -B backup-path -D pgdata-path - --instance=instance_name + pg_probackup add-instance -B backup-dir -D pgdata-path + --instance=instance-name [--external-dirs=external-directories-paths] [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] [--help] - pg_probackup del-instance -B backup-path - --instance=instance_name + pg_probackup del-instance -B backup-dir + --instance=instance-name [--help] - pg_probackup archive-push -B backup-path --instance=instance_name + pg_probackup archive-push -B backup-dir --instance=instance-name --wal-file-name=wal-file-name [--wal-file-path=wal-file-path] [-j num-threads] [--batch-size=batch_size] @@ -162,7 +162,7 @@ pg_probackup - утилита для управления резервным к [--ssh-options] [--help] - pg_probackup archive-get -B backup-path --instance=instance_name + pg_probackup archive-get -B backup-dir --instance=instance-name --wal-file-path=wal-file-path --wal-file-name=wal-file-name [-j num-threads] [--batch-size=batch_size] From dffc2b2fcd795cc3e8f4a0d99509ef38971ffd2f Mon Sep 17 00:00:00 2001 From: vshepard Date: Mon, 4 Mar 2024 13:26:40 +0100 Subject: [PATCH 509/525] PBCKP-805 add unlock mutex in data.c --- src/data.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/data.c b/src/data.c index a287218ea..1a9616bae 100644 --- a/src/data.c +++ b/src/data.c @@ -2490,7 +2490,10 @@ write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map, file->rel_path, file->hdr_off, z_len, file->hdr_crc); if (fwrite(zheaders, 1, z_len, hdr_map->fp) != z_len) + { + pthread_mutex_unlock(&(hdr_map->mutex)); elog(ERROR, "Cannot write to file \"%s\": %s", map_path, strerror(errno)); + } file->hdr_size = z_len; /* save the length of compressed headers */ hdr_map->offset += z_len; /* update current offset in map */ From 2fd0dda488520ca9507a9d60a4131a88993388cd Mon Sep 17 00:00:00 2001 From: Viktoria Shepard Date: Thu, 7 Mar 2024 23:28:44 +0300 Subject: [PATCH 510/525] Print remote host --- src/restore.c | 11 +++++++---- tests/requirements.txt | 4 ++-- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/src/restore.c b/src/restore.c index 535faebfb..44e06f2f6 100644 --- a/src/restore.c +++ b/src/restore.c @@ -131,6 +131,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg bool cleanup_pgdata = false; bool backup_has_tblspc = true; /* backup contain tablespace */ XLogRecPtr shift_lsn = InvalidXLogRecPtr; + char timestamp[100]; if (instanceState == NULL) elog(ERROR, "Required parameter not specified: --instance"); @@ -687,6 +688,12 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg backup_id_of(dest_backup), dest_backup->server_version); + time2iso(timestamp, lengthof(timestamp), dest_backup->start_time, false); + if (instance_config.remote.host) + elog(INFO, "Restoring the database from the backup starting at %s on %s", timestamp, instance_config.remote.host); + else + elog(INFO, "Restoring the database from the backup starting at %s", timestamp); + restore_chain(dest_backup, parent_chain, dbOid_exclude_list, params, instance_config.pgdata, no_sync, cleanup_pgdata, backup_has_tblspc); @@ -720,7 +727,6 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, bool backup_has_tblspc) { int i; - char timestamp[100]; parray *pgdata_files = NULL; parray *dest_files = NULL; parray *external_dirs = NULL; @@ -743,9 +749,6 @@ restore_chain(pgBackup 
*dest_backup, parray *parent_chain, time_t start_time, end_time; /* Preparations for actual restoring */ - time2iso(timestamp, lengthof(timestamp), dest_backup->start_time, false); - elog(INFO, "Restoring the database from backup at %s", timestamp); - dest_files = get_backup_filelist(dest_backup, true); /* Lock backup chain and make sanity checks */ diff --git a/tests/requirements.txt b/tests/requirements.txt index 62efb0e68..e2ac18bea 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -5,9 +5,9 @@ # git+https://github.com/postgrespro/testgres.git@ # 3. From a local directory # /path/to/local/directory/testgres -git+https://github.com/postgrespro/testgres.git@master#egg=testgres-pg_probackup2&subdirectory=testgres/plugins/pg_probackup2 +git+https://github.com/postgrespro/testgres.git@archive-command-exec#egg=testgres-pg_probackup2&subdirectory=testgres/plugins/pg_probackup2 allure-pytest deprecation pexpect -pytest +pytest==7.4.3 pytest-xdist From 3b741ee4f5f68390a89cc4e10968604cc38cbcef Mon Sep 17 00:00:00 2001 From: Oleg Gurev Date: Mon, 20 Nov 2023 12:00:33 +0300 Subject: [PATCH 511/525] [PBCKP-804] Test_AssertionError_Python3.11 (no attribute 'errors') --- tests/helpers/ptrack_helpers.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index da8ece15e..27d982856 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -423,8 +423,12 @@ def is_test_result_ok(test_case): result = test_case.defaultTestResult() # These two methods have no side effects test_case._feedErrorsToResult(result, test_case._outcome.errors) else: - # Python 3.11+ + # Python 3.11+ and pytest 5.3.5+ result = test_case._outcome.result + if not hasattr(result, 'errors'): + result.errors = [] + if not hasattr(result, 'failures'): + result.failures = [] else: # Python 2.7, 3.0-3.3 result = getattr(test_case, '_outcomeForDoCleanups', test_case._resultForDoCleanups) @@ -2252,4 +2256,4 @@ def __init__(self, is_datafile: bool): self.is_datafile = is_datafile class ContentDir(object): - __slots__ = ('mode') \ No newline at end of file + __slots__ = ('mode') From e39a31e369ab5d6a1b6fe99e6235fa6bd647abe4 Mon Sep 17 00:00:00 2001 From: Sofia Kopikova Date: Tue, 27 Feb 2024 21:24:13 +0300 Subject: [PATCH 512/525] PBCKP-964 fix bug with incremental restore of relation with more than one segment (always wrong checksum on non-zero segment) + add tests for it --- tests/incr_restore_test.py | 120 +++++++++++++++++++++++++++++++++++++ 1 file changed, 120 insertions(+) diff --git a/tests/incr_restore_test.py b/tests/incr_restore_test.py index eea0e313b..6a2164098 100644 --- a/tests/incr_restore_test.py +++ b/tests/incr_restore_test.py @@ -2510,3 +2510,123 @@ def test_incr_restore_issue_313(self): backup_id=last_backup_id, options=['--progress', '--incremental-mode=checksum']) node.slow_start() self.compare_pgdata(pgdata, self.pgdata_content(node.data_dir)) + + # @unittest.skip("skip") + def test_skip_pages_at_non_zero_segment_checksum(self): + if self.remote: + self.skipTest("Skipped because this test doesn't work properly in remote mode yet") + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + pg_options={'wal_log_hints': 'on'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + 
self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # create table of size > 1 GB, so it will have several segments + node.safe_psql( + 'postgres', + "create table t as select i as a, i*2 as b, i*3 as c, i*4 as d, i*5 as e " + "from generate_series(1,20600000) i; " + "CHECKPOINT ") + + filepath = node.safe_psql( + 'postgres', + "SELECT pg_relation_filepath('t')" + ).decode('utf-8').rstrip() + + # segment .1 must exist in order to proceed this test + self.assertTrue(os.path.exists(f'{os.path.join(node.data_dir, filepath)}.1')) + + # do full backup + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + 'postgres', + "DELETE FROM t WHERE a < 101; " + "CHECKPOINT") + + # do incremental backup + self.backup_node(backup_dir, 'node', node, backup_type='page') + + pgdata = self.pgdata_content(node.data_dir) + + node.safe_psql( + 'postgres', + "DELETE FROM t WHERE a < 201; " + "CHECKPOINT") + + node.stop() + + self.restore_node( + backup_dir, 'node', node, options=["-j", "4", "--incremental-mode=checksum", "--log-level-console=INFO"]) + + self.assertNotIn('WARNING: Corruption detected in file', self.output, + 'Incremental restore copied pages from .1 datafile segment that were not changed') + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_skip_pages_at_non_zero_segment_lsn(self): + if self.remote: + self.skipTest("Skipped because this test doesn't work properly in remote mode yet") + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + pg_options={'wal_log_hints': 'on'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # create table of size > 1 GB, so it will have several segments + node.safe_psql( + 'postgres', + "create table t as select i as a, i*2 as b, i*3 as c, i*4 as d, i*5 as e " + "from generate_series(1,20600000) i; " + "CHECKPOINT ") + + filepath = node.safe_psql( + 'postgres', + "SELECT pg_relation_filepath('t')" + ).decode('utf-8').rstrip() + + # segment .1 must exist in order to proceed this test + self.assertTrue(os.path.exists(f'{os.path.join(node.data_dir, filepath)}.1')) + + # do full backup + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + 'postgres', + "DELETE FROM t WHERE a < 101; " + "CHECKPOINT") + + # do incremental backup + self.backup_node(backup_dir, 'node', node, backup_type='page') + + pgdata = self.pgdata_content(node.data_dir) + + node.safe_psql( + 'postgres', + "DELETE FROM t WHERE a < 201; " + "CHECKPOINT") + + node.stop() + + self.restore_node( + backup_dir, 'node', node, options=["-j", "4", "--incremental-mode=lsn", "--log-level-console=INFO"]) + + self.assertNotIn('WARNING: Corruption detected in file', self.output, + 'Incremental restore copied pages from .1 datafile segment that were not changed') + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) From 63c281066d8926aa9096ecc92520b8516a806524 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 14 Mar 2024 17:53:31 +0300 Subject: [PATCH 513/525] fix restoring highly compressed WAL gzip surprisingly emits a lot of zeroes when compression ratio is high. 
It triggered a branch where FIO_PAGE_ZERO is emitted by the agent but not handled properly in fio_send_file_gz. Fix it. --- src/utils/file.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index d39d3e320..fa08939f5 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2537,11 +2537,22 @@ fio_send_file_gz(const char *from_fullpath, FILE* out, char **errormsg) exit_code = hdr.arg; goto cleanup; } - else if (hdr.cop == FIO_PAGE) + else if (hdr.cop == FIO_PAGE || hdr.cop == FIO_PAGE_ZERO) { int rc; - Assert(hdr.size <= CHUNK_SIZE); - IO_CHECK(fio_read_all(fio_stdin, in_buf, hdr.size), hdr.size); + unsigned size; + if (hdr.cop == FIO_PAGE) + { + Assert(hdr.size <= CHUNK_SIZE); + size = hdr.size; + IO_CHECK(fio_read_all(fio_stdin, in_buf, hdr.size), hdr.size); + } + else + { + Assert(hdr.arg <= CHUNK_SIZE); + size = hdr.arg; + memset(in_buf, 0, hdr.arg); + } /* We have received a chunk of compressed data, lets decompress it */ if (strm == NULL) @@ -2552,7 +2563,7 @@ fio_send_file_gz(const char *from_fullpath, FILE* out, char **errormsg) /* The fields next_in, avail_in initialized before init */ strm->next_in = (Bytef *)in_buf; - strm->avail_in = hdr.size; + strm->avail_in = size; rc = inflateInit2(strm, 15 + 16); @@ -2569,7 +2580,7 @@ fio_send_file_gz(const char *from_fullpath, FILE* out, char **errormsg) else { strm->next_in = (Bytef *)in_buf; - strm->avail_in = hdr.size; + strm->avail_in = size; } strm->next_out = (Bytef *)out_buf; /* output buffer */ From e9b8fcb1b25dd3a639e718444ce57770582bc259 Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Tue, 23 May 2023 08:55:59 +0700 Subject: [PATCH 514/525] Remove the version test --- tests/expected/option_version.out | 1 - tests/option_test.py | 9 --------- 2 files changed, 10 deletions(-) delete mode 100644 tests/expected/option_version.out diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out deleted file mode 100644 index 0d50cb268..000000000 --- a/tests/expected/option_version.out +++ /dev/null @@ -1 +0,0 @@ -pg_probackup 2.5.12 diff --git a/tests/option_test.py b/tests/option_test.py index 636c74327..b74b9714f 100644 --- a/tests/option_test.py +++ b/tests/option_test.py @@ -15,15 +15,6 @@ def test_help_1(self): help_out.read().decode("utf-8") ) - # @unittest.skip("skip") - def test_version_2(self): - """help options""" - with open(os.path.join(self.dir_path, "expected/option_version.out"), "rb") as version_out: - self.assertIn( - version_out.read().decode("utf-8").strip(), - self.run_pb(["--version"]) - ) - # @unittest.skip("skip") def test_without_backup_path_3(self): """backup command failure without backup mode option""" From 91cddad61d623b869157abf816c0264dabb6b3df Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Fri, 5 Apr 2024 12:44:29 +0700 Subject: [PATCH 515/525] Up version --- src/pg_probackup.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 7b884c90b..186666a9b 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -356,7 +356,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.13" +#define PROGRAM_VERSION "2.5.14" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20509 From
da79091ace70cc0cad08002ff0276162bfbbb66c Mon Sep 17 00:00:00 2001 From: vshepard Date: Fri, 12 Apr 2024 07:23:45 +0200 Subject: [PATCH 516/525] Fix test_unfinished_merge for REL_2_5 --- tests/merge_test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/merge_test.py b/tests/merge_test.py index a9bc6fe68..1d40af7f7 100644 --- a/tests/merge_test.py +++ b/tests/merge_test.py @@ -2768,7 +2768,9 @@ def test_unfinished_merge(self): print(self.show_pb(backup_dir, node_name, as_json=False, as_text=True)) - for expected, real in zip(states, self.show_pb(backup_dir, node_name), strict=True): + backup_infos = self.show_pb(backup_dir, node_name) + self.assertEqual(len(backup_infos), len(states)) + for expected, real in zip(states, backup_infos): self.assertEqual(expected, real['status']) with self.assertRaisesRegex(ProbackupException, From 79d2b9d21cf6db670ec128994107f739dfdb9fd3 Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Thu, 18 Apr 2024 12:54:28 +0300 Subject: [PATCH 517/525] Name the backup by its ID --- src/restore.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/restore.c b/src/restore.c index 44e06f2f6..f9310dcee 100644 --- a/src/restore.c +++ b/src/restore.c @@ -131,7 +131,6 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg bool cleanup_pgdata = false; bool backup_has_tblspc = true; /* backup contain tablespace */ XLogRecPtr shift_lsn = InvalidXLogRecPtr; - char timestamp[100]; if (instanceState == NULL) elog(ERROR, "Required parameter not specified: --instance"); @@ -688,11 +687,10 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg backup_id_of(dest_backup), dest_backup->server_version); - time2iso(timestamp, lengthof(timestamp), dest_backup->start_time, false); if (instance_config.remote.host) - elog(INFO, "Restoring the database from the backup starting at %s on %s", timestamp, instance_config.remote.host); + elog(INFO, "Restoring the database from backup %s on %s", backup_id_of(dest_backup), instance_config.remote.host); else - elog(INFO, "Restoring the database from the backup starting at %s", timestamp); + elog(INFO, "Restoring the database from backup %s", backup_id_of(dest_backup)); restore_chain(dest_backup, parent_chain, dbOid_exclude_list, params, instance_config.pgdata, no_sync, cleanup_pgdata, backup_has_tblspc); From d29b005d23f04c5549b4f4bfd2ba60a6a53e6e32 Mon Sep 17 00:00:00 2001 From: oleg gurev Date: Mon, 6 May 2024 04:39:06 +0300 Subject: [PATCH 518/525] Rename default-units to no-scale-units --- doc/pgprobackup.xml | 13 +++++++++++++ src/configure.c | 10 +++++----- src/help.c | 4 ++-- src/pg_probackup.c | 6 +++--- src/pg_probackup.h | 2 +- src/utils/configuration.c | 4 ++-- src/utils/configuration.h | 2 +- tests/expected/option_help.out | 2 +- tests/expected/option_help_ru.out | 2 +- tests/option_test.py | 10 +++++----- 10 files changed, 34 insertions(+), 21 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 49e74e626..a73041f31 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -4267,6 +4267,7 @@ pg_probackup set-backup -B backup_dir --instance show-config pg_probackup show-config -B backup_dir --instance instance_name [--format=plain|json] +[--no-scale-units] [logging_options] Displays the contents of the pg_probackup.conf configuration @@ -4277,6 +4278,18 @@ pg_probackup show-config -B backup_dir --instance JSON format. By default, configuration settings are shown as plain text. 
+ + You can also specify the + option to display time and memory configuration settings in their base (unscaled) units. + Otherwise, the values are scaled to larger units for optimal display. + For example, if archive-timeout is 300, then + 5min is displayed, but if archive-timeout + is 301, then 301s is displayed. + Also, if the option is specified, configuration + settings are displayed without units and for the JSON format, + numeric and boolean values are not enclosed in quotes. This facilitates parsing + the output. + To edit pg_probackup.conf, use the command. diff --git a/src/configure.c b/src/configure.c index 4f6774d55..964548343 100644 --- a/src/configure.c +++ b/src/configure.c @@ -269,7 +269,7 @@ static const char *current_group = NULL; * Show configure options including default values. */ void -do_show_config(bool show_default_units) +do_show_config(bool show_base_units) { int i; @@ -277,13 +277,13 @@ do_show_config(bool show_default_units) for (i = 0; instance_options[i].type; i++) { - if (show_default_units && strchr("bBiIuU", instance_options[i].type) && instance_options[i].get_value == *option_get_value) - instance_options[i].flags |= GET_VAL_IN_DEFAULT_UNITS; /* Set flag */ + if (show_base_units && strchr("bBiIuU", instance_options[i].type) && instance_options[i].get_value == *option_get_value) + instance_options[i].flags |= GET_VAL_IN_BASE_UNITS; /* Set flag */ if (show_format == SHOW_PLAIN) show_configure_plain(&instance_options[i]); else show_configure_json(&instance_options[i]); - instance_options[i].flags &= ~(GET_VAL_IN_DEFAULT_UNITS); /* Reset flag. It was resetted in option_get_value(). Probably this reset isn't needed */ + instance_options[i].flags &= ~(GET_VAL_IN_BASE_UNITS); /* Reset flag. It was resetted in option_get_value(). 
Probably this reset isn't needed */ } show_configure_end(); @@ -804,6 +804,6 @@ show_configure_json(ConfigOption *opt) return; json_add_value(&show_buf, opt->lname, value, json_level, - !(opt->flags & GET_VAL_IN_DEFAULT_UNITS)); + !(opt->flags & GET_VAL_IN_BASE_UNITS)); pfree(value); } diff --git a/src/help.c b/src/help.c index 7eced19bc..48cc1f524 100644 --- a/src/help.c +++ b/src/help.c @@ -121,7 +121,7 @@ help_pg_probackup(void) printf(_("\n %s show-config -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [--format=format]\n")); - printf(_(" [--default-units]\n")); + printf(_(" [--no-scale-units]\n")); printf(_(" [--help]\n")); printf(_("\n %s backup -B backup-dir -b backup-mode --instance=instance-name\n"), PROGRAM_NAME); @@ -955,7 +955,7 @@ help_show_config(void) printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); printf(_(" --instance=instance-name name of the instance\n")); printf(_(" --format=format show format=PLAIN|JSON\n")); - printf(_(" --default-units show memory and time values in default units\n\n")); + printf(_(" --no-scale-units show memory and time values in default units\n\n")); } static void diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 09817fdde..e50b05995 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -164,7 +164,7 @@ bool no_validate_wal = false; /* show options */ ShowFormat show_format = SHOW_PLAIN; bool show_archive = false; -static bool show_default_units = false; +static bool show_base_units = false; /* set-backup options */ int64 ttl = -1; @@ -277,7 +277,7 @@ static ConfigOption cmd_options[] = { 'f', 165, "format", opt_show_format, SOURCE_CMD_STRICT }, { 'b', 166, "archive", &show_archive, SOURCE_CMD_STRICT }, /* show-config options */ - { 'b', 167, "default-units", &show_default_units,SOURCE_CMD_STRICT }, + { 'b', 167, "no-scale-units", &show_base_units,SOURCE_CMD_STRICT }, /* set-backup options */ { 'I', 170, "ttl", &ttl, SOURCE_CMD_STRICT, SOURCE_DEFAULT, 0, OPTION_UNIT_S, option_get_value}, { 's', 171, "expire-time", &expire_time_string, SOURCE_CMD_STRICT }, @@ -1052,7 +1052,7 @@ main(int argc, char *argv[]) do_merge(instanceState, current.backup_id, no_validate, no_sync); break; case SHOW_CONFIG_CMD: - do_show_config(show_default_units); + do_show_config(show_base_units); break; case SET_CONFIG_CMD: do_set_config(instanceState, false); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 186666a9b..1f4780f58 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -939,7 +939,7 @@ extern void do_archive_get(InstanceState *instanceState, InstanceConfig *instanc char *wal_file_name, int batch_size, bool validate_wal); /* in configure.c */ -extern void do_show_config(bool show_default_units); +extern void do_show_config(bool show_base_units); extern void do_set_config(InstanceState *instanceState, bool missing_ok); extern void init_config(InstanceConfig *config, const char *instance_name); extern InstanceConfig *readInstanceConfigFile(InstanceState *instanceState); diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 6b2382996..f049aa1be 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -678,7 +678,7 @@ config_set_opt(ConfigOption options[], void *var, OptionSource source) /* * Return value of the function in the string representation. Result is * allocated string. 
- * We can set GET_VAL_IN_DEFAULT_UNITS flag in opt->flags + * We can set GET_VAL_IN_BASE_UNITS flag in opt->flags * before call option_get_value() to get option value in default units */ char * @@ -694,7 +694,7 @@ option_get_value(ConfigOption *opt) */ if (opt->flags & OPTION_UNIT) { - if (opt->flags & GET_VAL_IN_DEFAULT_UNITS){ + if (opt->flags & GET_VAL_IN_BASE_UNITS){ if (opt->type == 'i') value = *((int32 *) opt->var); else if (opt->type == 'I') diff --git a/src/utils/configuration.h b/src/utils/configuration.h index f3bda65de..59da29bd5 100644 --- a/src/utils/configuration.h +++ b/src/utils/configuration.h @@ -100,7 +100,7 @@ struct ConfigOption #define OPTION_UNIT_TIME 0xF0000 /* mask for time-related units */ #define OPTION_UNIT (OPTION_UNIT_MEMORY | OPTION_UNIT_TIME) -#define GET_VAL_IN_DEFAULT_UNITS 0x80000000 /* bitflag to get memory and time values in default units*/ +#define GET_VAL_IN_BASE_UNITS 0x80000000 /* bitflag to get memory and time values in default units*/ extern ProbackupSubcmd parse_subcmd(char const * const subcmd_str); extern char const *get_subcmd_name(ProbackupSubcmd const subcmd); diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index 985ba7fec..f0c77ae16 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -39,7 +39,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. pg_probackup show-config -B backup-dir --instance=instance-name [--format=format] - [--default-units] + [--no-scale-units] [--help] pg_probackup backup -B backup-dir -b backup-mode --instance=instance-name diff --git a/tests/expected/option_help_ru.out b/tests/expected/option_help_ru.out index 2fe516bdc..bd6d76970 100644 --- a/tests/expected/option_help_ru.out +++ b/tests/expected/option_help_ru.out @@ -39,7 +39,7 @@ pg_probackup - утилита для управления резервным к pg_probackup show-config -B backup-dir --instance=instance-name [--format=format] - [--default-units] + [--no-scale-units] [--help] pg_probackup backup -B backup-dir -b backup-mode --instance=instance-name diff --git a/tests/option_test.py b/tests/option_test.py index b74b9714f..e97da1ef7 100644 --- a/tests/option_test.py +++ b/tests/option_test.py @@ -222,21 +222,21 @@ def test_help_6(self): 'You need configure PostgreSQL with --enabled-nls option for this test') # @unittest.skip("skip") - def test_options_default_units(self): - """check --default-units option""" + def test_options_no_scale_units(self): + """check --no-scale-units option""" backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node')) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) - # check that --default-units option works correctly + # check that --no-scale-units option works correctly output = self.run_pb(["show-config", "--backup-path", backup_dir, "--instance=node"]) self.assertIn(container=output, member="archive-timeout = 5min") - output = self.run_pb(["show-config", "--backup-path", backup_dir, "--instance=node", "--default-units"]) + output = self.run_pb(["show-config", "--backup-path", backup_dir, "--instance=node", "--no-scale-units"]) self.assertIn(container=output, member="archive-timeout = 300") self.assertNotIn(container=output, member="archive-timeout = 300s") # check that we have now quotes ("") in json output - output = self.run_pb(["show-config", "--backup-path", backup_dir, "--instance=node", "--default-units", "--format=json"]) + 
output = self.run_pb(["show-config", "--backup-path", backup_dir, "--instance=node", "--no-scale-units", "--format=json"]) self.assertIn(container=output, member='"archive-timeout": 300,') self.assertIn(container=output, member='"retention-redundancy": 0,') self.assertNotIn(container=output, member='"archive-timeout": "300",') From 2b74971f710a799e49b10ab87e0901a6ec561d90 Mon Sep 17 00:00:00 2001 From: Oleg Gurev Date: Sat, 27 Apr 2024 15:54:32 +0300 Subject: [PATCH 519/525] [PBCKP-818] Add id's for programlisting section - to use with automatet listings update --- doc/pgprobackup.xml | 60 ++++++++++++++++++++++----------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index a73041f31..ac9892c65 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -529,14 +529,14 @@ doc/src/sgml/pgprobackup.sgml Initialize the backup catalog: - + backup_user@backup_host:~$ pg_probackup-16 init -B /mnt/backups INFO: Backup catalog '/mnt/backups' successfully initialized Add a backup instance called mydb to the backup catalog: - + backup_user@backup_host:~$ pg_probackup-16 add-instance \ -B /mnt/backups \ -D /var/lib/postgresql/16/main \ @@ -548,7 +548,7 @@ INFO: Instance 'mydb' successfully initialized Make a FULL backup: - + backup_user@backup_host:~$ pg_probackup-16 backup \ -B /mnt/backups \ -b FULL \ @@ -580,7 +580,7 @@ INFO: Backup S6OBFN completed List the backups of the instance: - + backup_user@backup_host:~$ pg_probackup-16 show -B /mnt/backups --instance=mydb ================================================================================================================================ Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status @@ -590,7 +590,7 @@ backup_user@backup_host:~$ pg_probackup-16 show -B /mnt/backups --instance=mydb Make an incremental backup in the DELTA mode: - + backup_user@backup_host:~$ pg_probackup-16 backup \ -B /mnt/backups \ -b delta \ @@ -625,7 +625,7 @@ INFO: Backup S6OBLG completed Add or modify some parameters in the pg_probackup configuration file, so that you do not have to specify them each time on the command line: - + backup_user@backup_host:~$ pg_probackup-16 set-config \ -B /mnt/backups \ --instance=mydb \ @@ -637,7 +637,7 @@ backup_user@backup_host:~$ pg_probackup-16 set-config \ Check the configuration of the instance: - + backup_user@backup_host:~$ pg_probackup-16 show-config -B /mnt/backups --instance=mydb # Backup instance information pgdata = /var/lib/postgresql/16/main @@ -678,7 +678,7 @@ remote-user = postgres Make another incremental backup in the DELTA mode, omitting the parameters stored in the configuration file earlier: - + backup_user@backup_host:~$ pg_probackup-16 backup -B /mnt/backups --instance=mydb -b delta --stream INFO: Backup start, pg_probackup version: 2.5.13, instance: mydb, backup ID: S6OBQO, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1 INFO: Database backup start @@ -704,7 +704,7 @@ INFO: Backup S6OBQO completed List the backups of the instance again: - + backup_user@backup_host:~$ pg_probackup-16 show -B /mnt/backups --instance=mydb ================================================================================================================================== Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status @@ -716,7 +716,7 @@ backup_user@backup_host:~$ pg_probackup-16 show -B /mnt/backups --instance=mydb 
Restore the data from the latest available backup to an arbitrary location: - + backup_user@backup_host:~$ pg_probackup-16 restore -B /mnt/backups -D /var/lib/postgresql/16/staging --instance=mydb INFO: Validating parents for backup S6OBQO INFO: Validating backup S6OBFN @@ -2110,7 +2110,7 @@ pg_probackup validate -B backup_dir --instance PT8XFX backup ID up to the specified timestamp, run this command: - + pg_probackup validate -B backup_dir --instance instance_name -i PT8XFX --recovery-target-time="2017-05-18 14:18:11+03" @@ -2305,7 +2305,7 @@ pg_probackup restore -B backup_dir --instance - + ============================================================================================================================================= Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status ============================================================================================================================================= @@ -2788,7 +2788,7 @@ pg_probackup show -B backup_dir pg_probackup displays the list of all the available backups. For example: - + BACKUP INSTANCE 'node' ====================================================================================================================================== Instance Version ID Recovery time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status @@ -2952,7 +2952,7 @@ pg_probackup show -B backup_dir --instance The sample output is as follows: - + #Configuration backup-mode = FULL stream = false @@ -3096,7 +3096,7 @@ pg_probackup show -B backup_dir --instance The sample output is as follows: - + [ { "instance": "node", @@ -3145,7 +3145,7 @@ pg_probackup show -B backup_dir [--instance pg_probackup displays the list of all the available WAL files grouped by timelines. For example: - + ARCHIVE INSTANCE 'node' =================================================================================================================================== TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status @@ -3243,7 +3243,7 @@ pg_probackup show -B backup_dir [--instance The sample output is as follows: - + [ { "instance": "replica", @@ -3599,7 +3599,7 @@ pg_probackup delete -B backup_dir --instance 7, and you have the following backups available on April 10, 2019: - + BACKUP INSTANCE 'node' =================================================================================================================================== Instance Version ID Recovery time Mode WAL TLI Time Data WAL Zratio Start LSN Stop LSN Status @@ -3613,17 +3613,17 @@ BACKUP INSTANCE 'node' node 10 P7XDFT 2019-03-29 05:26:25+03 FULL STREAM 1/0 11s 200MB 16MB 1.0 0/D000028 0/D000198 OK - Even though P7XDHB and P7XDHU backups are outside the + Even though P7XDHB and P7XDHU backups are outside the retention window, they cannot be removed as it invalidates the - succeeding incremental backups P7XDJA and P7XDQV that are + succeeding incremental backups P7XDJA and P7XDQV that are still required, so, if you run the command with the - flag, only the P7XDFT full + flag, only the P7XDFT full backup will be removed. 
- With the option, the P7XDJA - backup is merged with the underlying P7XDHU and P7XDHB backups + With the option, the P7XDJA + backup is merged with the underlying P7XDHU and P7XDHB backups and becomes a full one, so there is no need to keep these expired backups anymore: @@ -3631,7 +3631,7 @@ BACKUP INSTANCE 'node' pg_probackup delete -B backup_dir --instance node --delete-expired --merge-expired pg_probackup show -B backup_dir - + BACKUP INSTANCE 'node' ================================================================================================================================== Instance Version ID Recovery time Mode WAL TLI Time Data WAL Zratio Start LSN Stop LSN Status @@ -3688,7 +3688,7 @@ pg_probackup show -B backup_dir --instance If the backup is pinned, it has the expire-time attribute that displays its expiration time: - + ... recovery-time = '2017-05-16 12:57:31' expire-time = '2020-01-01 00:00:00+03' @@ -3766,7 +3766,7 @@ pg_probackup set-backup -B backup_dir --instance pg_probackup show -B backup_dir --instance node - + BACKUP INSTANCE 'node' ==================================================================================================================================== Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status @@ -3786,7 +3786,7 @@ BACKUP INSTANCE 'node' pg_probackup show -B backup_dir --instance node --archive - + ARCHIVE INSTANCE 'node' =============================================================================================================================== TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status @@ -3800,7 +3800,7 @@ ARCHIVE INSTANCE 'node' pg_probackup delete -B backup_dir --instance node --delete-wal - + ARCHIVE INSTANCE 'node' =============================================================================================================================== TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status @@ -3815,7 +3815,7 @@ ARCHIVE INSTANCE 'node' pg_probackup delete -B backup_dir --instance node --delete-wal --wal-depth=1 - + ARCHIVE INSTANCE 'node' ================================================================================================================================ TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status @@ -3829,7 +3829,7 @@ ARCHIVE INSTANCE 'node' pg_probackup backup -B backup_dir --instance node -b DELTA --wal-depth=1 --delete-wal - + ARCHIVE INSTANCE 'node' =============================================================================================================================== TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status From 26f1e9c052d50b8ada799dc5dd02355893d54056 Mon Sep 17 00:00:00 2001 From: Oleg Gurev Date: Thu, 2 May 2024 11:02:27 +0300 Subject: [PATCH 520/525] [PBCKP-818] Doc. 
Replace --instance to --instance= in the xml - Replace --instance node to --instance=node in the xml --- doc/pgprobackup.xml | 146 ++++++++++++++++++++++---------------------- 1 file changed, 73 insertions(+), 73 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index ac9892c65..272a0d043 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -1080,7 +1080,7 @@ pg_probackup init -B backup_dir To add a new backup instance, run the following command: -pg_probackup add-instance -B backup_dir -D data_dir --instance instance_name [remote_options] +pg_probackup add-instance -B backup_dir -D data_dir --instance=instance_name [remote_options] Where: @@ -1355,7 +1355,7 @@ ALTER ROLE backup WITH REPLICATION; parameter, as follows: -archive_command = '"install_dir/pg_probackup" archive-push -B "backup_dir" --instance instance_name --wal-file-name=%f [remote_options]' +archive_command = '"install_dir/pg_probackup" archive-push -B "backup_dir" --instance=instance_name --wal-file-name=%f [remote_options]' @@ -1804,7 +1804,7 @@ CREATE EXTENSION ptrack; To create a backup, run the following command: -pg_probackup backup -B backup_dir --instance instance_name -b backup_mode +pg_probackup backup -B backup_dir --instance=instance_name -b backup_mode Where backup_mode can take one of the @@ -1830,7 +1830,7 @@ pg_probackup backup -B backup_dir --instance -pg_probackup backup -B backup_dir --instance instance_name -b FULL +pg_probackup backup -B backup_dir --instance=instance_name -b FULL ARCHIVE backups rely on @@ -1860,7 +1860,7 @@ pg_probackup backup -B backup_dir --instance -pg_probackup backup -B backup_dir --instance instance_name -b FULL --stream --temp-slot +pg_probackup backup -B backup_dir --instance=instance_name -b FULL --stream --temp-slot The optional flag ensures that @@ -1953,7 +1953,7 @@ pg_probackup backup -B backup_dir --instance -pg_probackup backup -B backup_dir --instance instance_name -b FULL --external-dirs=/etc/dir1:/etc/dir2 +pg_probackup backup -B backup_dir --instance=instance_name -b FULL --external-dirs=/etc/dir1:/etc/dir2 Similarly, to include C:\dir1 and @@ -1961,7 +1961,7 @@ pg_probackup backup -B backup_dir --instance -pg_probackup backup -B backup_dir --instance instance_name -b FULL --external-dirs=C:\dir1;C:\dir2 +pg_probackup backup -B backup_dir --instance=instance_name -b FULL --external-dirs=C:\dir1;C:\dir2 pg_probackup recursively copies the contents @@ -1989,7 +1989,7 @@ pg_probackup backup -B backup_dir --instance -pg_probackup checkdb [-B backup_dir [--instance instance_name]] [-D data_dir] [connection_options] +pg_probackup checkdb [-B backup_dir [--instance=instance_name]] [-D data_dir] [connection_options] @@ -2087,7 +2087,7 @@ pg_probackup checkdb --amcheck --skip-block-validation [connection_ this command: -pg_probackup validate -B backup_dir --instance instance_name --recovery-target-xid=4242 +pg_probackup validate -B backup_dir --instance=instance_name --recovery-target-xid=4242 If validation completes successfully, pg_probackup displays the @@ -2111,7 +2111,7 @@ pg_probackup validate -B backup_dir --instance -pg_probackup validate -B backup_dir --instance instance_name -i PT8XFX --recovery-target-time="2017-05-18 14:18:11+03" +pg_probackup validate -B backup_dir --instance=instance_name -i PT8XFX --recovery-target-time="2017-05-18 14:18:11+03" If you specify the backup_id of an incremental backup, @@ -2129,7 +2129,7 @@ pg_probackup validate -B backup_dir --instance -pg_probackup restore -B backup_dir --instance instance_name -i 
backup_id +pg_probackup restore -B backup_dir --instance=instance_name -i backup_id Where: @@ -2213,7 +2213,7 @@ pg_probackup restore -B backup_dir --instance -pg_probackup restore -B backup_dir --instance instance_name -D data_dir -j 4 -i backup_id -T tablespace1_dir=tablespace1_newdir -T tablespace2_dir=tablespace2_newdir +pg_probackup restore -B backup_dir --instance=instance_name -D data_dir -j 4 -i backup_id -T tablespace1_dir=tablespace1_newdir -T tablespace2_dir=tablespace2_newdir @@ -2245,7 +2245,7 @@ pg_probackup restore -B backup_dir --instance command with the following options: -pg_probackup restore -B backup_dir --instance instance_name -D data_dir -I incremental_mode +pg_probackup restore -B backup_dir --instance=instance_name -D data_dir -I incremental_mode Where incremental_mode can take one of the @@ -2314,7 +2314,7 @@ pg_probackup restore -B backup_dir --instance -pg_probackup restore -B backup_dir --instance instance_name --db-include=database_name +pg_probackup restore -B backup_dir --instance=instance_name --db-include=database_name The option can be specified @@ -2360,14 +2360,14 @@ pg_probackup restore -B backup_dir --instance -pg_probackup restore -B backup_dir --instance instance_name --db-include=db1 --db-include=db2 +pg_probackup restore -B backup_dir --instance=instance_name --db-include=db1 --db-include=db2 To exclude one or more databases from restore, use the option: -pg_probackup restore -B backup_dir --instance instance_name --db-exclude=database_name +pg_probackup restore -B backup_dir --instance=instance_name --db-exclude=database_name The option can be specified @@ -2376,7 +2376,7 @@ pg_probackup restore -B backup_dir --instance -pg_probackup restore -B backup_dir --instance instance_name --db-exclude=db1 --db-exclude=db2 +pg_probackup restore -B backup_dir --instance=instance_name --db-exclude=db1 --db-exclude=db2 Partial restore relies on lax behavior of PostgreSQL recovery @@ -2438,7 +2438,7 @@ pg_probackup restore -B backup_dir --instance -pg_probackup restore -B backup_dir --instance instance_name --recovery-target-time="2017-05-18 14:18:11+03" +pg_probackup restore -B backup_dir --instance=instance_name --recovery-target-time="2017-05-18 14:18:11+03" @@ -2447,7 +2447,7 @@ pg_probackup restore -B backup_dir --instance --recovery-target-xid option: -pg_probackup restore -B backup_dir --instance instance_name --recovery-target-xid=687 +pg_probackup restore -B backup_dir --instance=instance_name --recovery-target-xid=687 @@ -2456,7 +2456,7 @@ pg_probackup restore -B backup_dir --instance --recovery-target-lsn option: -pg_probackup restore -B backup_dir --instance instance_name --recovery-target-lsn=16/B374D848 +pg_probackup restore -B backup_dir --instance=instance_name --recovery-target-lsn=16/B374D848 @@ -2465,7 +2465,7 @@ pg_probackup restore -B backup_dir --instance --recovery-target-name option: -pg_probackup restore -B backup_dir --instance instance_name --recovery-target-name="before_app_upgrade" +pg_probackup restore -B backup_dir --instance=instance_name --recovery-target-name="before_app_upgrade" @@ -2475,7 +2475,7 @@ pg_probackup restore -B backup_dir --instance latest value: -pg_probackup restore -B backup_dir --instance instance_name --recovery-target="latest" +pg_probackup restore -B backup_dir --instance=instance_name --recovery-target="latest" @@ -2485,7 +2485,7 @@ pg_probackup restore -B backup_dir --instance immediate value: -pg_probackup restore -B backup_dir --instance instance_name --recovery-target='immediate' +pg_probackup 
restore -B backup_dir --instance=instance_name --recovery-target='immediate' @@ -2569,7 +2569,7 @@ pg_probackup restore -B backup_dir --instance 2302, run: -pg_probackup backup -B backup_dir --instance instance_name -b FULL --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 +pg_probackup backup -B backup_dir --instance=instance_name -b FULL --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 To restore the latest available backup on a remote system with host address @@ -2577,7 +2577,7 @@ pg_probackup backup -B backup_dir --instance 2302, run: -pg_probackup restore -B backup_dir --instance instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 +pg_probackup restore -B backup_dir --instance=instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 Restoring an ARCHIVE backup or performing PITR in the remote mode @@ -2604,20 +2604,20 @@ pg_probackup restore -B backup_dir --instance 2303, run: -pg_probackup restore -B backup_dir --instance instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 --archive-host=192.168.0.3 --archive-port=2303 --archive-user=backup +pg_probackup restore -B backup_dir --instance=instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 --archive-host=192.168.0.3 --archive-port=2303 --archive-user=backup Provided arguments will be used to construct the restore_command: -restore_command = '"install_dir/pg_probackup" archive-get -B "backup_dir" --instance instance_name --wal-file-path=%p --wal-file-name=%f --remote-host=192.168.0.3 --remote-port=2303 --remote-user=backup' +restore_command = '"install_dir/pg_probackup" archive-get -B "backup_dir" --instance=instance_name --wal-file-path=%p --wal-file-name=%f --remote-host=192.168.0.3 --remote-port=2303 --remote-user=backup' Alternatively, you can use the option to provide the entire restore_command: -pg_probackup restore -B backup_dir --instance instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 --restore-command='"install_dir/pg_probackup" archive-get -B "backup_dir" --instance instance_name --wal-file-path=%p --wal-file-name=%f --remote-host=192.168.0.3 --remote-port=2303 --remote-user=backup' +pg_probackup restore -B backup_dir --instance=instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 --restore-command='"install_dir/pg_probackup" archive-get -B "backup_dir" --instance=instance_name --wal-file-path=%p --wal-file-name=%f --remote-host=192.168.0.3 --remote-port=2303 --remote-user=backup' @@ -2646,7 +2646,7 @@ pg_probackup restore -B backup_dir --instance -pg_probackup backup -B backup_dir --instance instance_name -b FULL -j 4 +pg_probackup backup -B backup_dir --instance=instance_name -b FULL -j 4 @@ -2707,14 +2707,14 @@ pg_probackup backup -B backup_dir --instance set-config command: -pg_probackup set-config -B backup_dir --instance instance_name +pg_probackup set-config -B backup_dir --instance=instance_name [--external-dirs=external_directory_path] [remote_options] [connection_options] [retention_options] [logging_options] To view the current settings, run the following command: -pg_probackup show-config -B backup_dir --instance instance_name +pg_probackup show-config -B backup_dir --instance=instance_name You can override the settings defined in pg_probackup.conf when @@ -2947,7 +2947,7 @@ BACKUP INSTANCE 'node' show command with the backup ID: -pg_probackup show -B backup_dir --instance instance_name -i backup_id 
+pg_probackup show -B backup_dir --instance=instance_name -i backup_id The sample output is as follows: @@ -3091,7 +3091,7 @@ primary_conninfo = 'user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmod in the JSON format: -pg_probackup show -B backup_dir --instance instance_name --format=json -i backup_id +pg_probackup show -B backup_dir --instance=instance_name --format=json -i backup_id The sample output is as follows: @@ -3139,7 +3139,7 @@ pg_probackup show -B backup_dir --instance -pg_probackup show -B backup_dir [--instance instance_name] --archive +pg_probackup show -B backup_dir [--instance=instance_name] --archive pg_probackup displays the list of all the available WAL files @@ -3238,7 +3238,7 @@ ARCHIVE INSTANCE 'node' format, run the command: -pg_probackup show -B backup_dir [--instance instance_name] --archive --format=json +pg_probackup show -B backup_dir [--instance=instance_name] --archive --format=json The sample output is as follows: @@ -3540,7 +3540,7 @@ pg_probackup show -B backup_dir [--instance -pg_probackup set-config -B backup_dir --instance instance_name --retention-redundancy=2 --retention-window=7 +pg_probackup set-config -B backup_dir --instance=instance_name --retention-redundancy=2 --retention-window=7 @@ -3558,7 +3558,7 @@ pg_probackup set-config -B backup_dir --instance --delete-expired flag: -pg_probackup delete -B backup_dir --instance instance_name --delete-expired +pg_probackup delete -B backup_dir --instance=instance_name --delete-expired If you would like to also remove the WAL files that are no @@ -3566,7 +3566,7 @@ pg_probackup delete -B backup_dir --instance --delete-wal flag: -pg_probackup delete -B backup_dir --instance instance_name --delete-expired --delete-wal +pg_probackup delete -B backup_dir --instance=instance_name --delete-expired --delete-wal @@ -3577,7 +3577,7 @@ pg_probackup delete -B backup_dir --instance -pg_probackup delete -B backup_dir --instance instance_name --delete-expired --retention-window=7 --retention-redundancy=2 +pg_probackup delete -B backup_dir --instance=instance_name --delete-expired --retention-window=7 --retention-redundancy=2 Since incremental backups require that their parent full @@ -3628,7 +3628,7 @@ BACKUP INSTANCE 'node' expired backups anymore: -pg_probackup delete -B backup_dir --instance node --delete-expired --merge-expired +pg_probackup delete -B backup_dir --instance=node --delete-expired --merge-expired pg_probackup show -B backup_dir @@ -3654,7 +3654,7 @@ BACKUP INSTANCE 'node' for arbitrary time. For example: -pg_probackup set-backup -B backup_dir --instance instance_name -i backup_id --ttl=30d +pg_probackup set-backup -B backup_dir --instance=instance_name -i backup_id --ttl=30d This command sets the expiration time of the @@ -3666,7 +3666,7 @@ pg_probackup set-backup -B backup_dir --instance --expire-time option. 
For example: -pg_probackup set-backup -B backup_dir --instance instance_name -i backup_id --expire-time="2020-01-01 00:00:00+03" +pg_probackup set-backup -B backup_dir --instance=instance_name -i backup_id --expire-time="2020-01-01 00:00:00+03" Alternatively, you can use the and @@ -3675,14 +3675,14 @@ pg_probackup set-backup -B backup_dir --instance -pg_probackup backup -B backup_dir --instance instance_name -b FULL --ttl=30d -pg_probackup backup -B backup_dir --instance instance_name -b FULL --expire-time="2020-01-01 00:00:00+03" +pg_probackup backup -B backup_dir --instance=instance_name -b FULL --ttl=30d +pg_probackup backup -B backup_dir --instance=instance_name -b FULL --expire-time="2020-01-01 00:00:00+03" To check if the backup is pinned, run the command: -pg_probackup show -B backup_dir --instance instance_name -i backup_id +pg_probackup show -B backup_dir --instance=instance_name -i backup_id @@ -3700,7 +3700,7 @@ data-bytes = 22288792 You can unpin the backup by setting the option to zero: -pg_probackup set-backup -B backup_dir --instance instance_name -i backup_id --ttl=0 +pg_probackup set-backup -B backup_dir --instance=instance_name -i backup_id --ttl=0 @@ -3764,7 +3764,7 @@ pg_probackup set-backup -B backup_dir --instance : -pg_probackup show -B backup_dir --instance node +pg_probackup show -B backup_dir --instance=node BACKUP INSTANCE 'node' @@ -3784,7 +3784,7 @@ BACKUP INSTANCE 'node' flag: -pg_probackup show -B backup_dir --instance node --archive +pg_probackup show -B backup_dir --instance=node --archive ARCHIVE INSTANCE 'node' @@ -3798,7 +3798,7 @@ ARCHIVE INSTANCE 'node' achieve much, only one segment is removed: -pg_probackup delete -B backup_dir --instance node --delete-wal +pg_probackup delete -B backup_dir --instance=node --delete-wal ARCHIVE INSTANCE 'node' @@ -3813,7 +3813,7 @@ ARCHIVE INSTANCE 'node' option to 1: -pg_probackup delete -B backup_dir --instance node --delete-wal --wal-depth=1 +pg_probackup delete -B backup_dir --instance=node --delete-wal --wal-depth=1 ARCHIVE INSTANCE 'node' @@ -3827,7 +3827,7 @@ ARCHIVE INSTANCE 'node' option with the command: -pg_probackup backup -B backup_dir --instance node -b DELTA --wal-depth=1 --delete-wal +pg_probackup backup -B backup_dir --instance=node -b DELTA --wal-depth=1 --delete-wal ARCHIVE INSTANCE 'node' @@ -3848,7 +3848,7 @@ ARCHIVE INSTANCE 'node' recent incremental backup you would like to merge: -pg_probackup merge -B backup_dir --instance instance_name -i backup_id +pg_probackup merge -B backup_dir --instance=instance_name -i backup_id This command merges backups that belong to a common incremental backup @@ -3870,7 +3870,7 @@ pg_probackup merge -B backup_dir --instance -pg_probackup show -B backup_dir --instance instance_name -i backup_id +pg_probackup show -B backup_dir --instance=instance_name -i backup_id If the merge is still in progress, the backup status is @@ -3888,7 +3888,7 @@ pg_probackup show -B backup_dir --instance -pg_probackup delete -B backup_dir --instance instance_name -i backup_id +pg_probackup delete -B backup_dir --instance=instance_name -i backup_id This command will delete the backup with the specified @@ -3904,7 +3904,7 @@ pg_probackup delete -B backup_dir --instance --delete-wal flag: -pg_probackup delete -B backup_dir --instance instance_name --delete-wal +pg_probackup delete -B backup_dir --instance=instance_name --delete-wal To delete backups that are expired according to the current @@ -3912,7 +3912,7 @@ pg_probackup delete -B backup_dir --instance -pg_probackup delete -B 
backup_dir --instance instance_name --delete-expired +pg_probackup delete -B backup_dir --instance=instance_name --delete-expired Expired backups cannot be removed while at least one @@ -3923,7 +3923,7 @@ pg_probackup delete -B backup_dir --instance -pg_probackup delete -B backup_dir --instance instance_name --delete-expired --merge-expired +pg_probackup delete -B backup_dir --instance=instance_name --delete-expired --merge-expired In this case, pg_probackup searches for the oldest incremental @@ -3943,7 +3943,7 @@ pg_probackup delete -B backup_dir --instance --status: -pg_probackup delete -B backup_dir --instance instance_name --status=ERROR +pg_probackup delete -B backup_dir --instance=instance_name --status=ERROR @@ -4181,7 +4181,7 @@ pg_probackup init -B backup_dir [--help] add-instance -pg_probackup add-instance -B backup_dir -D data_dir --instance instance_name [--help] +pg_probackup add-instance -B backup_dir -D data_dir --instance=instance_name [--help] Initializes a new backup instance inside the backup catalog @@ -4199,7 +4199,7 @@ pg_probackup add-instance -B backup_dir -D del-instance -pg_probackup del-instance -B backup_dir --instance instance_name [--help] +pg_probackup del-instance -B backup_dir --instance=instance_name [--help] Deletes all backups and WAL files associated with the @@ -4209,7 +4209,7 @@ pg_probackup del-instance -B backup_dir --instance set-config -pg_probackup set-config -B backup_dir --instance instance_name +pg_probackup set-config -B backup_dir --instance=instance_name [--help] [--pgdata=pgdata-path] [--retention-redundancy=redundancy][--retention-window=window][--wal-depth=wal_depth] [--compress-algorithm=compression_algorithm] [--compress-level=compression_level] @@ -4235,7 +4235,7 @@ pg_probackup set-config -B backup_dir --instance set-backup -pg_probackup set-backup -B backup_dir --instance instance_name -i backup_id +pg_probackup set-backup -B backup_dir --instance=instance_name -i backup_id {--ttl=ttl | --expire-time=time} [--note=backup_note] [--help] @@ -4266,7 +4266,7 @@ pg_probackup set-backup -B backup_dir --instance show-config -pg_probackup show-config -B backup_dir --instance instance_name [--format=plain|json] +pg_probackup show-config -B backup_dir --instance=instance_name [--format=plain|json] [--no-scale-units] [logging_options] @@ -4299,7 +4299,7 @@ pg_probackup show-config -B backup_dir --instance show pg_probackup show -B backup_dir -[--help] [--instance instance_name [-i backup_id | --archive]] [--format=plain|json] [--no-color] +[--help] [--instance=instance_name [-i backup_id | --archive]] [--format=plain|json] [--no-color] Shows the contents of the backup catalog. 
If @@ -4328,7 +4328,7 @@ pg_probackup show -B backup_dir backup -pg_probackup backup -B backup_dir -b backup_mode --instance instance_name +pg_probackup backup -B backup_dir -b backup_mode --instance=instance_name [--help] [-j num_threads] [--progress] [-C] [--stream [-S slot_name] [--temp-slot]] [--backup-pg-log] [--no-validate] [--skip-block-validation] @@ -4511,7 +4511,7 @@ pg_probackup backup -B backup_dir -b bac restore -pg_probackup restore -B backup_dir --instance instance_name +pg_probackup restore -B backup_dir --instance=instance_name [--help] [-D data_dir] [-i backup_id] [-j num_threads] [--progress] [-T OLDDIR=NEWDIR] [--external-mapping=OLDDIR=NEWDIR] [--skip-external-dirs] @@ -4722,7 +4722,7 @@ pg_probackup restore -B backup_dir --instance checkdb pg_probackup checkdb -[-B backup_dir] [--instance instance_name] [-D data_dir] +[-B backup_dir] [--instance=instance_name] [-D data_dir] [--help] [-j num_threads] [--progress] [--amcheck [--skip-block-validation] [--checkunique] [--heapallindexed]] [connection_options] [logging_options] @@ -4812,7 +4812,7 @@ pg_probackup checkdb validate pg_probackup validate -B backup_dir -[--help] [--instance instance_name] [-i backup_id] +[--help] [--instance=instance_name] [-i backup_id] [-j num_threads] [--progress] [--skip-block-validation] [recovery_target_options] [logging_options] @@ -4840,7 +4840,7 @@ pg_probackup validate -B backup_dir merge -pg_probackup merge -B backup_dir --instance instance_name -i backup_id +pg_probackup merge -B backup_dir --instance=instance_name -i backup_id [--help] [-j num_threads] [--progress] [--no-validate] [--no-sync] [logging_options] @@ -4884,7 +4884,7 @@ pg_probackup merge -B backup_dir --instance delete -pg_probackup delete -B backup_dir --instance instance_name +pg_probackup delete -B backup_dir --instance=instance_name [--help] [-j num_threads] [--progress] [--retention-redundancy=redundancy][--retention-window=window][--wal-depth=wal_depth] [--delete-wal] {-i backup_id | --delete-expired [--merge-expired] | --merge-expired | --status=backup_status} @@ -4931,7 +4931,7 @@ pg_probackup delete -B backup_dir --instance archive-push -pg_probackup archive-push -B backup_dir --instance instance_name +pg_probackup archive-push -B backup_dir --instance=instance_name --wal-file-name=wal_file_name [--wal-file-path=wal_file_path] [--help] [--no-sync] [--compress] [--no-ready-rename] [--overwrite] [-j num_threads] [--batch-size=batch_size] @@ -4997,7 +4997,7 @@ pg_probackup archive-push -B backup_dir --instance archive-get -pg_probackup archive-get -B backup_dir --instance instance_name --wal-file-path=wal_file_path --wal-file-name=wal_file_name +pg_probackup archive-get -B backup_dir --instance=instance_name --wal-file-path=wal_file_path --wal-file-name=wal_file_name [-j num_threads] [--batch-size=batch_size] [--prefetch-dir=prefetch_dir_path] [--no-validate-wal] [--help] [remote_options] [logging_options] From 69379ab7a66c15720f066cd770bbc0ced80c05b1 Mon Sep 17 00:00:00 2001 From: Oleg Gurev Date: Thu, 2 May 2024 11:38:22 +0300 Subject: [PATCH 521/525] [PBCKP-818] Documentation update script work result. 
--- doc/pgprobackup.xml | 866 ++++++++++++++++++++++---------------------- 1 file changed, 424 insertions(+), 442 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 272a0d043..f5b2a93eb 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -2,7 +2,6 @@ doc/src/sgml/pgprobackup.sgml &project; documentation --> - pg_probackup @@ -530,105 +529,111 @@ doc/src/sgml/pgprobackup.sgml Initialize the backup catalog: -backup_user@backup_host:~$ pg_probackup-16 init -B /mnt/backups +backup_user@backup_host:~$ pg_probackup init -B /mnt/backups INFO: Backup catalog '/mnt/backups' successfully initialized Add a backup instance called mydb to the backup catalog: -backup_user@backup_host:~$ pg_probackup-16 add-instance \ +backup_user@backup_host:~$ pg_probackup add-instance \ -B /mnt/backups \ - -D /var/lib/postgresql/16/main \ - --instance=mydb \ + -D /var/lib/pgpro/std-16/data \ + --instance=node \ --remote-host=postgres_host \ --remote-user=postgres -INFO: Instance 'mydb' successfully initialized +INFO: Instance 'node' successfully initialized Make a FULL backup: -backup_user@backup_host:~$ pg_probackup-16 backup \ +backup_user@backup_host:~$ pg_probackup backup \ -B /mnt/backups \ -b FULL \ - --instance=mydb \ + --instance=node \ --stream \ + --compress-algorithm=zlib \ --remote-host=postgres_host \ --remote-user=postgres \ -U backup \ -d backupdb -INFO: Backup start, pg_probackup version: 2.5.13, instance: mydb, backup ID: S6OBFN, backup mode: FULL, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1 +INFO: Backup start, pg_probackup version: 2.5.14, instance: node, backup ID: SCUN1Q, backup mode: FULL, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1 +INFO: This PostgreSQL instance was initialized with data block checksums. 
Data block corruption will be detected INFO: Database backup start INFO: wait for pg_backup_start() -INFO: Wait for WAL segment /mnt/backups/backups/mydb/S6OBFN/database/pg_wal/000000010000000000000002 to be streamed -INFO: PGDATA size: 29MB -INFO: Current Start LSN: 0/2000060, TLI: 1 +INFO: Wait for WAL segment /mnt/backups/backups/node/SCUN1Q/database/pg_wal/000000010000000000000008 to be streamed +INFO: PGDATA size: 96MB +INFO: Current Start LSN: 0/8000028, TLI: 1 INFO: Start transferring data files -INFO: Data files are transferred, time elapsed: 0 +INFO: Data files are transferred, time elapsed: 1s INFO: wait for pg_stop_backup() INFO: pg_stop backup() successfully executed -INFO: stop_lsn: 0/2003CB0 +INFO: stop_lsn: 0/800BBD0 INFO: Getting the Recovery Time from WAL INFO: Syncing backup files to disk -INFO: Backup files are synced, time elapsed: 0 -INFO: Validating backup S6OBFN -INFO: Backup S6OBFN data files are valid -INFO: Backup S6OBFN resident size: 45MB -INFO: Backup S6OBFN completed +INFO: Backup files are synced, time elapsed: 1s +INFO: Validating backup SCUN1Q +INFO: Backup SCUN1Q data files are valid +INFO: Backup SCUN1Q resident size: 56MB +INFO: Backup SCUN1Q completed List the backups of the instance: -backup_user@backup_host:~$ pg_probackup-16 show -B /mnt/backups --instance=mydb +backup_user@backup_host:~$ pg_probackup show \ + -B /mnt/backups \ + --instance=node ================================================================================================================================ Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status ================================================================================================================================ - mydb 16 S6OBFN 2024-01-03 06:59:49+00 FULL STREAM 1/0 10s 29MB 16MB 1.00 0/2000060 0/2003CB0 OK + node 16 SCUN1Q 2024-05-02 11:17:53+03 FULL STREAM 1/0 12s 40MB 16MB 2.42 0/8000028 0/800BBD0 OK Make an incremental backup in the DELTA mode: -backup_user@backup_host:~$ pg_probackup-16 backup \ - -B /mnt/backups \ - -b delta \ - --instance=mydb \ - --stream \ - --remote-host=postgres_host \ - --remote-user=postgres \ - -U backup \ - -d backupdb -INFO: Backup start, pg_probackup version: 2.5.13, instance: mydb, backup ID: S6OBLG, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1 +backup_user@backup_host:~$ pg_probackup backup \ + -B /mnt/backups \ + -b DELTA \ + --instance=node \ + --stream \ + --compress-algorithm=zlib \ + --remote-host=postgres_host \ + --remote-user=postgres \ + -U backup \ + -d backupdb +INFO: Backup start, pg_probackup version: 2.5.14, instance: node, backup ID: SCUN22, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1 +INFO: This PostgreSQL instance was initialized with data block checksums. 
Data block corruption will be detected INFO: Database backup start INFO: wait for pg_backup_start() -INFO: Parent backup: S6OBFN -INFO: Wait for WAL segment /mnt/backups/backups/mydb/S6OBLG/database/pg_wal/000000010000000000000004 to be streamed -INFO: PGDATA size: 29MB -INFO: Current Start LSN: 0/4000028, TLI: 1 -INFO: Parent Start LSN: 0/2000060, TLI: 1 +INFO: Parent backup: SCUN1Q +INFO: Wait for WAL segment /mnt/backups/backups/node/SCUN22/database/pg_wal/000000010000000000000009 to be streamed +INFO: PGDATA size: 96MB +INFO: Current Start LSN: 0/9000028, TLI: 1 +INFO: Parent Start LSN: 0/8000028, TLI: 1 INFO: Start transferring data files INFO: Data files are transferred, time elapsed: 1s INFO: wait for pg_stop_backup() INFO: pg_stop backup() successfully executed -INFO: stop_lsn: 0/4000168 +INFO: stop_lsn: 0/9000168 INFO: Getting the Recovery Time from WAL INFO: Syncing backup files to disk -INFO: Backup files are synced, time elapsed: 0 -INFO: Validating backup S6OBLG -INFO: Backup S6OBLG data files are valid -INFO: Backup S6OBLG resident size: 32MB -INFO: Backup S6OBLG completed +INFO: Backup files are synced, time elapsed: 1s +INFO: Validating backup SCUN22 +INFO: Backup SCUN22 data files are valid +INFO: Backup SCUN22 resident size: 34MB +INFO: Backup SCUN22 completed Add or modify some parameters in the pg_probackup configuration file, so that you do not have to specify them each time on the command line: -backup_user@backup_host:~$ pg_probackup-16 set-config \ +backup_user@backup_host:~$ pg_probackup set-config \ -B /mnt/backups \ - --instance=mydb \ + --instance=node \ --remote-host=postgres_host \ --remote-user=postgres \ -U backup \ @@ -638,10 +643,12 @@ backup_user@backup_host:~$ pg_probackup-16 set-config \ Check the configuration of the instance: -backup_user@backup_host:~$ pg_probackup-16 show-config -B /mnt/backups --instance=mydb +backup_user@backup_host:~$ pg_probackup show-config \ + -B /mnt/backups \ + --instance=node # Backup instance information -pgdata = /var/lib/postgresql/16/main -system-identifier = 7319761899046784808 +pgdata = /var/lib/pgpro/std-16/data +system-identifier = 7364313570668255886 xlog-seg-size = 16777216 # Connection parameters pgdatabase = backupdb @@ -679,61 +686,72 @@ remote-user = postgres Make another incremental backup in the DELTA mode, omitting the parameters stored in the configuration file earlier: -backup_user@backup_host:~$ pg_probackup-16 backup -B /mnt/backups --instance=mydb -b delta --stream -INFO: Backup start, pg_probackup version: 2.5.13, instance: mydb, backup ID: S6OBQO, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1 +backup_user@backup_host:~$ pg_probackup backup \ + -B /mnt/backups \ + -b DELTA \ + --instance=node \ + --stream \ + --compress-algorithm=zlib +INFO: Backup start, pg_probackup version: 2.5.14, instance: node, backup ID: SCUN2C, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1 +INFO: This PostgreSQL instance was initialized with data block checksums. 
Data block corruption will be detected INFO: Database backup start INFO: wait for pg_backup_start() -INFO: Parent backup: S6OBLG -INFO: Wait for WAL segment /mnt/backups/backups/mydb/S6OBQO/database/pg_wal/000000010000000000000006 to be streamed -INFO: PGDATA size: 29MB -INFO: Current Start LSN: 0/6000028, TLI: 1 -INFO: Parent Start LSN: 0/4000028, TLI: 1 +INFO: Parent backup: SCUN22 +INFO: Wait for WAL segment /mnt/backups/backups/node/SCUN2C/database/pg_wal/00000001000000000000000B to be streamed +INFO: PGDATA size: 96MB +INFO: Current Start LSN: 0/B000028, TLI: 1 +INFO: Parent Start LSN: 0/9000028, TLI: 1 INFO: Start transferring data files INFO: Data files are transferred, time elapsed: 0 INFO: wait for pg_stop_backup() INFO: pg_stop backup() successfully executed -INFO: stop_lsn: 0/6000168 +INFO: stop_lsn: 0/B000168 INFO: Getting the Recovery Time from WAL INFO: Syncing backup files to disk INFO: Backup files are synced, time elapsed: 0 -INFO: Validating backup S6OBQO -INFO: Backup S6OBQO data files are valid -INFO: Backup S6OBQO resident size: 32MB -INFO: Backup S6OBQO completed +INFO: Validating backup SCUN2C +INFO: Backup SCUN2C data files are valid +INFO: Backup SCUN2C resident size: 17MB +INFO: Backup SCUN2C completed List the backups of the instance again: -backup_user@backup_host:~$ pg_probackup-16 show -B /mnt/backups --instance=mydb -================================================================================================================================== - Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status -================================================================================================================================== - mydb 16 S6OBQO 2024-01-03 07:06:26+00 DELTA STREAM 1/1 6s 111kB 32MB 1.00 0/6000028 0/6000168 OK - mydb 16 S6OBLG 2024-01-03 07:03:18+00 DELTA STREAM 1/1 10s 127kB 32MB 1.00 0/4000028 0/4000168 OK - mydb 16 S6OBFN 2024-01-03 06:59:49+00 FULL STREAM 1/0 10s 29MB 16MB 1.00 0/2000060 0/2003CB0 OK +backup_user@backup_host:~$ pg_probackup show \ + -B /mnt/backups \ + --instance=node +=================================================================================================================================== + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status +=================================================================================================================================== + node 16 SCUN2C 2024-05-02 11:18:13+03 DELTA STREAM 1/1 10s 1139kB 16MB 1.00 0/B000028 0/B000168 OK + node 16 SCUN22 2024-05-02 11:18:04+03 DELTA STREAM 1/1 10s 2357kB 32MB 1.02 0/9000028 0/9000168 OK + node 16 SCUN1Q 2024-05-02 11:17:53+03 FULL STREAM 1/0 12s 40MB 16MB 2.42 0/8000028 0/800BBD0 OK Restore the data from the latest available backup to an arbitrary location: -backup_user@backup_host:~$ pg_probackup-16 restore -B /mnt/backups -D /var/lib/postgresql/16/staging --instance=mydb -INFO: Validating parents for backup S6OBQO -INFO: Validating backup S6OBFN -INFO: Backup S6OBFN data files are valid -INFO: Validating backup S6OBLG -INFO: Backup S6OBLG data files are valid -INFO: Validating backup S6OBQO -INFO: Backup S6OBQO data files are valid -INFO: Backup S6OBQO WAL segments are valid -INFO: Backup S6OBQO is valid. -INFO: Restoring the database from backup at 2024-01-03 07:06:24+00 -INFO: Start restoring backup files. PGDATA size: 61MB -INFO: Backup files are restored. 
Transfered bytes: 61MB, time elapsed: 1s -INFO: Restore incremental ratio (less is better): 100% (61MB/61MB) +backup_user@backup_host:~$ pg_probackup restore \ + -B /mnt/backups \ + -D /var/lib/pgpro/std-16/staging-data \ + --instance=node +INFO: Validating parents for backup SCUN2C +INFO: Validating backup SCUN1Q +INFO: Backup SCUN1Q data files are valid +INFO: Validating backup SCUN22 +INFO: Backup SCUN22 data files are valid +INFO: Validating backup SCUN2C +INFO: Backup SCUN2C data files are valid +INFO: Backup SCUN2C WAL segments are valid +INFO: Backup SCUN2C is valid. +INFO: Restoring the database from backup SCUN2C on localhost +INFO: Start restoring backup files. PGDATA size: 112MB +INFO: Backup files are restored. Transfered bytes: 112MB, time elapsed: 0 +INFO: Restore incremental ratio (less is better): 100% (112MB/112MB) INFO: Syncing restored files to disk -INFO: Restored backup files are synced, time elapsed: 0 -INFO: Restore of backup S6OBQO completed. +INFO: Restored backup files are synced, time elapsed: 2s +INFO: Restore of backup SCUN2C completed. @@ -806,7 +824,7 @@ apt-cache search pg_probackup Install or upgrade a pg_probackup version of your choice -sudo apt install pg-probackup-15 +sudo apt install pg-probackup-16 @@ -814,7 +832,7 @@ sudo apt install pg-probackup-15 Optionally install the debug package -sudo apt install pg-probackup-15-dbg +sudo apt install pg-probackup-16-dbg @@ -823,7 +841,7 @@ sudo apt install pg-probackup-15-dbg sudo apt install dpkg-dev -sudo apt source pg-probackup-15 +sudo apt source pg-probackup-16 @@ -855,7 +873,7 @@ dnf search pg_probackup Install or upgrade a pg_probackup version of your choice -dnf install pg_probackup-15 +dnf install pg_probackup-16 @@ -863,7 +881,7 @@ dnf install pg_probackup-15 Optionally install the debug package -dnf install pg_probackup-15-debuginfo +dnf install pg_probackup-16-debuginfo @@ -877,7 +895,7 @@ dnf install pg_probackup-15-debuginfo dnf install 'dnf-command(download)' -dnf download --source pg_probackup-15 +dnf download --source pg_probackup-16 @@ -885,7 +903,7 @@ dnf download --source pg_probackup-15 Using yum: -yumdownloader --source pg_probackup-15 +yumdownloader --source pg_probackup-16 @@ -936,7 +954,7 @@ apt-cache search pg_probackup Install or upgrade a pg_probackup version of your choice -sudo apt-get install pg_probackup-15 +sudo apt-get install pg_probackup-16 @@ -944,7 +962,7 @@ sudo apt-get install pg_probackup-15 Optionally install the debug package -sudo apt-get install pg_probackup-15-debuginfo +sudo apt-get install pg_probackup-16-debuginfo @@ -983,7 +1001,7 @@ zypper se pg_probackup Install or upgrade a pg_probackup version of your choice -zypper in pg_probackup-15 +zypper in pg_probackup-16 @@ -991,7 +1009,7 @@ zypper in pg_probackup-15 Optionally install the source package for rebuilding the binaries -zypper si pg_probackup-15 +zypper si pg_probackup-16 @@ -2107,11 +2125,11 @@ pg_probackup validate -B backup_dir --instance= For example, to check that you can restore the database cluster - from a backup copy with the PT8XFX backup ID up to the + from a backup copy with the SCUN2C backup ID up to the specified timestamp, run this command: -pg_probackup validate -B backup_dir --instance=instance_name -i PT8XFX --recovery-target-time="2017-05-18 14:18:11+03" +pg_probackup validate -B backup_dir --instance=instance_name -i SCUN2C --recovery-target-time="2024-05-03 11:18:13+03" If you specify the backup_id of an incremental backup, @@ -2306,28 +2324,35 @@ pg_probackup restore -B backup_dir 
--instance= -============================================================================================================================================= - Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status -============================================================================================================================================= - node 12 QBRNBP 2020-06-11 17:40:58+03 DELTA ARCHIVE 16/15 40s 194MB 16MB 8.26 15/2C000028 15/2D000128 OK - node 12 QBRIDX 2020-06-11 15:51:42+03 PAGE ARCHIVE 15/15 11s 18MB 16MB 5.10 14/DC000028 14/DD0000B8 OK - node 12 QBRIAJ 2020-06-11 15:51:08+03 PAGE ARCHIVE 15/15 20s 141MB 96MB 6.22 14/D4BABFE0 14/DA9871D0 OK - node 12 QBRHT8 2020-06-11 15:45:56+03 FULL ARCHIVE 15/0 2m:11s 1371MB 416MB 10.93 14/9D000028 14/B782E9A0 OK - -pg_probackup restore -B /backup --instance=node -R -I lsn -INFO: Running incremental restore into nonempty directory: "/var/lib/pgsql/12/data" -INFO: Destination directory redo point 15/2E000028 on tli 16 is within reach of backup QBRIDX with Stop LSN 14/DD0000B8 on tli 15 -INFO: shift LSN: 14/DD0000B8 -INFO: Restoring the database from backup at 2020-06-11 17:40:58+03 -INFO: Extracting the content of destination directory for incremental restore -INFO: Destination directory content extracted, time elapsed: 1s -INFO: Removing redundant files in destination directory -INFO: Redundant files are removed, time elapsed: 1s -INFO: Start restoring backup files. PGDATA size: 15GB -INFO: Backup files are restored. Transfered bytes: 1693MB, time elapsed: 43s -INFO: Restore incremental ratio (less is better): 11% (1693MB/15GB) -INFO: Restore of backup QBRNBP completed. - +====================================================================================================================================== + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status +====================================================================================================================================== + node 16 SCUN3Y 2024-05-02 11:19:16+03 DELTA STREAM 1/1 7s 92MB 208MB 2.27 0/3C0043A8 0/46159C70 OK + node 16 SCUN3M 2024-05-02 11:19:01+03 PTRACK STREAM 1/1 10s 30MB 16MB 2.23 0/32000028 0/32005ED0 OK + node 16 SCUN39 2024-05-02 11:18:50+03 PAGE STREAM 1/1 12s 46MB 32MB 1.44 0/2A000028 0/2B0000B8 OK + node 16 SCUN2V 2024-05-02 11:18:38+03 FULL STREAM 1/0 11s 154MB 16MB 2.32 0/23000028 0/23000168 OK + +backup_user@backup_host:~$ pg_probackup restore -B /mnt/backups --instance=node -R -I lsn +INFO: Destination directory and tablespace directories are empty, disable incremental restore +INFO: Validating parents for backup SCUN3Y +INFO: Validating backup SCUN2V +INFO: Backup SCUN2V data files are valid +INFO: Validating backup SCUN39 +INFO: Backup SCUN39 data files are valid +INFO: Validating backup SCUN3M +INFO: Backup SCUN3M data files are valid +INFO: Validating backup SCUN3Y +INFO: Backup SCUN3Y data files are valid +INFO: Backup SCUN3Y WAL segments are valid +INFO: Backup SCUN3Y is valid. +INFO: Restoring the database from backup SCUN3Y +INFO: Start restoring backup files. PGDATA size: 759MB +INFO: Backup files are restored. Transfered bytes: 759MB, time elapsed: 3s +INFO: Restore incremental ratio (less is better): 100% (759MB/759MB) +INFO: Syncing restored files to disk +INFO: Restored backup files are synced, time elapsed: 1s +INFO: Restore of backup SCUN3Y completed. 
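 For comparison, a minimal sketch of the same restore run against a destination that already holds a compatible, older copy of the cluster, so that the LSN-based incremental mode can actually be applied; the data directory path and thread count here are illustrative only:

backup_user@backup_host:~$ pg_probackup restore -B /mnt/backups --instance=node -D /var/lib/pgpro/std-16/data -I lsn -j 4 --progress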
+ Incremental restore is possible only for backups with @@ -2438,7 +2463,7 @@ pg_probackup restore -B backup_dir --instance= -pg_probackup restore -B backup_dir --instance=instance_name --recovery-target-time="2017-05-18 14:18:11+03" +pg_probackup restore -B backup_dir --instance=instance_name --recovery-target-time="2024-05-03 11:18:13+03" @@ -2791,13 +2816,13 @@ pg_probackup show -B backup_dir BACKUP INSTANCE 'node' ====================================================================================================================================== - Instance Version ID Recovery time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status ====================================================================================================================================== - node 10 PYSUE8 2019-10-03 15:51:48+03 FULL ARCHIVE 1/0 16s 9047kB 16MB 4.31 0/12000028 0/12000160 OK - node 10 P7XDQV 2018-04-29 05:32:59+03 DELTA STREAM 1/1 11s 19MB 16MB 1.00 0/15000060 0/15000198 OK - node 10 P7XDJA 2018-04-29 05:28:36+03 PTRACK STREAM 1/1 21s 32MB 32MB 1.00 0/13000028 0/13000198 OK - node 10 P7XDHU 2018-04-29 05:27:59+03 PAGE STREAM 1/1 15s 33MB 16MB 1.00 0/11000028 0/110001D0 OK - node 10 P7XDHB 2018-04-29 05:27:15+03 FULL STREAM 1/0 11s 39MB 16MB 1.00 0/F000028 0/F000198 OK + node 16 SCUN4E 2024-05-02 11:19:37+03 FULL ARCHIVE 1/0 13s 239MB 16MB 2.31 0/4C000028 0/4D0000B8 OK + node 16 SCUN3Y 2024-05-02 11:19:16+03 DELTA STREAM 1/1 7s 92MB 208MB 2.27 0/3C0043A8 0/46159C70 OK + node 16 SCUN3M 2024-05-02 11:19:01+03 PTRACK STREAM 1/1 10s 30MB 16MB 2.23 0/32000028 0/32005ED0 OK + node 16 SCUN39 2024-05-02 11:18:50+03 PAGE STREAM 1/1 12s 46MB 32MB 1.44 0/2A000028 0/2B0000B8 OK + node 16 SCUN2V 2024-05-02 11:18:38+03 FULL STREAM 1/0 11s 154MB 16MB 2.32 0/23000028 0/23000168 OK For each backup, the following information is provided: @@ -2962,27 +2987,26 @@ from-replica = false #Compatibility block-size = 8192 -wal-block-size = 8192 +xlog-block-size = 8192 checksum-version = 1 -program-version = 2.1.3 -server-version = 10 +program-version = 2.5.14 +server-version = 16 #Result backup info timelineid = 1 -start-lsn = 0/04000028 -stop-lsn = 0/040000f8 -start-time = '2017-05-16 12:57:29' -end-time = '2017-05-16 12:57:31' -recovery-xid = 597 -recovery-time = '2017-05-16 12:57:31' -expire-time = '2020-05-16 12:57:31' -data-bytes = 22288792 +start-lsn = 0/4C000028 +stop-lsn = 0/4D0000B8 +start-time = '2024-05-02 11:19:26+03' +end-time = '2024-05-02 11:19:39+03' +recovery-xid = 743 +recovery-time = '2024-05-02 11:19:37+03' +data-bytes = 250827955 wal-bytes = 16777216 -uncompressed-bytes = 39961833 -pgdata-bytes = 39859393 +uncompressed-bytes = 578216425 +pgdata-bytes = 578216107 status = OK -parent-backup-id = 'PT8XFX' -primary_conninfo = 'user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmode=disable sslcompression=1 target_session_attrs=any' +primary_conninfo = 'user=backup channel_binding=prefer host=localhost port=5432 sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable' +content-crc = 802820606 Detailed output has additional attributes: @@ -3098,37 +3122,39 @@ pg_probackup show -B backup_dir --instance= [ - { - "instance": "node", - "backups": [ - { - "id": "PT91HZ", - "parent-backup-id": "PT8XFX", - "backup-mode": "DELTA", - "wal": "ARCHIVE", - 
"compress-alg": "zlib", - "compress-level": 1, - "from-replica": false, - "block-size": 8192, - "xlog-block-size": 8192, - "checksum-version": 1, - "program-version": "2.1.3", - "server-version": "10", - "current-tli": 16, - "parent-tli": 2, - "start-lsn": "0/8000028", - "stop-lsn": "0/8000160", - "start-time": "2019-06-17 18:25:11+03", - "end-time": "2019-06-17 18:25:16+03", - "recovery-xid": 0, - "recovery-time": "2019-06-17 18:25:15+03", - "data-bytes": 106733, - "wal-bytes": 16777216, - "primary_conninfo": "user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmode=disable sslcompression=1 target_session_attrs=any", - "status": "OK" - } - ] - } + { + "instance": "node", + "backups": [ + { + "id": "SCUN4E", + "backup-mode": "FULL", + "wal": "ARCHIVE", + "compress-alg": "zlib", + "compress-level": 1, + "from-replica": "false", + "block-size": 8192, + "xlog-block-size": 8192, + "checksum-version": 1, + "program-version": "2.5.14", + "server-version": "16", + "current-tli": 1, + "parent-tli": 0, + "start-lsn": "0/4C000028", + "stop-lsn": "0/4D0000B8", + "start-time": "2024-05-02 11:19:26+03", + "end-time": "2024-05-02 11:19:39+03", + "recovery-xid": 743, + "recovery-time": "2024-05-02 11:19:37+03", + "data-bytes": 250827955, + "wal-bytes": 16777216, + "uncompressed-bytes": 578216425, + "pgdata-bytes": 578216107, + "primary_conninfo": "user=backup channel_binding=prefer host=localhost port=5432 sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable", + "status": "OK", + "content-crc": 802820606 + } + ] + } ] @@ -3146,15 +3172,12 @@ pg_probackup show -B backup_dir [--instance= + ARCHIVE INSTANCE 'node' -=================================================================================================================================== - TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status -=================================================================================================================================== - 5 1 0/B000000 00000005000000000000000B 00000005000000000000000C 2 685kB 48.00 0 OK - 4 3 0/18000000 000000040000000000000018 00000004000000000000001A 3 648kB 77.00 0 OK - 3 2 0/15000000 000000030000000000000015 000000030000000000000017 3 648kB 77.00 0 OK - 2 1 0/B000108 00000002000000000000000B 000000020000000000000015 5 892kB 94.00 1 DEGRADED - 1 0 0/0 000000010000000000000001 00000001000000000000000A 10 8774kB 19.00 1 OK +================================================================================================================================ + TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status +================================================================================================================================ + 1 0 0/0 000000010000000000000019 00000001000000000000004D 53 848MB 1.00 5 OK For each timeline, the following information is provided: @@ -3245,212 +3268,169 @@ pg_probackup show -B backup_dir [--instance= [ - { - "instance": "replica", - "timelines": [ - { - "tli": 5, - "parent-tli": 1, - "switchpoint": "0/B000000", - "min-segno": "00000005000000000000000B", - "max-segno": "00000005000000000000000C", - "n-segments": 2, - "size": 685320, - "zratio": 48.00, - "closest-backup-id": "PXS92O", - "status": "OK", - "lost-segments": [], - "backups": [] - }, - { - "tli": 4, - "parent-tli": 3, - "switchpoint": "0/18000000", - "min-segno": 
"000000040000000000000018", - "max-segno": "00000004000000000000001A", - "n-segments": 3, - "size": 648625, - "zratio": 77.00, - "closest-backup-id": "PXS9CE", - "status": "OK", - "lost-segments": [], - "backups": [] - }, - { - "tli": 3, - "parent-tli": 2, - "switchpoint": "0/15000000", - "min-segno": "000000030000000000000015", - "max-segno": "000000030000000000000017", - "n-segments": 3, - "size": 648911, - "zratio": 77.00, - "closest-backup-id": "PXS9CE", - "status": "OK", - "lost-segments": [], - "backups": [] - }, - { - "tli": 2, - "parent-tli": 1, - "switchpoint": "0/B000108", - "min-segno": "00000002000000000000000B", - "max-segno": "000000020000000000000015", - "n-segments": 5, - "size": 892173, - "zratio": 94.00, - "closest-backup-id": "PXS92O", - "status": "DEGRADED", - "lost-segments": [ - { - "begin-segno": "00000002000000000000000D", - "end-segno": "00000002000000000000000E" - }, - { - "begin-segno": "000000020000000000000010", - "end-segno": "000000020000000000000012" - } - ], - "backups": [ - { - "id": "PXS9CE", - "backup-mode": "FULL", - "wal": "ARCHIVE", - "compress-alg": "none", - "compress-level": 1, - "from-replica": "false", - "block-size": 8192, - "xlog-block-size": 8192, - "checksum-version": 1, - "program-version": "2.1.5", - "server-version": "10", - "current-tli": 2, - "parent-tli": 0, - "start-lsn": "0/C000028", - "stop-lsn": "0/C000160", - "start-time": "2019-09-13 21:43:26+03", - "end-time": "2019-09-13 21:43:30+03", - "recovery-xid": 0, - "recovery-time": "2019-09-13 21:43:29+03", - "data-bytes": 104674852, - "wal-bytes": 16777216, - "primary_conninfo": "user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmode=disable sslcompression=1 target_session_attrs=any", - "status": "OK" - } - ] - }, - { - "tli": 1, - "parent-tli": 0, - "switchpoint": "0/0", - "min-segno": "000000010000000000000001", - "max-segno": "00000001000000000000000A", - "n-segments": 10, - "size": 8774805, - "zratio": 19.00, - "closest-backup-id": "", - "status": "OK", - "lost-segments": [], - "backups": [ - { - "id": "PXS92O", - "backup-mode": "FULL", - "wal": "ARCHIVE", - "compress-alg": "none", - "compress-level": 1, - "from-replica": "true", - "block-size": 8192, - "xlog-block-size": 8192, - "checksum-version": 1, - "program-version": "2.1.5", - "server-version": "10", - "current-tli": 1, - "parent-tli": 0, - "start-lsn": "0/4000028", - "stop-lsn": "0/6000028", - "start-time": "2019-09-13 21:37:36+03", - "end-time": "2019-09-13 21:38:45+03", - "recovery-xid": 0, - "recovery-time": "2019-09-13 21:37:30+03", - "data-bytes": 25987319, - "wal-bytes": 50331648, - "primary_conninfo": "user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmode=disable sslcompression=1 target_session_attrs=any", - "status": "OK" - } - ] - } - ] - }, - { - "instance": "master", - "timelines": [ - { - "tli": 1, - "parent-tli": 0, - "switchpoint": "0/0", - "min-segno": "000000010000000000000001", - "max-segno": "00000001000000000000000B", - "n-segments": 11, - "size": 8860892, - "zratio": 20.00, - "status": "OK", - "lost-segments": [], - "backups": [ - { - "id": "PXS92H", - "parent-backup-id": "PXS92C", - "backup-mode": "PAGE", - "wal": "ARCHIVE", - "compress-alg": "none", - "compress-level": 1, - "from-replica": "false", - "block-size": 8192, - "xlog-block-size": 8192, - "checksum-version": 1, - "program-version": "2.1.5", - "server-version": "10", - "current-tli": 1, - "parent-tli": 1, - "start-lsn": "0/4000028", - "stop-lsn": "0/50000B8", - "start-time": "2019-09-13 21:37:29+03", - "end-time": "2019-09-13 
21:37:31+03", - "recovery-xid": 0, - "recovery-time": "2019-09-13 21:37:30+03", - "data-bytes": 1328461, - "wal-bytes": 33554432, - "primary_conninfo": "user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmode=disable sslcompression=1 target_session_attrs=any", - "status": "OK" - }, - { - "id": "PXS92C", - "backup-mode": "FULL", - "wal": "ARCHIVE", - "compress-alg": "none", - "compress-level": 1, - "from-replica": "false", - "block-size": 8192, - "xlog-block-size": 8192, - "checksum-version": 1, - "program-version": "2.1.5", - "server-version": "10", - "current-tli": 1, - "parent-tli": 0, - "start-lsn": "0/2000028", - "stop-lsn": "0/2000160", - "start-time": "2019-09-13 21:37:24+03", - "end-time": "2019-09-13 21:37:29+03", - "recovery-xid": 0, - "recovery-time": "2019-09-13 21:37:28+03", - "data-bytes": 24871902, - "wal-bytes": 16777216, - "primary_conninfo": "user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmode=disable sslcompression=1 target_session_attrs=any", - "status": "OK" - } - ] - } - ] - } + { + "instance": "node", + "timelines": [ + { + "tli": 1, + "parent-tli": 0, + "switchpoint": "0/0", + "min-segno": "000000010000000000000019", + "max-segno": "00000001000000000000004D", + "n-segments": 53, + "size": 889192448, + "zratio": 1.00, + "closest-backup-id": "", + "status": "OK", + "lost-segments": [], + "backups": [ + { + "id": "SCUN4E", + "backup-mode": "FULL", + "wal": "ARCHIVE", + "compress-alg": "zlib", + "compress-level": 1, + "from-replica": "false", + "block-size": 8192, + "xlog-block-size": 8192, + "checksum-version": 1, + "program-version": "2.5.14", + "server-version": "16", + "current-tli": 1, + "parent-tli": 0, + "start-lsn": "0/4C000028", + "stop-lsn": "0/4D0000B8", + "start-time": "2024-05-02 11:19:26+03", + "end-time": "2024-05-02 11:19:39+03", + "recovery-xid": 743, + "recovery-time": "2024-05-02 11:19:37+03", + "data-bytes": 250827955, + "wal-bytes": 16777216, + "uncompressed-bytes": 578216425, + "pgdata-bytes": 578216107, + "primary_conninfo": "user=backup channel_binding=prefer host=localhost port=5432 sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable", + "status": "OK", + "content-crc": 802820606 + }, + { + "id": "SCUN3Y", + "parent-backup-id": "SCUN3M", + "backup-mode": "DELTA", + "wal": "STREAM", + "compress-alg": "zlib", + "compress-level": 1, + "from-replica": "false", + "block-size": 8192, + "xlog-block-size": 8192, + "checksum-version": 1, + "program-version": "2.5.14", + "server-version": "16", + "current-tli": 1, + "parent-tli": 1, + "start-lsn": "0/3C0043A8", + "stop-lsn": "0/46159C70", + "start-time": "2024-05-02 11:19:10+03", + "end-time": "2024-05-02 11:19:17+03", + "recovery-xid": 743, + "recovery-time": "2024-05-02 11:19:16+03", + "data-bytes": 96029293, + "wal-bytes": 218103808, + "uncompressed-bytes": 217639806, + "pgdata-bytes": 578216107, + "primary_conninfo": "user=backup channel_binding=prefer host=localhost port=5432 sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable", + "status": "OK", + "content-crc": 3074300814 + }, + { + "id": "SCUN3M", + "parent-backup-id": "SCUN39", + "backup-mode": "PTRACK", + "wal": "STREAM", + "compress-alg": "zlib", + "compress-level": 1, + "from-replica": "false", + "block-size": 8192, + "xlog-block-size": 8192, 
+ "checksum-version": 1, + "program-version": "2.5.14", + "server-version": "16", + "current-tli": 1, + "parent-tli": 1, + "start-lsn": "0/32000028", + "stop-lsn": "0/32005ED0", + "start-time": "2024-05-02 11:18:58+03", + "end-time": "2024-05-02 11:19:08+03", + "recovery-xid": 742, + "recovery-time": "2024-05-02 11:19:01+03", + "data-bytes": 31205704, + "wal-bytes": 16777216, + "uncompressed-bytes": 69585790, + "pgdata-bytes": 509927595, + "primary_conninfo": "user=backup channel_binding=prefer host=localhost port=5432 sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable", + "status": "OK", + "content-crc": 3446949708 + }, + { + "id": "SCUN39", + "parent-backup-id": "SCUN2V", + "backup-mode": "PAGE", + "wal": "STREAM", + "compress-alg": "pglz", + "compress-level": 1, + "from-replica": "false", + "block-size": 8192, + "xlog-block-size": 8192, + "checksum-version": 1, + "program-version": "2.5.14", + "server-version": "16", + "current-tli": 1, + "parent-tli": 1, + "start-lsn": "0/2A000028", + "stop-lsn": "0/2B0000B8", + "start-time": "2024-05-02 11:18:45+03", + "end-time": "2024-05-02 11:18:57+03", + "recovery-xid": 741, + "recovery-time": "2024-05-02 11:18:50+03", + "data-bytes": 48381612, + "wal-bytes": 33554432, + "uncompressed-bytes": 69569406, + "pgdata-bytes": 441639083, + "primary_conninfo": "user=backup channel_binding=prefer host=localhost port=5432 sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable", + "status": "OK", + "content-crc": 3492989773 + }, + { + "id": "SCUN2V", + "backup-mode": "FULL", + "wal": "STREAM", + "compress-alg": "zlib", + "compress-level": 1, + "from-replica": "false", + "block-size": 8192, + "xlog-block-size": 8192, + "checksum-version": 1, + "program-version": "2.5.14", + "server-version": "16", + "current-tli": 1, + "parent-tli": 0, + "start-lsn": "0/23000028", + "stop-lsn": "0/23000168", + "start-time": "2024-05-02 11:18:31+03", + "end-time": "2024-05-02 11:18:42+03", + "recovery-xid": 740, + "recovery-time": "2024-05-02 11:18:38+03", + "data-bytes": 161084290, + "wal-bytes": 16777216, + "uncompressed-bytes": 373359081, + "pgdata-bytes": 373358763, + "primary_conninfo": "user=backup channel_binding=prefer host=localhost port=5432 sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable", + "status": "OK", + "content-crc": 1621343133 + } + ] + } + ] + } ] @@ -3597,33 +3577,33 @@ pg_probackup delete -B backup_dir --instance=backup_dir directory, with the option set to 7, and you have the following backups - available on April 10, 2019: + available on May 02, 2024: BACKUP INSTANCE 'node' -=================================================================================================================================== - Instance Version ID Recovery time Mode WAL TLI Time Data WAL Zratio Start LSN Stop LSN Status -=================================================================================================================================== - node 10 P7XDHR 2019-04-10 05:27:15+03 FULL STREAM 1/0 11s 200MB 16MB 1.0 0/18000059 0/18000197 OK - node 10 P7XDQV 2019-04-08 05:32:59+03 PAGE STREAM 1/0 11s 19MB 16MB 1.0 0/15000060 0/15000198 
OK - node 10 P7XDJA 2019-04-03 05:28:36+03 DELTA STREAM 1/0 21s 32MB 16MB 1.0 0/13000028 0/13000198 OK - -------------------------------------------------------retention window-------------------------------------------------------- - node 10 P7XDHU 2019-04-02 05:27:59+03 PAGE STREAM 1/0 31s 33MB 16MB 1.0 0/11000028 0/110001D0 OK - node 10 P7XDHB 2019-04-01 05:27:15+03 FULL STREAM 1/0 11s 200MB 16MB 1.0 0/F000028 0/F000198 OK - node 10 P7XDFT 2019-03-29 05:26:25+03 FULL STREAM 1/0 11s 200MB 16MB 1.0 0/D000028 0/D000198 OK - - - Even though P7XDHB and P7XDHU backups are outside the +===================================================================================================================================== + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status +===================================================================================================================================== + node 16 SCUN6L 2024-05-02 11:20:48+03 FULL ARCHIVE 1/0 5s 296MB 16MB 2.30 0/46000028 0/470000B8 OK + node 16 SCQXUI 2024-04-30 11:20:45+03 PAGE ARCHIVE 1/1 5s 6280kB 16MB 1.00 0/44000028 0/450000F0 OK + node 16 SCFTUG 2024-04-24 11:20:43+03 DELTA ARCHIVE 1/1 5s 6280kB 16MB 1.00 0/42000028 0/430000B8 OK +----------------------------------------------------------retention window----------------------------------------------------------- + node 16 SCDZ6D 2024-04-23 11:20:40+03 PAGE ARCHIVE 1/1 5s 6280kB 16MB 1.00 0/40000028 0/410000B8 OK + node 16 SCC4HX 2024-04-22 11:20:24+03 FULL ARCHIVE 1/0 5s 296MB 16MB 2.30 0/3E000028 0/3F0000F0 OK + node 16 SC8F5G 2024-04-20 11:20:07+03 FULL ARCHIVE 1/0 5s 296MB 16MB 2.30 0/3C0000D8 0/3D00BB58 OK + + + Even though SCC4HX and SCDZ6D backups are outside the retention window, they cannot be removed as it invalidates the - succeeding incremental backups P7XDJA and P7XDQV that are + succeeding incremental backups SCFTUG and SCQXUI that are still required, so, if you run the command with the - flag, only the P7XDFT full + flag, only the SC8F5G full backup will be removed. 
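        A minimal sketch of that purge, assuming the backup_dir placeholder and the node
        instance used in the surrounding examples; the --delete-expired and
        --retention-window options are taken here from pg_probackup's documented delete
        syntax, not from this hunk:

pg_probackup delete -B backup_dir --instance=node --delete-expired --retention-window=7

        Run this way, only expired backups that no longer anchor a required incremental
        chain are removed, matching the behaviour described above.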
- With the option, the P7XDJA - backup is merged with the underlying P7XDHU and P7XDHB backups + With the option, the SCC4HX + backup is merged with the underlying SCC4HX and SCC4HX backups and becomes a full one, so there is no need to keep these expired backups anymore: @@ -3633,12 +3613,14 @@ pg_probackup show -B backup_dir BACKUP INSTANCE 'node' -================================================================================================================================== - Instance Version ID Recovery time Mode WAL TLI Time Data WAL Zratio Start LSN Stop LSN Status -================================================================================================================================== - node 10 P7XDHR 2019-04-10 05:27:15+03 FULL STREAM 1/0 11s 200MB 16MB 1.0 0/18000059 0/18000197 OK - node 10 P7XDQV 2019-04-08 05:32:59+03 PAGE STREAM 1/0 11s 19MB 16MB 1.0 0/15000060 0/15000198 OK - node 10 P7XDJA 2019-04-03 05:28:36+03 FULL STREAM 1/0 21s 32MB 16MB 1.0 0/13000028 0/13000198 OK +===================================================================================================================================== + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status +===================================================================================================================================== + node 16 SCUN6L 2024-05-02 11:20:48+03 FULL ARCHIVE 1/0 5s 296MB 16MB 2.30 0/46000028 0/470000B8 OK + node 16 SCQXUI 2024-04-30 11:20:45+03 PAGE ARCHIVE 1/1 5s 6280kB 16MB 1.00 0/44000028 0/450000F0 OK + node 16 SCFTUG 2024-04-24 11:20:43+03 DELTA ARCHIVE 1/1 5s 6280kB 16MB 1.00 0/42000028 0/430000B8 OK + node 16 SCDZ6D 2024-04-23 11:20:40+03 PAGE ARCHIVE 1/1 5s 6280kB 16MB 1.00 0/40000028 0/410000B8 OK + node 16 SCC4HX 2024-04-22 11:20:24+03 FULL ARCHIVE 1/0 5s 296MB 16MB 2.30 0/3E000028 0/3F0000F0 OK The Time field for the merged backup displays the time @@ -3666,7 +3648,7 @@ pg_probackup set-backup -B backup_dir --instance=--expire-time option. For example: -pg_probackup set-backup -B backup_dir --instance=instance_name -i backup_id --expire-time="2020-01-01 00:00:00+03" +pg_probackup set-backup -B backup_dir --instance=instance_name -i backup_id --expire-time="2027-05-02 11:21:00+00" Alternatively, you can use the and @@ -3676,7 +3658,7 @@ pg_probackup set-backup -B backup_dir --instance= pg_probackup backup -B backup_dir --instance=instance_name -b FULL --ttl=30d -pg_probackup backup -B backup_dir --instance=instance_name -b FULL --expire-time="2020-01-01 00:00:00+03" +pg_probackup backup -B backup_dir --instance=instance_name -b FULL --expire-time="2027-05-02 11:21:00+00" To check if the backup is pinned, @@ -3690,8 +3672,8 @@ pg_probackup show -B backup_dir --instance= ... -recovery-time = '2017-05-16 12:57:31' -expire-time = '2020-01-01 00:00:00+03' +recovery-time = '2024-05-02 11:21:00+00' +expire-time = '2027-05-02 11:21:00+00' data-bytes = 22288792 ... 
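        A complementary sketch, assuming the same backup_dir and backup_id placeholders:
        the pin shown in the output above can later be cleared by passing a zero TTL to
        set-backup, after which the backup again follows the regular retention policy.

pg_probackup set-backup -B backup_dir --instance=node -i backup_id --ttl=0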
@@ -3767,16 +3749,15 @@ pg_probackup set-backup -B backup_dir --instance=backup_dir --instance=node -BACKUP INSTANCE 'node' -==================================================================================================================================== - Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status -==================================================================================================================================== - node 11 PZ9442 2019-10-12 10:43:21+03 DELTA STREAM 1/0 10s 121kB 16MB 1.00 0/46000028 0/46000160 OK - node 11 PZ943L 2019-10-12 10:43:04+03 FULL STREAM 1/0 10s 180MB 32MB 1.00 0/44000028 0/44000160 OK - node 11 PZ7YR5 2019-10-11 19:49:56+03 DELTA STREAM 1/1 10s 112kB 32MB 1.00 0/41000028 0/41000160 OK - node 11 PZ7YMP 2019-10-11 19:47:16+03 DELTA STREAM 1/1 10s 376kB 32MB 1.00 0/3E000028 0/3F0000B8 OK - node 11 PZ7YK2 2019-10-11 19:45:45+03 FULL STREAM 1/0 11s 180MB 16MB 1.00 0/3C000028 0/3C000198 OK - node 11 PZ7YFO 2019-10-11 19:43:04+03 FULL STREAM 1/0 10s 30MB 16MB 1.00 0/2000028 0/200ADD8 OK +====================================================================================================================================== + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status +====================================================================================================================================== + node 16 SCUN92 2024-05-02 11:22:16+03 DELTA STREAM 1/1 9s 1162kB 32MB 1.08 0/7C000028 0/7C000168 OK + node 16 SCUN8N 2024-05-02 11:22:09+03 FULL STREAM 1/0 12s 296MB 16MB 2.30 0/7A000028 0/7A009A08 OK + node 16 SCUN8I 2024-05-02 11:21:55+03 DELTA STREAM 1/1 5s 1148kB 32MB 1.01 0/78000028 0/78000168 OK + node 16 SCUN86 2024-05-02 11:21:47+03 DELTA STREAM 1/1 11s 120MB 16MB 2.27 0/76000028 0/760001A0 OK + node 16 SCUN7I 2024-05-02 11:21:29+03 FULL STREAM 1/0 22s 296MB 288MB 2.30 0/63012FE8 0/74E7ADA0 OK + node 16 SCUN71 2024-05-02 11:21:12+03 FULL STREAM 1/0 13s 296MB 272MB 2.30 0/49000028 0/573683B8 OK You can check the state of the WAL archive by running the @@ -3787,11 +3768,12 @@ BACKUP INSTANCE 'node' pg_probackup show -B backup_dir --instance=node --archive + ARCHIVE INSTANCE 'node' -=============================================================================================================================== - TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status -=============================================================================================================================== - 1 0 0/0 000000010000000000000001 000000010000000000000047 71 36MB 31.00 6 OK +================================================================================================================================ + TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status +================================================================================================================================ + 1 0 0/0 000000010000000000000048 00000001000000000000007C 53 848MB 1.00 6 OK WAL purge without cannot @@ -3801,11 +3783,12 @@ ARCHIVE INSTANCE 'node' pg_probackup delete -B backup_dir --instance=node --delete-wal + ARCHIVE INSTANCE 'node' -=============================================================================================================================== - TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status 
-=============================================================================================================================== - 1 0 0/0 000000010000000000000002 000000010000000000000047 70 34MB 32.00 6 OK +================================================================================================================================ + TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status +================================================================================================================================ + 1 0 0/0 000000010000000000000049 00000001000000000000007C 52 832MB 1.00 6 OK If you would like, for example, to keep only those WAL @@ -3816,11 +3799,12 @@ ARCHIVE INSTANCE 'node' pg_probackup delete -B backup_dir --instance=node --delete-wal --wal-depth=1 + ARCHIVE INSTANCE 'node' -================================================================================================================================ - TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status -================================================================================================================================ - 1 0 0/0 000000010000000000000046 000000010000000000000047 2 143kB 228.00 6 OK +=============================================================================================================================== + TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status +=============================================================================================================================== + 1 0 0/0 00000001000000000000007C 00000001000000000000007C 1 16MB 1.00 6 OK Alternatively, you can use the @@ -3830,11 +3814,12 @@ ARCHIVE INSTANCE 'node' pg_probackup backup -B backup_dir --instance=node -b DELTA --wal-depth=1 --delete-wal + ARCHIVE INSTANCE 'node' =============================================================================================================================== - TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status + TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status =============================================================================================================================== - 1 0 0/0 000000010000000000000048 000000010000000000000049 1 72kB 228.00 7 OK + 1 0 0/0 00000001000000000000007E 00000001000000000000007E 1 16MB 1.00 7 OK @@ -3997,10 +3982,7 @@ pg_probackup delete -B backup_dir --instance= DDL commands - CREATE TABLESPACE/DROP TABLESPACE + CREATE TABLESPACE/DROP TABLESPACE cannot be run simultaneously with catchup. @@ -5375,7 +5357,7 @@ pg_probackup catchup -b catchup_mode If the time zone offset is not specified, the local time zone is used. - Example: --recovery-target-time="2020-01-01 00:00:00+03" + Example: --recovery-target-time="2027-05-02 11:21:00+00" @@ -5565,7 +5547,7 @@ pg_probackup catchup -b catchup_mode If the time zone offset is not specified, the local time zone is used. 
- Example: --expire-time="2020-01-01 00:00:00+03" + Example: --expire-time="2027-05-02 11:21:00+00" From 28bc0f6358b0af4958788fe1f36f6af6bac6f6b1 Mon Sep 17 00:00:00 2001 From: Oleg Gurev Date: Fri, 3 May 2024 10:49:50 +0300 Subject: [PATCH 522/525] [PBCKP-818] Refinery of documentation manually --- doc/pgprobackup.xml | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index f5b2a93eb..95dd1a1d8 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -2327,10 +2327,10 @@ pg_probackup restore -B backup_dir --instance=backup_dir --instance= - With the option, the SCC4HX - backup is merged with the underlying SCC4HX and SCC4HX backups + With the option, the SCFTUG + backup is merged with the underlying SCDZ6D and SCC4HX backups and becomes a full one, so there is no need to keep these expired backups anymore: @@ -3618,9 +3618,7 @@ BACKUP INSTANCE 'node' ===================================================================================================================================== node 16 SCUN6L 2024-05-02 11:20:48+03 FULL ARCHIVE 1/0 5s 296MB 16MB 2.30 0/46000028 0/470000B8 OK node 16 SCQXUI 2024-04-30 11:20:45+03 PAGE ARCHIVE 1/1 5s 6280kB 16MB 1.00 0/44000028 0/450000F0 OK - node 16 SCFTUG 2024-04-24 11:20:43+03 DELTA ARCHIVE 1/1 5s 6280kB 16MB 1.00 0/42000028 0/430000B8 OK - node 16 SCDZ6D 2024-04-23 11:20:40+03 PAGE ARCHIVE 1/1 5s 6280kB 16MB 1.00 0/40000028 0/410000B8 OK - node 16 SCC4HX 2024-04-22 11:20:24+03 FULL ARCHIVE 1/0 5s 296MB 16MB 2.30 0/3E000028 0/3F0000F0 OK + node 16 SCFTUG 2024-04-24 11:20:43+03 FULL ARCHIVE 1/1 5s 296MB 16MB 1.00 0/42000028 0/430000B8 OK The Time field for the merged backup displays the time From f361dda7f3df31fe65f3fb5477ee92d04404abd2 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 16 May 2024 14:08:04 +0300 Subject: [PATCH 523/525] Revert "Replace BACKUP_PATH in the source files" This reverts commit 90a4a4f4b32128ee728c530b1fabba608b3d51eb. --- doc/pgprobackup.xml | 10 +++++----- po/ru.po | 2 +- src/archive.c | 4 ++-- src/catalog.c | 2 +- src/help.c | 22 +++++++++++----------- src/pg_probackup.c | 6 +++--- src/pg_probackup.h | 8 ++++---- src/pg_probackup_state.h | 6 +++--- tests/option_test.py | 2 +- 9 files changed, 31 insertions(+), 31 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 95dd1a1d8..f8f269b7c 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -1146,7 +1146,7 @@ pg_probackup add-instance -B backup_dir -D backup_dir directory and at least read-only access to data_dir directory. If you specify the path to the backup catalog in the - BACKUP_DIR environment variable, you can + BACKUP_PATH environment variable, you can omit the corresponding option when running pg_probackup commands. @@ -5205,14 +5205,14 @@ pg_probackup catchup -b catchup_mode -BACKUP_DIR +BACKUP_PATH Specifies the absolute path to the backup catalog. Backup catalog is a directory where all backup files and meta information are stored. Since this option is required for most of the pg_probackup commands, you are recommended to specify - it once in the BACKUP_DIR environment variable. In this case, + it once in the BACKUP_PATH environment variable. In this case, you do not need to use this option each time on the command line. @@ -5672,7 +5672,7 @@ pg_probackup catchup -b catchup_mode lazily, when the first log message is written. 
- Default: $BACKUP_DIR/log/ + Default: $BACKUP_PATH/log/ @@ -5755,7 +5755,7 @@ pg_probackup catchup -b catchup_mode reached, the log file is rotated once a pg_probackup command is launched, except help and version commands. The time of the last log file creation is stored in - $BACKUP_DIR/log/log_rotation. The zero value disables + $BACKUP_PATH/log/log_rotation. The zero value disables time-based rotation. Supported units: ms, s, min, h, d (min by default). diff --git a/po/ru.po b/po/ru.po index 30f50f797..1263675c2 100644 --- a/po/ru.po +++ b/po/ru.po @@ -811,7 +811,7 @@ msgstr "" #: src/help.c:360 src/help.c:521 src/help.c:588 src/help.c:635 src/help.c:715 #: src/help.c:761 src/help.c:833 #, c-format -msgid " directory for file logging (default: BACKUP_DIR/log)\n" +msgid " directory for file logging (default: BACKUP_PATH/log)\n" msgstr "" #: src/help.c:361 src/help.c:522 src/help.c:589 src/help.c:636 src/help.c:716 diff --git a/src/archive.c b/src/archive.c index e97a1ade8..7d753c8b3 100644 --- a/src/archive.c +++ b/src/archive.c @@ -113,7 +113,7 @@ static parray *setup_push_filelist(const char *archive_status_dir, * set archive_command to * 'pg_probackup archive-push -B /home/anastasia/backup --wal-file-name %f', * to move backups into arclog_path. - * Where archlog_path is $BACKUP_DIR/wal/instance_name + * Where archlog_path is $BACKUP_PATH/wal/instance_name */ void do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *pg_xlog_dir, @@ -1126,7 +1126,7 @@ do_archive_get(InstanceState *instanceState, InstanceConfig *instance, const cha join_path_components(absolute_wal_file_path, current_dir, wal_file_path); /* full filepath to WAL file in archive directory. - * $BACKUP_DIR/wal/instance_name/000000010000000000000001 */ + * $BACKUP_PATH/wal/instance_name/000000010000000000000001 */ join_path_components(backup_wal_file_path, instanceState->instance_wal_subdir_path, wal_file_name); INSTR_TIME_SET_CURRENT(start_time); diff --git a/src/catalog.c b/src/catalog.c index 4da406af3..b29090789 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1437,7 +1437,7 @@ get_multi_timeline_parent(parray *backup_list, parray *tli_list, } /* - * Create backup directory in $BACKUP_DIR + * Create backup directory in $BACKUP_PATH * (with proposed backup->backup_id) * and initialize this directory. 
* If creation of directory fails, then diff --git a/src/help.c b/src/help.c index 48cc1f524..e18706a13 100644 --- a/src/help.c +++ b/src/help.c @@ -372,7 +372,7 @@ help_backup(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -548,7 +548,7 @@ help_restore(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -621,7 +621,7 @@ help_validate(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -674,7 +674,7 @@ help_checkdb(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -760,7 +760,7 @@ help_delete(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -814,7 +814,7 @@ help_merge(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -890,7 +890,7 @@ 
help_set_config(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -1002,7 +1002,7 @@ help_add_instance(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -1072,7 +1072,7 @@ help_archive_push(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -1131,7 +1131,7 @@ help_archive_get(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); @@ -1221,7 +1221,7 @@ help_catchup(void) printf(_(" --error-log-filename=error-log-filename\n")); printf(_(" filename for error logging (default: none)\n")); printf(_(" --log-directory=log-directory\n")); - printf(_(" directory for file logging (default: BACKUP_DIR/log)\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); printf(_(" --log-rotation-size=log-rotation-size\n")); printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); diff --git a/src/pg_probackup.c b/src/pg_probackup.c index e50b05995..fa67ddff5 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -468,10 +468,10 @@ main(int argc, char *argv[]) if (backup_path == NULL) { /* - * If command line argument is not set, try to read BACKUP_DIR + * If command line argument is not set, try to read BACKUP_PATH * from environment variable */ - backup_path = getenv("BACKUP_DIR"); + backup_path = getenv("BACKUP_PATH"); } if (backup_path != NULL) @@ -498,7 +498,7 @@ main(int argc, char *argv[]) backup_subcmd != CATCHUP_CMD) elog(ERROR, "No backup catalog path specified.\n" - "Please specify it either using environment variable BACKUP_DIR or\n" + "Please specify it either using environment 
variable BACKUP_PATH or\n" "command line option --backup-path (-B)"); /* ===== catalogState (END) ======*/ diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 1f4780f58..668f183a7 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -837,13 +837,13 @@ typedef struct InstanceState CatalogState *catalog_state; char instance_name[MAXPGPATH]; //previously global var instance_name - /* $BACKUP_DIR/backups/instance_name */ + /* $BACKUP_PATH/backups/instance_name */ char instance_backup_subdir_path[MAXPGPATH]; - /* $BACKUP_DIR/backups/instance_name/BACKUP_CATALOG_CONF_FILE */ + /* $BACKUP_PATH/backups/instance_name/BACKUP_CATALOG_CONF_FILE */ char instance_config_path[MAXPGPATH]; - - /* $BACKUP_DIR/backups/instance_name */ + + /* $BACKUP_PATH/backups/instance_name */ char instance_wal_subdir_path[MAXPGPATH]; // previously global var arclog_path /* TODO: Make it more specific */ diff --git a/src/pg_probackup_state.h b/src/pg_probackup_state.h index 1d1ff88d0..56d852537 100644 --- a/src/pg_probackup_state.h +++ b/src/pg_probackup_state.h @@ -13,11 +13,11 @@ typedef struct CatalogState { - /* $BACKUP_DIR */ + /* $BACKUP_PATH */ char catalog_path[MAXPGPATH]; //previously global var backup_path - /* $BACKUP_DIR/backups */ + /* $BACKUP_PATH/backups */ char backup_subdir_path[MAXPGPATH]; - /* $BACKUP_DIR/wal */ + /* $BACKUP_PATH/wal */ char wal_subdir_path[MAXPGPATH]; // previously global var arclog_path } CatalogState; diff --git a/tests/option_test.py b/tests/option_test.py index e97da1ef7..d1e8cb3a6 100644 --- a/tests/option_test.py +++ b/tests/option_test.py @@ -25,7 +25,7 @@ def test_without_backup_path_3(self): except ProbackupException as e: self.assertIn( 'ERROR: No backup catalog path specified.\n' + \ - 'Please specify it either using environment variable BACKUP_DIR or\n' + \ + 'Please specify it either using environment variable BACKUP_PATH or\n' + \ 'command line option --backup-path (-B)', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) From 721d5d231118587ed4bd30725b86c8de8366dea7 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 16 May 2024 14:37:37 +0300 Subject: [PATCH 524/525] Up version to 2.5.15 --- doc/pgprobackup.xml | 20 ++++++++++---------- src/pg_probackup.h | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index f8f269b7c..1491059c5 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -558,7 +558,7 @@ backup_user@backup_host:~$ pg_probackup backup \ --remote-user=postgres \ -U backup \ -d backupdb -INFO: Backup start, pg_probackup version: 2.5.14, instance: node, backup ID: SCUN1Q, backup mode: FULL, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1 +INFO: Backup start, pg_probackup version: 2.5.15, instance: node, backup ID: SCUN1Q, backup mode: FULL, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1 INFO: This PostgreSQL instance was initialized with data block checksums. 
Data block corruption will be detected INFO: Database backup start INFO: wait for pg_backup_start() @@ -604,7 +604,7 @@ backup_user@backup_host:~$ pg_probackup backup \ --remote-user=postgres \ -U backup \ -d backupdb -INFO: Backup start, pg_probackup version: 2.5.14, instance: node, backup ID: SCUN22, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1 +INFO: Backup start, pg_probackup version: 2.5.15, instance: node, backup ID: SCUN22, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1 INFO: This PostgreSQL instance was initialized with data block checksums. Data block corruption will be detected INFO: Database backup start INFO: wait for pg_backup_start() @@ -692,7 +692,7 @@ backup_user@backup_host:~$ pg_probackup backup \ --instance=node \ --stream \ --compress-algorithm=zlib -INFO: Backup start, pg_probackup version: 2.5.14, instance: node, backup ID: SCUN2C, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1 +INFO: Backup start, pg_probackup version: 2.5.15, instance: node, backup ID: SCUN2C, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1 INFO: This PostgreSQL instance was initialized with data block checksums. Data block corruption will be detected INFO: Database backup start INFO: wait for pg_backup_start() @@ -2989,7 +2989,7 @@ from-replica = false block-size = 8192 xlog-block-size = 8192 checksum-version = 1 -program-version = 2.5.14 +program-version = 2.5.15 server-version = 16 #Result backup info @@ -3135,7 +3135,7 @@ pg_probackup show -B backup_dir --instance=backup_dir [--instance=backup_dir [--instance=backup_dir [--instance=backup_dir [--instance=backup_dir [--instance= Date: Thu, 16 May 2024 16:24:43 +0300 Subject: [PATCH 525/525] [nojira] Change binary name to pg_probackup-16 --- doc/pgprobackup.xml | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 1491059c5..10e766239 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -529,14 +529,14 @@ doc/src/sgml/pgprobackup.sgml Initialize the backup catalog: -backup_user@backup_host:~$ pg_probackup init -B /mnt/backups +backup_user@backup_host:~$ pg_probackup-16 init -B /mnt/backups INFO: Backup catalog '/mnt/backups' successfully initialized Add a backup instance called mydb to the backup catalog: -backup_user@backup_host:~$ pg_probackup add-instance \ +backup_user@backup_host:~$ pg_probackup-16 add-instance \ -B /mnt/backups \ -D /var/lib/pgpro/std-16/data \ --instance=node \ @@ -548,7 +548,7 @@ INFO: Instance 'node' successfully initialized Make a FULL backup: -backup_user@backup_host:~$ pg_probackup backup \ +backup_user@backup_host:~$ pg_probackup-16 backup \ -B /mnt/backups \ -b FULL \ --instance=node \ @@ -582,7 +582,7 @@ INFO: Backup SCUN1Q completed List the backups of the instance: -backup_user@backup_host:~$ pg_probackup show \ +backup_user@backup_host:~$ pg_probackup-16 show \ -B /mnt/backups \ --instance=node ================================================================================================================================ @@ -594,7 +594,7 @@ backup_user@backup_host:~$ pg_probackup show \ Make an incremental backup in the DELTA mode: -backup_user@backup_host:~$ pg_probackup backup \ +backup_user@backup_host:~$ pg_probackup-16 backup \ -B /mnt/backups \ -b DELTA \ --instance=node \ @@ -631,7 +631,7 @@ INFO: Backup SCUN22 completed 
Add or modify some parameters in the pg_probackup configuration file, so that you do not have to specify them each time on the command line: -backup_user@backup_host:~$ pg_probackup set-config \ +backup_user@backup_host:~$ pg_probackup-16 set-config \ -B /mnt/backups \ --instance=node \ --remote-host=postgres_host \ @@ -643,7 +643,7 @@ backup_user@backup_host:~$ pg_probackup set-config \ Check the configuration of the instance: -backup_user@backup_host:~$ pg_probackup show-config \ +backup_user@backup_host:~$ pg_probackup-16 show-config \ -B /mnt/backups \ --instance=node # Backup instance information @@ -686,7 +686,7 @@ remote-user = postgres Make another incremental backup in the DELTA mode, omitting the parameters stored in the configuration file earlier: -backup_user@backup_host:~$ pg_probackup backup \ +backup_user@backup_host:~$ pg_probackup-16 backup \ -B /mnt/backups \ -b DELTA \ --instance=node \ @@ -718,7 +718,7 @@ INFO: Backup SCUN2C completed List the backups of the instance again: -backup_user@backup_host:~$ pg_probackup show \ +backup_user@backup_host:~$ pg_probackup-16 show \ -B /mnt/backups \ --instance=node =================================================================================================================================== @@ -732,7 +732,7 @@ backup_user@backup_host:~$ pg_probackup show \ Restore the data from the latest available backup to an arbitrary location: -backup_user@backup_host:~$ pg_probackup restore \ +backup_user@backup_host:~$ pg_probackup-16 restore \ -B /mnt/backups \ -D /var/lib/pgpro/std-16/staging-data \ --instance=node @@ -2332,7 +2332,7 @@ pg_probackup restore -B backup_dir --instance=