From 822111e278afb1318a9ae98cab954f971129606c Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Tue, 19 Jul 2022 14:58:51 +0300 Subject: [PATCH 01/17] [PBCKP-232] remove 9.5-9.6 support, part 1 --- .travis.yml | 1 + README.md | 72 +++++++-------- src/backup.c | 10 --- src/catchup.c | 8 -- src/parsexlog.c | 5 -- src/pg_probackup.c | 7 +- src/pg_probackup.h | 8 -- src/stream.c | 42 ++------- src/util.c | 42 +-------- src/utils/pgut.c | 18 +--- tests/backup.py | 149 ++++++++------------------------ tests/checkdb.py | 60 +------------ tests/helpers/ptrack_helpers.py | 61 ++++--------- 13 files changed, 100 insertions(+), 383 deletions(-) diff --git a/.travis.yml b/.travis.yml index 26b2bc4e2..ed932b68e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -52,6 +52,7 @@ env: jobs: allow_failures: - if: env(PG_BRANCH) = master + - if: env(PG_BRANCH) = REL9_6_STABLE - if: env(PG_BRANCH) = REL9_5_STABLE # - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) diff --git a/README.md b/README.md index 5da8d199e..281116cce 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ `pg_probackup` is a utility to manage backup and recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure. The utility is compatible with: -* PostgreSQL 9.6, 10, 11, 12, 13, 14; +* PostgreSQL 10, 11, 12, 13, 14; As compared to other backup solutions, `pg_probackup` offers the following benefits that can help you implement different backup strategies and deal with large amounts of data: * Incremental backup: page-level incremental backup allows you to save disk space, speed up backup and restore. With three different incremental modes, you can plan the backup strategy in accordance with your data flow. @@ -74,62 +74,62 @@ Installers are available in release **assets**. 
[Latests](https://github.com/pos #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6}-dbg +sudo apt-get install pg-probackup-{14,13,12,11,10} +sudo apt-get install pg-probackup-{14,13,12,11,10}-dbg #DEB-SRC Packages sudo sh -c 'echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\ /etc/apt/sources.list.d/pg_probackup.list' && sudo apt-get update -sudo apt-get source pg-probackup-{14,13,12,11,10,9.6} +sudo apt-get source pg-probackup-{14,13,12,11,10} #DEB Astra Linix Orel sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6}{-dbg,} +sudo apt-get install pg-probackup-{14,13,12,11,10}{-dbg,} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{14,13,12,11,10} +yum install pg_probackup-{14,13,12,11,10}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-rhel.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{14,13,12,11,10} +yum install pg_probackup-{14,13,12,11,10}-debuginfo #RPM Oracle Linux Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-oraclelinux.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{14,13,12,11,10} +yum install pg_probackup-{14,13,12,11,10}-debuginfo #SRPM Centos|RHEL|OracleLinux Packages -yumdownloader --source pg_probackup-{14,13,12,11,10,9.6} +yumdownloader --source pg_probackup-{14,13,12,11,10} #RPM SUSE|SLES Packages zypper install --allow-unsigned-rpm -y https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-suse.noarch.rpm -zypper --gpg-auto-import-keys install -y pg_probackup-{14,13,12,11,10,9.6} -zypper install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +zypper --gpg-auto-import-keys install -y pg_probackup-{14,13,12,11,10} +zypper install pg_probackup-{14,13,12,11,10}-debuginfo #SRPM SUSE|SLES Packages -zypper si pg_probackup-{14,13,12,11,10,9.6} +zypper si pg_probackup-{14,13,12,11,10} #RPM ALT Linux 7 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p7 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{14,13,12,11,10} +sudo apt-get install pg_probackup-{14,13,12,11,10}-debuginfo #RPM ALT Linux 8 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p8 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get 
install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{14,13,12,11,10} +sudo apt-get install pg_probackup-{14,13,12,11,10}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p9 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{14,13,12,11,10} +sudo apt-get install pg_probackup-{14,13,12,11,10}-debuginfo ``` #### pg_probackup for PostgresPro Standard and Enterprise @@ -137,46 +137,46 @@ sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup-forks.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10,9.6}-dbg +sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10} +sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10}-dbg #DEB Astra Linix Orel sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{std,ent}-{12,11,10,9.6}{-dbg,} +sudo apt-get install pg-probackup-{std,ent}-{12,11,10}{-dbg,} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-centos.noarch.rpm -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{13,12,11,10} +yum install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-rhel.noarch.rpm -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{13,12,11,10} +yum install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM Oracle Linux Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-oraclelinux.noarch.rpm -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{13,12,11,10} +yum install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM ALT Linux 7 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p7 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10} +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM ALT Linux 8 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p8 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' sudo apt-get update -sudo apt-get 
install pg_probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10} +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p9 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' && sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10} +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo ``` Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.github.io/pg_probackup/#pbk-install-and-setup). diff --git a/src/backup.c b/src/backup.c index 84b503245..0edb57710 100644 --- a/src/backup.c +++ b/src/backup.c @@ -133,12 +133,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, pg_start_backup(label, smooth_checkpoint, ¤t, nodeInfo, backup_conn); /* Obtain current timeline */ -#if PG_VERSION_NUM >= 90600 current.tli = get_current_timeline(backup_conn); -#else - /* PG-9.5 */ - current.tli = get_current_timeline_from_control(FIO_DB_HOST, instance_config.pgdata, false); -#endif /* * In incremental backup mode ensure that already-validated @@ -1053,7 +1048,6 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup, /* * Switch to a new WAL segment. It should be called only for master. - * For PG 9.5 it should be called only if pguser is superuser. */ void pg_switch_wal(PGconn *conn) @@ -1062,11 +1056,7 @@ pg_switch_wal(PGconn *conn) pg_silent_client_messages(conn); -#if PG_VERSION_NUM >= 100000 res = pgut_execute(conn, "SELECT pg_catalog.pg_switch_wal()", 0, NULL); -#else - res = pgut_execute(conn, "SELECT pg_catalog.pg_switch_xlog()", 0, NULL); -#endif PQclear(res); } diff --git a/src/catchup.c b/src/catchup.c index 385d8e9df..522279ac9 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -66,13 +66,7 @@ catchup_init_state(PGNodeInfo *source_node_info, const char *source_pgdata, cons source_node_info->is_ptrack_enabled = pg_is_ptrack_enabled(source_conn, source_node_info->ptrack_version_num); /* Obtain current timeline */ -#if PG_VERSION_NUM >= 90600 current.tli = get_current_timeline(source_conn); -#else - /* PG-9.5 */ - instance_config.pgdata = source_pgdata; - current.tli = get_current_timeline_from_control(FIO_DB_HOST, source_pgdata, false); -#endif elog(INFO, "Catchup start, pg_probackup version: %s, " "PostgreSQL version: %s, " @@ -1033,7 +1027,6 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (!dry_run) wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, ¤t); -#if PG_VERSION_NUM >= 90600 /* Write backup_label */ Assert(stop_backup_result.backup_label_content != NULL); if (!dry_run) @@ -1061,7 +1054,6 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, stop_backup_result.tablespace_map_content = NULL; stop_backup_result.tablespace_map_content_len = 0; } -#endif /* wait for end of wal streaming and calculate wal size transfered */ if (!dry_run) diff --git a/src/parsexlog.c b/src/parsexlog.c index df9b96fb3..39fb64f0a 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -29,13 +29,8 @@ * RmgrNames is an array of resource manager names, to make error messages * a bit nicer. 
*/ -#if PG_VERSION_NUM >= 100000 #define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup,mask) \ name, -#else -#define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup) \ - name, -#endif static const char *RmgrNames[RM_MAX_ID + 1] = { #include "access/rmgrlist.h" diff --git a/src/pg_probackup.c b/src/pg_probackup.c index ff5ab85d3..b0245f864 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -78,10 +78,8 @@ pid_t my_pid = 0; __thread int my_thread_num = 1; bool progress = false; bool no_sync = false; -#if PG_VERSION_NUM >= 100000 char *replication_slot = NULL; bool temp_slot = false; -#endif bool perm_slot = false; /* backup options */ @@ -205,9 +203,7 @@ static ConfigOption cmd_options[] = { 'f', 'b', "backup-mode", opt_backup_mode, SOURCE_CMD_STRICT }, { 'b', 'C', "smooth-checkpoint", &smooth_checkpoint, SOURCE_CMD_STRICT }, { 's', 'S', "slot", &replication_slot, SOURCE_CMD_STRICT }, -#if PG_VERSION_NUM >= 100000 { 'b', 181, "temp-slot", &temp_slot, SOURCE_CMD_STRICT }, -#endif { 'b', 'P', "perm-slot", &perm_slot, SOURCE_CMD_STRICT }, { 'b', 182, "delete-wal", &delete_wal, SOURCE_CMD_STRICT }, { 'b', 183, "delete-expired", &delete_expired, SOURCE_CMD_STRICT }, @@ -905,14 +901,13 @@ main(int argc, char *argv[]) wal_file_name, instanceState->instance_name, instance_config.system_identifier, system_id); } -#if PG_VERSION_NUM >= 100000 if (temp_slot && perm_slot) elog(ERROR, "You cannot specify \"--perm-slot\" option with the \"--temp-slot\" option"); /* if slot name was not provided for temp slot, use default slot name */ if (!replication_slot && temp_slot) replication_slot = DEFAULT_TEMP_SLOT_NAME; -#endif + if (!replication_slot && perm_slot) replication_slot = DEFAULT_PERMANENT_SLOT_NAME; diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 6f6dcdff6..2439fc23b 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -65,13 +65,8 @@ extern const char *PROGRAM_EMAIL; #define DATABASE_DIR "database" #define BACKUPS_DIR "backups" #define WAL_SUBDIR "wal" -#if PG_VERSION_NUM >= 100000 #define PG_XLOG_DIR "pg_wal" #define PG_LOG_DIR "log" -#else -#define PG_XLOG_DIR "pg_xlog" -#define PG_LOG_DIR "pg_log" -#endif #define PG_TBLSPC_DIR "pg_tblspc" #define PG_GLOBAL_DIR "global" #define BACKUP_CONTROL_FILE "backup.control" @@ -777,11 +772,8 @@ extern bool stream_wal; extern bool show_color; extern bool progress; extern bool is_archive_cmd; /* true for archive-{get,push} */ -/* In pre-10 'replication_slot' is defined in receivelog.h */ extern char *replication_slot; -#if PG_VERSION_NUM >= 100000 extern bool temp_slot; -#endif extern bool perm_slot; /* backup options */ diff --git a/src/stream.c b/src/stream.c index 1ee8dee37..b10eb7308 100644 --- a/src/stream.c +++ b/src/stream.c @@ -2,7 +2,7 @@ * * stream.c: pg_probackup specific code for WAL streaming * - * Portions Copyright (c) 2015-2020, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -174,10 +174,10 @@ checkpoint_timeout(PGconn *backup_conn) * CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin, * bool is_temporary, bool is_physical, bool reserve_wal, * bool slot_exists_ok) - * PG 9.5-10 + * PG 10 * CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin, * bool is_physical, bool slot_exists_ok) - * NOTE: PG 9.6 and 10 support reserve_wal in + * NOTE: PG 10 support reserve_wal in * pg_catalog.pg_create_physical_replication_slot(slot_name name [, 
immediately_reserve boolean]) * and * CREATE_REPLICATION_SLOT slot_name { PHYSICAL [ RESERVE_WAL ] | LOGICAL output_plugin } @@ -194,7 +194,7 @@ CreateReplicationSlot_compat(PGconn *conn, const char *slot_name, const char *pl #elif PG_VERSION_NUM >= 110000 return CreateReplicationSlot(conn, slot_name, plugin, is_temporary, is_physical, /* reserve_wal = */ true, slot_exists_ok); -#elif PG_VERSION_NUM >= 100000 +#else /* * PG-10 doesn't support creating temp_slot by calling CreateReplicationSlot(), but * it will be created by setting StreamCtl.temp_slot later in StreamLog() @@ -203,10 +203,6 @@ CreateReplicationSlot_compat(PGconn *conn, const char *slot_name, const char *pl return CreateReplicationSlot(conn, slot_name, plugin, /*is_temporary,*/ is_physical, /*reserve_wal,*/ slot_exists_ok); else return true; -#else - /* these parameters not supported in PG < 10 */ - Assert(!is_temporary); - return CreateReplicationSlot(conn, slot_name, plugin, /*is_temporary,*/ is_physical, /*reserve_wal,*/ slot_exists_ok); #endif } @@ -229,13 +225,8 @@ StreamLog(void *arg) stream_stop_begin = 0; /* Create repslot */ -#if PG_VERSION_NUM >= 100000 if (temp_slot || perm_slot) if (!CreateReplicationSlot_compat(stream_arg->conn, replication_slot, NULL, temp_slot, true, false)) -#else - if (perm_slot) - if (!CreateReplicationSlot_compat(stream_arg->conn, replication_slot, NULL, false, true, false)) -#endif { interrupted = true; elog(ERROR, "Couldn't create physical replication slot %s", replication_slot); @@ -248,18 +239,13 @@ StreamLog(void *arg) elog(LOG, "started streaming WAL at %X/%X (timeline %u) using%s slot %s", (uint32) (stream_arg->startpos >> 32), (uint32) stream_arg->startpos, stream_arg->starttli, -#if PG_VERSION_NUM >= 100000 temp_slot ? " temporary" : "", -#else - "", -#endif replication_slot); else elog(LOG, "started streaming WAL at %X/%X (timeline %u)", (uint32) (stream_arg->startpos >> 32), (uint32) stream_arg->startpos, stream_arg->starttli); -#if PG_VERSION_NUM >= 90600 { StreamCtl ctl; @@ -274,7 +260,6 @@ StreamLog(void *arg) ctl.synchronous = false; ctl.mark_done = false; -#if PG_VERSION_NUM >= 100000 ctl.walmethod = CreateWalDirectoryMethod( stream_arg->basedir, // (instance_config.compress_alg == NONE_COMPRESS) ? 
0 : instance_config.compress_level, @@ -284,13 +269,10 @@ StreamLog(void *arg) ctl.stop_socket = PGINVALID_SOCKET; ctl.do_sync = false; /* We sync all files at the end of backup */ // ctl.mark_done /* for future use in s3 */ -#if PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 110000 +#if PG_VERSION_NUM < 110000 /* StreamCtl.temp_slot used only for PG-10, in PG>10, temp_slots are created by calling CreateReplicationSlot() */ ctl.temp_slot = temp_slot; -#endif /* PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 110000 */ -#else /* PG_VERSION_NUM < 100000 */ - ctl.basedir = (char *) stream_arg->basedir; -#endif /* PG_VERSION_NUM >= 100000 */ +#endif /* PG_VERSION_NUM < 110000 */ if (ReceiveXlogStream(stream_arg->conn, &ctl) == false) { @@ -298,25 +280,13 @@ StreamLog(void *arg) elog(ERROR, "Problem in receivexlog"); } -#if PG_VERSION_NUM >= 100000 if (!ctl.walmethod->finish()) { interrupted = true; elog(ERROR, "Could not finish writing WAL files: %s", strerror(errno)); } -#endif /* PG_VERSION_NUM >= 100000 */ - } -#else /* PG_VERSION_NUM < 90600 */ - /* PG-9.5 */ - if (ReceiveXlogStream(stream_arg->conn, stream_arg->startpos, stream_arg->starttli, - NULL, (char *) stream_arg->basedir, stop_streaming, - standby_message_timeout, NULL, false, false) == false) - { - interrupted = true; - elog(ERROR, "Problem in receivexlog"); } -#endif /* PG_VERSION_NUM >= 90600 */ /* be paranoid and sort xlog_files_list, * so if stop_lsn segno is already in the list, diff --git a/src/util.c b/src/util.c index e89f5776b..28bdf283e 100644 --- a/src/util.c +++ b/src/util.c @@ -102,11 +102,7 @@ checkControlFile(ControlFileData *ControlFile) static void digestControlFile(ControlFileData *ControlFile, char *src, size_t size) { -#if PG_VERSION_NUM >= 100000 int ControlFileSize = PG_CONTROL_FILE_SIZE; -#else - int ControlFileSize = PG_CONTROL_SIZE; -#endif if (size != ControlFileSize) elog(ERROR, "unexpected control file size %d, expected %d", @@ -127,11 +123,7 @@ writeControlFile(fio_location location, const char *path, ControlFileData *Contr int fd; char *buffer = NULL; -#if PG_VERSION_NUM >= 100000 int ControlFileSize = PG_CONTROL_FILE_SIZE; -#else - int ControlFileSize = PG_CONTROL_SIZE; -#endif /* copy controlFileSize */ buffer = pg_malloc0(ControlFileSize); @@ -207,44 +199,25 @@ get_current_timeline_from_control(fio_location location, const char *pgdata_path } /* - * Get last check point record ptr from pg_tonrol. + * Get last check point record ptr from pg_control. 
*/ XLogRecPtr get_checkpoint_location(PGconn *conn) { -#if PG_VERSION_NUM >= 90600 PGresult *res; uint32 lsn_hi; uint32 lsn_lo; XLogRecPtr lsn; -#if PG_VERSION_NUM >= 100000 res = pgut_execute(conn, "SELECT checkpoint_lsn FROM pg_catalog.pg_control_checkpoint()", 0, NULL); -#else - res = pgut_execute(conn, - "SELECT checkpoint_location FROM pg_catalog.pg_control_checkpoint()", - 0, NULL); -#endif XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo); PQclear(res); /* Calculate LSN */ lsn = ((uint64) lsn_hi) << 32 | lsn_lo; return lsn; -#else - /* PG-9.5 */ - char *buffer; - size_t size; - ControlFileData ControlFile; - - buffer = slurpFile(FIO_DB_HOST, instance_config.pgdata, XLOG_CONTROL_FILE, &size, false); - digestControlFile(&ControlFile, buffer, size); - pg_free(buffer); - - return ControlFile.checkPoint; -#endif } uint64 @@ -267,7 +240,6 @@ get_system_identifier(fio_location location, const char *pgdata_path, bool safe) uint64 get_remote_system_identifier(PGconn *conn) { -#if PG_VERSION_NUM >= 90600 PGresult *res; uint64 system_id_conn; char *val; @@ -284,18 +256,6 @@ get_remote_system_identifier(PGconn *conn) PQclear(res); return system_id_conn; -#else - /* PG-9.5 */ - char *buffer; - size_t size; - ControlFileData ControlFile; - - buffer = slurpFile(FIO_DB_HOST, instance_config.pgdata, XLOG_CONTROL_FILE, &size, false); - digestControlFile(&ControlFile, buffer, size); - pg_free(buffer); - - return ControlFile.system_identifier; -#endif } uint32 diff --git a/src/utils/pgut.c b/src/utils/pgut.c index c220b807d..f1b8da0b2 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -3,7 +3,7 @@ * pgut.c * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2017-2021, Postgres Professional + * Portions Copyright (c) 2017-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -20,11 +20,7 @@ #include "common/string.h" #endif -#if PG_VERSION_NUM >= 100000 #include "common/connect.h" -#else -#include "fe_utils/connect.h" -#endif #include @@ -94,7 +90,7 @@ prompt_for_password(const char *username) snprintf(message, lengthof(message), "Password for user %s: ", username); password = simple_prompt(message , false); } -#elif PG_VERSION_NUM >= 100000 +#else password = (char *) pgut_malloc(sizeof(char) * 100 + 1); if (username == NULL) simple_prompt("Password: ", password, 100, false); @@ -104,17 +100,7 @@ prompt_for_password(const char *username) snprintf(message, lengthof(message), "Password for user %s: ", username); simple_prompt(message, password, 100, false); } -#else - if (username == NULL) - password = simple_prompt("Password: ", 100, false); - else - { - char message[256]; - snprintf(message, lengthof(message), "Password for user %s: ", username); - password = simple_prompt(message, 100, false); - } #endif - in_password = false; } diff --git a/tests/backup.py b/tests/backup.py index 20ac480e0..23836cdbe 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -1856,118 +1856,43 @@ def test_backup_with_least_privileges_role(self): "CREATE SCHEMA ptrack; " "CREATE EXTENSION ptrack WITH SCHEMA ptrack") - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA 
pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA 
pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if self.ptrack: node.safe_psql( diff --git a/tests/checkdb.py b/tests/checkdb.py index 5b6dda250..71f81fd6c 100644 --- a/tests/checkdb.py +++ b/tests/checkdb.py @@ -640,66 +640,8 @@ def test_checkdb_with_least_privileges(self): "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC;") - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - 'CREATE ROLE backup WITH LOGIN; ' - 'GRANT CONNECT ON DATABASE backupdb to backup; ' - 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' - 'GRANT USAGE ON SCHEMA public TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' - 
'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' # amcheck-next function - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - 'CREATE ROLE backup WITH LOGIN; ' - 'GRANT CONNECT ON DATABASE backupdb to backup; ' - 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' - 'GRANT USAGE ON SCHEMA public TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' -# 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' - ) # PG 10 - elif self.get_version(node) > 100000 and self.get_version(node) < 110000: + if self.get_version(node) < 110000: node.safe_psql( 'backupdb', 'CREATE ROLE backup WITH LOGIN; ' diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index e3036d9c4..0fa252739 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -417,52 +417,21 @@ def simple_bootstrap(self, node, role) -> None: 'postgres', 'CREATE ROLE {0} WITH LOGIN REPLICATION'.format(role)) - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'postgres', - 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0};'.format(role)) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'postgres', - 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() 
TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) - # >= 10 - else: - node.safe_psql( - 'postgres', - 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) + # PG >= 10 + node.safe_psql( + 'postgres', + 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False): res = node.execute( From ae275dccd35e2e865bce92f51e554331947cd030 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Wed, 20 Jul 2022 02:15:07 +0300 Subject: [PATCH 02/17] [PBCKP-232] remove 9.5-9.6 support, part 2 --- .travis.yml | 4 - src/backup.c | 172 ++++++-------------- src/catchup.c | 13 +- src/dir.c | 16 -- src/pg_probackup.h | 6 +- src/utils/file.c | 5 +- tests/archive.py | 169 ++++---------------- tests/auth_test.py | 30 +--- tests/backup.py | 272 ++++++++------------------------ tests/catchup.py | 41 +++-- tests/false_positive.py | 3 - tests/helpers/ptrack_helpers.py | 32 +--- tests/incr_restore.py | 10 -- tests/pgpro2068.py | 24 +-- tests/pgpro560.py | 40 ++--- tests/ptrack.py | 143 ++++------------- tests/replica.py | 85 +--------- tests/restore.py | 201 +++++++---------------- tests/retention.py | 13 -- tests/validate.py | 19 +-- 20 files changed, 295 insertions(+), 1003 deletions(-) diff --git a/.travis.yml b/.travis.yml index ed932b68e..9e48c9cab 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,8 +32,6 @@ env: - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_BRANCH=REL_12_STABLE - PG_VERSION=11 PG_BRANCH=REL_11_STABLE PTRACK_PATCH_PG_BRANCH=REL_11_STABLE - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=archive # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup @@ -52,8 +50,6 @@ env: jobs: allow_failures: - if: env(PG_BRANCH) = master - - if: env(PG_BRANCH) = REL9_6_STABLE - - if: env(PG_BRANCH) = REL9_5_STABLE # - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) # Only run CI for master branch commits to limit our travis usage diff --git a/src/backup.c b/src/backup.c index 0edb57710..449d8d09c 100644 --- a/src/backup.c +++ b/src/backup.c @@ -32,9 +32,6 @@ parray *backup_files_list = NULL; /* We need critical section for datapagemap_add() in case of using threads */ static pthread_mutex_t backup_pagemap_mutex = PTHREAD_MUTEX_INITIALIZER; -// TODO: move to PGnodeInfo -bool exclusive_backup = false; - /* Is pg_start_backup() was executed */ bool backup_in_progress = false; @@ -80,7 +77,7 @@ backup_stopbackup_callback(bool fatal, void *userdata) { elog(WARNING, "backup in progress, stop backup"); /* don't care about stop_lsn in case of error */ - pg_stop_backup_send(st->conn, st->server_version, current.from_replica, exclusive_backup, NULL); + pg_stop_backup_send(st->conn, st->server_version, current.from_replica, NULL); } } @@ -493,10 +490,10 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, /* Notify end of backup */ pg_stop_backup(instanceState, ¤t, backup_conn, nodeInfo); - /* In case of backup from replica >= 9.6 we must fix minRecPoint, + /* In case of backup from replica we must fix minRecPoint, * First we must find pg_control in backup_files_list. 
*/ - if (current.from_replica && !exclusive_backup) + if (current.from_replica) { pgFile *pg_control = NULL; @@ -781,11 +778,6 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, } } - if (current.from_replica && exclusive_backup) - /* Check master connection options */ - if (instance_config.master_conn_opt.pghost == NULL) - elog(ERROR, "Options for connection to master must be provided to perform backup from replica"); - /* add note to backup if requested */ if (set_backup_params && set_backup_params->note) add_note(¤t, set_backup_params->note); @@ -866,22 +858,12 @@ check_server_version(PGconn *conn, PGNodeInfo *nodeInfo) elog(ERROR, "Unknown server version %d", nodeInfo->server_version); if (nodeInfo->server_version < 100000) - sprintf(nodeInfo->server_version_str, "%d.%d", - nodeInfo->server_version / 10000, - (nodeInfo->server_version / 100) % 100); - else - sprintf(nodeInfo->server_version_str, "%d", - nodeInfo->server_version / 10000); - - if (nodeInfo->server_version < 90500) elog(ERROR, "server version is %s, must be %s or higher", - nodeInfo->server_version_str, "9.5"); + nodeInfo->server_version_str, "10"); - if (current.from_replica && nodeInfo->server_version < 90600) - elog(ERROR, - "server version is %s, must be %s or higher for backup from replica", - nodeInfo->server_version_str, "9.6"); + sprintf(nodeInfo->server_version_str, "%d", + nodeInfo->server_version / 10000); if (nodeInfo->pgpro_support) res = pgut_execute(conn, "SELECT pg_catalog.pgpro_edition()", 0, NULL); @@ -922,9 +904,6 @@ check_server_version(PGconn *conn, PGNodeInfo *nodeInfo) if (res) PQclear(res); - - /* Do exclusive backup only for PostgreSQL 9.5 */ - exclusive_backup = nodeInfo->server_version < 90600; } /* @@ -1006,16 +985,10 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup, /* 2nd argument is 'fast'*/ params[1] = smooth ? "false" : "true"; - if (!exclusive_backup) - res = pgut_execute(conn, - "SELECT pg_catalog.pg_start_backup($1, $2, false)", - 2, - params); - else - res = pgut_execute(conn, - "SELECT pg_catalog.pg_start_backup($1, $2)", - 2, - params); + res = pgut_execute(conn, + "SELECT pg_catalog.pg_start_backup($1, $2, false)", + 2, + params); /* * Set flag that pg_start_backup() was called. If an error will happen it @@ -1034,14 +1007,10 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup, PQclear(res); if ((!backup->stream || backup->backup_mode == BACKUP_MODE_DIFF_PAGE) && - !backup->from_replica && - !(nodeInfo->server_version < 90600 && - !nodeInfo->is_superuser)) + !backup->from_replica) /* * Switch to a new WAL segment. It is necessary to get archived WAL * segment, which includes start LSN of current backup. - * Don`t do this for replica backups and for PG 9.5 if pguser is not superuser - * (because in 9.5 only superuser can switch WAL) */ pg_switch_wal(conn); } @@ -1546,20 +1515,9 @@ pg_create_restore_point(PGconn *conn, time_t backup_start_time) } void -pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, bool is_exclusive, char **query_text) +pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, char **query_text) { static const char - stop_exlusive_backup_query[] = - /* - * Stop the non-exclusive backup. Besides stop_lsn it returns from - * pg_stop_backup(false) copy of the backup label and tablespace map - * so they can be written to disk by the caller. - * TODO, question: add NULLs as backup_label and tablespace_map? 
- */ - "SELECT" - " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," - " current_timestamp(0)::timestamptz," - " pg_catalog.pg_stop_backup() as lsn", stop_backup_on_master_query[] = "SELECT" " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," @@ -1568,16 +1526,8 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " labelfile," " spcmapfile" " FROM pg_catalog.pg_stop_backup(false, false)", - stop_backup_on_master_before10_query[] = - "SELECT" - " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," - " current_timestamp(0)::timestamptz," - " lsn," - " labelfile," - " spcmapfile" - " FROM pg_catalog.pg_stop_backup(false)", /* - * In case of backup from replica >= 9.6 we do not trust minRecPoint + * In case of backup from replica we do not trust minRecPoint * and stop_backup LSN, so we use latest replayed LSN as STOP LSN. */ stop_backup_on_replica_query[] = @@ -1587,28 +1537,12 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " pg_catalog.pg_last_wal_replay_lsn()," " labelfile," " spcmapfile" - " FROM pg_catalog.pg_stop_backup(false, false)", - stop_backup_on_replica_before10_query[] = - "SELECT" - " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," - " current_timestamp(0)::timestamptz," - " pg_catalog.pg_last_xlog_replay_location()," - " labelfile," - " spcmapfile" - " FROM pg_catalog.pg_stop_backup(false)"; + " FROM pg_catalog.pg_stop_backup(false, false)"; const char * const stop_backup_query = - is_exclusive ? - stop_exlusive_backup_query : - server_version >= 100000 ? - (is_started_on_replica ? + is_started_on_replica ? stop_backup_on_replica_query : - stop_backup_on_master_query - ) : - (is_started_on_replica ? 
- stop_backup_on_replica_before10_query : - stop_backup_on_master_before10_query - ); + stop_backup_on_master_query; bool sent = false; /* Make proper timestamp format for parse_time(recovery_time) */ @@ -1641,7 +1575,7 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica */ void pg_stop_backup_consume(PGconn *conn, int server_version, - bool is_exclusive, uint32 timeout, const char *query_text, + uint32 timeout, const char *query_text, PGStopBackupResult *result) { PGresult *query_result; @@ -1743,28 +1677,18 @@ pg_stop_backup_consume(PGconn *conn, int server_version, /* get backup_label_content */ result->backup_label_content = NULL; // if (!PQgetisnull(query_result, 0, backup_label_colno)) - if (!is_exclusive) - { - result->backup_label_content_len = PQgetlength(query_result, 0, backup_label_colno); - if (result->backup_label_content_len > 0) - result->backup_label_content = pgut_strndup(PQgetvalue(query_result, 0, backup_label_colno), - result->backup_label_content_len); - } else { - result->backup_label_content_len = 0; - } + result->backup_label_content_len = PQgetlength(query_result, 0, backup_label_colno); + if (result->backup_label_content_len > 0) + result->backup_label_content = pgut_strndup(PQgetvalue(query_result, 0, backup_label_colno), + result->backup_label_content_len); /* get tablespace_map_content */ result->tablespace_map_content = NULL; // if (!PQgetisnull(query_result, 0, tablespace_map_colno)) - if (!is_exclusive) - { - result->tablespace_map_content_len = PQgetlength(query_result, 0, tablespace_map_colno); - if (result->tablespace_map_content_len > 0) - result->tablespace_map_content = pgut_strndup(PQgetvalue(query_result, 0, tablespace_map_colno), - result->tablespace_map_content_len); - } else { - result->tablespace_map_content_len = 0; - } + result->tablespace_map_content_len = PQgetlength(query_result, 0, tablespace_map_colno); + if (result->tablespace_map_content_len > 0) + result->tablespace_map_content = pgut_strndup(PQgetvalue(query_result, 0, tablespace_map_colno), + result->tablespace_map_content_len); } /* @@ -1832,21 +1756,18 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb /* Create restore point * Only if backup is from master. - * For PG 9.5 create restore point only if pguser is superuser. 
*/ - if (!backup->from_replica && - !(nodeInfo->server_version < 90600 && - !nodeInfo->is_superuser)) //TODO: check correctness + if (!backup->from_replica) pg_create_restore_point(pg_startbackup_conn, backup->start_time); /* Execute pg_stop_backup using PostgreSQL connection */ - pg_stop_backup_send(pg_startbackup_conn, nodeInfo->server_version, backup->from_replica, exclusive_backup, &query_text); + pg_stop_backup_send(pg_startbackup_conn, nodeInfo->server_version, backup->from_replica, &query_text); /* * Wait for the result of pg_stop_backup(), but no longer than * archive_timeout seconds */ - pg_stop_backup_consume(pg_startbackup_conn, nodeInfo->server_version, exclusive_backup, timeout, query_text, &stop_backup_result); + pg_stop_backup_consume(pg_startbackup_conn, nodeInfo->server_version, timeout, query_text, &stop_backup_result); if (backup->stream) { @@ -1859,28 +1780,25 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb wait_wal_and_calculate_stop_lsn(xlog_path, stop_backup_result.lsn, backup); /* Write backup_label and tablespace_map */ - if (!exclusive_backup) + Assert(stop_backup_result.backup_label_content != NULL); + + /* Write backup_label */ + pg_stop_backup_write_file_helper(backup->database_dir, PG_BACKUP_LABEL_FILE, "backup label", + stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, + backup_files_list); + free(stop_backup_result.backup_label_content); + stop_backup_result.backup_label_content = NULL; + stop_backup_result.backup_label_content_len = 0; + + /* Write tablespace_map */ + if (stop_backup_result.tablespace_map_content != NULL) { - Assert(stop_backup_result.backup_label_content != NULL); - - /* Write backup_label */ - pg_stop_backup_write_file_helper(backup->database_dir, PG_BACKUP_LABEL_FILE, "backup label", - stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, + pg_stop_backup_write_file_helper(backup->database_dir, PG_TABLESPACE_MAP_FILE, "tablespace map", + stop_backup_result.tablespace_map_content, stop_backup_result.tablespace_map_content_len, backup_files_list); - free(stop_backup_result.backup_label_content); - stop_backup_result.backup_label_content = NULL; - stop_backup_result.backup_label_content_len = 0; - - /* Write tablespace_map */ - if (stop_backup_result.tablespace_map_content != NULL) - { - pg_stop_backup_write_file_helper(backup->database_dir, PG_TABLESPACE_MAP_FILE, "tablespace map", - stop_backup_result.tablespace_map_content, stop_backup_result.tablespace_map_content_len, - backup_files_list); - free(stop_backup_result.tablespace_map_content); - stop_backup_result.tablespace_map_content = NULL; - stop_backup_result.tablespace_map_content_len = 0; - } + free(stop_backup_result.tablespace_map_content); + stop_backup_result.tablespace_map_content = NULL; + stop_backup_result.tablespace_map_content_len = 0; } if (backup->stream) diff --git a/src/catchup.c b/src/catchup.c index 522279ac9..1195f7a7f 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -185,9 +185,6 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, elog(ERROR, "Ptrack is disabled"); } - if (current.from_replica && exclusive_backup) - elog(ERROR, "Catchup from standby is only available for PostgreSQL >= 9.6"); - /* check that we don't overwrite tablespace in source pgdata */ catchup_check_tablespaces_existance_in_tbsmapping(source_conn); @@ -1012,13 +1009,13 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, 
pg_silent_client_messages(source_conn); /* Execute pg_stop_backup using PostgreSQL connection */ - pg_stop_backup_send(source_conn, source_node_info.server_version, current.from_replica, exclusive_backup, &stop_backup_query_text); + pg_stop_backup_send(source_conn, source_node_info.server_version, current.from_replica, &stop_backup_query_text); /* * Wait for the result of pg_stop_backup(), but no longer than * archive_timeout seconds */ - pg_stop_backup_consume(source_conn, source_node_info.server_version, exclusive_backup, timeout, stop_backup_query_text, &stop_backup_result); + pg_stop_backup_consume(source_conn, source_node_info.server_version, timeout, stop_backup_query_text, &stop_backup_result); /* Cleanup */ pg_free(stop_backup_query_text); @@ -1076,12 +1073,10 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, } /* - * In case of backup from replica >= 9.6 we must fix minRecPoint + * In case of backup from replica we must fix minRecPoint */ - if (current.from_replica && !exclusive_backup) - { + if (current.from_replica) set_min_recovery_point(source_pg_control_file, dest_pgdata, current.stop_lsn); - } /* close ssh session in main thread */ fio_disconnect(); diff --git a/src/dir.c b/src/dir.c index 3e5e28cef..5f25f2ee4 100644 --- a/src/dir.c +++ b/src/dir.c @@ -83,11 +83,7 @@ static char *pgdata_exclude_files[] = "probackup_recovery.conf", "recovery.signal", "standby.signal", - NULL -}; -static char *pgdata_exclude_files_non_exclusive[] = -{ /*skip in non-exclusive backup */ "backup_label", "tablespace_map", @@ -571,18 +567,6 @@ dir_check_file(pgFile *file, bool backup_logs) /* Check if we need to exclude file by name */ if (S_ISREG(file->mode)) { - if (!exclusive_backup) - { - for (i = 0; pgdata_exclude_files_non_exclusive[i]; i++) - if (strcmp(file->rel_path, - pgdata_exclude_files_non_exclusive[i]) == 0) - { - /* Skip */ - elog(VERBOSE, "Excluding file: %s", file->name); - return CHECK_FALSE; - } - } - for (i = 0; pgdata_exclude_files[i]; i++) if (strcmp(file->rel_path, pgdata_exclude_files[i]) == 0) { diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 2439fc23b..eb051065b 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -782,8 +782,6 @@ extern bool smooth_checkpoint; /* remote probackup options */ extern char* remote_agent; -extern bool exclusive_backup; - /* delete options */ extern bool delete_wal; extern bool delete_expired; @@ -1273,9 +1271,9 @@ extern void pg_start_backup(const char *label, bool smooth, pgBackup *backup, PGNodeInfo *nodeInfo, PGconn *conn); extern void pg_silent_client_messages(PGconn *conn); extern void pg_create_restore_point(PGconn *conn, time_t backup_start_time); -extern void pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, bool is_exclusive, char **query_text); +extern void pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, char **query_text); extern void pg_stop_backup_consume(PGconn *conn, int server_version, - bool is_exclusive, uint32 timeout, const char *query_text, + uint32 timeout, const char *query_text, PGStopBackupResult *result); extern void pg_stop_backup_write_file_helper(const char *path, const char *filename, const char *error_msg_filename, const void *data, size_t len, parray *file_list); diff --git a/src/utils/file.c b/src/utils/file.c index 53ab451f8..92bebc7c8 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -38,7 +38,6 @@ typedef struct bool follow_symlink; bool add_root; bool backup_logs; - bool 
exclusive_backup; bool skip_hidden; int external_dir_num; } fio_list_dir_request; @@ -2798,7 +2797,6 @@ fio_list_dir_internal(parray *files, const char *root, bool exclude, req.follow_symlink = follow_symlink; req.add_root = add_root; req.backup_logs = backup_logs; - req.exclusive_backup = exclusive_backup; req.skip_hidden = skip_hidden; req.external_dir_num = external_dir_num; @@ -2891,7 +2889,6 @@ fio_list_dir_impl(int out, char* buf) * TODO: correctly send elog messages from agent to main process. */ instance_config.logger.log_level_console = ERROR; - exclusive_backup = req->exclusive_backup; dir_list_file(file_files, req->path, req->exclude, req->follow_symlink, req->add_root, req->backup_logs, req->skip_hidden, @@ -4863,4 +4860,4 @@ init_pio_objects(void) localDrive = bindref_pioDrive($alloc(pioLocalDrive)); remoteDrive = bindref_pioDrive($alloc(pioRemoteDrive)); -} \ No newline at end of file +} diff --git a/tests/archive.py b/tests/archive.py index 52fb225e8..fe3d89b17 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -84,11 +84,6 @@ def test_pgpro434_2(self): 'checkpoint_timeout': '30s'} ) - if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because pg_control_checkpoint() is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -264,15 +259,9 @@ def test_pgpro434_3(self): with open(log_file, 'r') as f: log_content = f.read() - # in PG =< 9.6 pg_stop_backup always wait - if self.get_version(node) < 100000: - self.assertIn( - "ERROR: pg_stop_backup doesn't answer in 60 seconds, cancel it", - log_content) - else: - self.assertIn( - "ERROR: WAL segment 000000010000000000000003 could not be archived in 60 seconds", - log_content) + self.assertIn( + "ERROR: WAL segment 000000010000000000000003 could not be archived in 60 seconds", + log_content) log_file = os.path.join(node.logs_dir, 'postgresql.log') with open(log_file, 'r') as f: @@ -418,12 +407,8 @@ def test_archive_push_file_exists(self): self.assertNotIn( 'pg_probackup archive-push completed successfully', log_content) - if self.get_version(node) < 100000: - wal_src = os.path.join( - node.data_dir, 'pg_xlog', '000000010000000000000001') - else: - wal_src = os.path.join( - node.data_dir, 'pg_wal', '000000010000000000000001') + wal_src = os.path.join( + node.data_dir, 'pg_wal', '000000010000000000000001') if self.archive_compress: with open(wal_src, 'rb') as f_in, gzip.open( @@ -555,16 +540,10 @@ def test_archive_push_partial_file_exists(self): "postgres", "INSERT INTO t1 VALUES (1) RETURNING (xmin)").decode('utf-8').rstrip() - if self.get_version(node) < 100000: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location());").rstrip() - else: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() + filename_orig = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() filename_orig = filename_orig.decode('utf-8') @@ -634,16 +613,10 @@ def test_archive_push_part_file_exists_not_stale(self): "postgres", "create table t2()") - if self.get_version(node) < 100000: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location());").rstrip() - else: - filename_orig = node.safe_psql( 
- "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() + filename_orig = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() filename_orig = filename_orig.decode('utf-8') @@ -708,11 +681,6 @@ def test_replica_archive(self): 'checkpoint_timeout': '30s', 'max_wal_size': '32MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) # ADD INSTANCE 'MASTER' self.add_instance(backup_dir, 'master', master) @@ -839,11 +807,6 @@ def test_master_and_replica_parallel_archiving(self): 'archive_timeout': '10s'} ) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - replica = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'replica')) replica.cleanup() @@ -921,9 +884,6 @@ def test_basic_master_and_replica_concurrent_archiving(self): set replica with archiving, make sure that archiving on both node is working. """ - if self.pg_config_version < self.version_to_num('9.6.0'): - return unittest.skip('You need PostgreSQL >= 9.6 for this test') - fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( @@ -934,11 +894,6 @@ def test_basic_master_and_replica_concurrent_archiving(self): 'checkpoint_timeout': '30s', 'archive_timeout': '10s'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - replica = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'replica')) replica.cleanup() @@ -1115,10 +1070,7 @@ def test_archive_pg_receivexlog(self): self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() - if self.get_version(node) < 100000: - pg_receivexlog_path = self.get_bin_path('pg_receivexlog') - else: - pg_receivexlog_path = self.get_bin_path('pg_receivewal') + pg_receivexlog_path = self.get_bin_path('pg_receivewal') pg_receivexlog = self.run_binary( [ @@ -1188,11 +1140,8 @@ def test_archive_pg_receivexlog_compression_pg10(self): self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() - if self.get_version(node) < self.version_to_num('10.0'): - return unittest.skip('You need PostgreSQL >= 10 for this test') - else: - pg_receivexlog_path = self.get_bin_path('pg_receivewal') + pg_receivexlog_path = self.get_bin_path('pg_receivewal') pg_receivexlog = self.run_binary( [ pg_receivexlog_path, '-p', str(node.port), '--synchronous', @@ -1269,11 +1218,6 @@ def test_archive_catalog(self): 'archive_timeout': '30s', 'checkpoint_timeout': '30s'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -1930,10 +1874,6 @@ def test_waldir_outside_pgdata_archiving(self): """ check that archive-push works correct with symlinked waldir """ - if self.pg_config_version < self.version_to_num('10.0'): - return unittest.skip( - 'Skipped 
because waldir outside pgdata is supported since PG 10') - fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') external_wal_dir = os.path.join(self.tmp_path, module_name, fname, 'ext_wal_dir') @@ -2041,10 +1981,7 @@ def test_archiving_and_slots(self): self.set_archiving(backup_dir, 'node', node, log_level='verbose') node.slow_start() - if self.get_version(node) < 100000: - pg_receivexlog_path = self.get_bin_path('pg_receivexlog') - else: - pg_receivexlog_path = self.get_bin_path('pg_receivewal') + pg_receivexlog_path = self.get_bin_path('pg_receivewal') # "pg_receivewal --create-slot --slot archive_slot --if-not-exists " # "&& pg_receivewal --synchronous -Z 1 /tmp/wal --slot archive_slot --no-loop" @@ -2167,22 +2104,13 @@ def test_archive_pg_receivexlog_partial_handling(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() - if self.get_version(node) < 100000: - app_name = 'pg_receivexlog' - pg_receivexlog_path = self.get_bin_path('pg_receivexlog') - else: - app_name = 'pg_receivewal' - pg_receivexlog_path = self.get_bin_path('pg_receivewal') + app_name = 'pg_receivewal' + pg_receivexlog_path = self.get_bin_path('pg_receivewal') cmdline = [ pg_receivexlog_path, '-p', str(node.port), '--synchronous', @@ -2376,11 +2304,6 @@ def test_archive_get_batching_sanity(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2600,16 +2523,10 @@ def test_archive_show_partial_files_handling(self): "postgres", "create table t1()") - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() + filename = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() filename = filename.decode('utf-8') @@ -2624,16 +2541,10 @@ def test_archive_show_partial_files_handling(self): "postgres", "create table t2()") - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() + filename = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() filename = filename.decode('utf-8') @@ -2648,16 +2559,10 @@ def test_archive_show_partial_files_handling(self): "postgres", "create table t3()") - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM 
pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() + filename = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() filename = filename.decode('utf-8') @@ -2672,16 +2577,10 @@ def test_archive_show_partial_files_handling(self): "postgres", "create table t4()") - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() + filename = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() filename = filename.decode('utf-8') diff --git a/tests/auth_test.py b/tests/auth_test.py index 78af21be9..16c73308f 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -62,14 +62,9 @@ def test_backup_via_unprivileged_user(self): "GRANT EXECUTE ON FUNCTION" " pg_start_backup(text, boolean, boolean) TO backup;") - if self.get_version(node) < 100000: - node.safe_psql( - 'postgres', - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup") - else: - node.safe_psql( - 'postgres', - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup") + node.safe_psql( + 'postgres', + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup") try: self.backup_node( @@ -103,19 +98,10 @@ def test_backup_via_unprivileged_user(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - if self.get_version(node) < self.version_to_num('10.0'): - node.safe_psql( - "postgres", - "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup") - else: - node.safe_psql( - "postgres", - "GRANT EXECUTE ON FUNCTION " - "pg_stop_backup(boolean, boolean) TO backup") - # Do this for ptrack backups - node.safe_psql( - "postgres", - "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup") + node.safe_psql( + "postgres", + "GRANT EXECUTE ON FUNCTION " + "pg_stop_backup(boolean, boolean) TO backup") self.backup_node( backup_dir, 'node', node, options=['-U', 'backup']) @@ -184,8 +170,6 @@ def setUpClass(cls): "GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; " "GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; " "GRANT EXECUTE ON FUNCTION pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; " "GRANT EXECUTE ON FUNCTION pg_switch_xlog() TO backup; " "GRANT EXECUTE ON FUNCTION txid_current() TO backup; " diff --git a/tests/backup.py b/tests/backup.py index 23836cdbe..685436291 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -1427,9 +1427,6 @@ def test_basic_temp_slot_for_stream_backup(self): initdb_params=['--data-checksums'], pg_options={'max_wal_size': '40MB'}) - if self.get_version(node) < self.version_to_num('10.0'): - return unittest.skip('You need PostgreSQL >= 10 for this test') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2167,62 +2164,24 @@ def test_backup_with_less_privileges_role(self): 'backupdb', 'CREATE EXTENSION ptrack') - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "BEGIN; " - "CREATE ROLE backup WITH LOGIN; " - "GRANT USAGE ON SCHEMA pg_catalog TO 
backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "BEGIN; " - "CREATE ROLE backup WITH LOGIN; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "BEGIN; " - "CREATE ROLE backup WITH LOGIN; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "BEGIN; " + "CREATE ROLE backup WITH LOGIN; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON 
FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " + "COMMIT;" + ) # enable STREAM backup node.safe_psql( @@ -2262,10 +2221,6 @@ def test_backup_with_less_privileges_role(self): backup_dir, 'node', node, backup_type='ptrack', datname='backupdb', options=['--stream', '-U', 'backup']) - if self.get_version(node) < 90600: - self.del_test_dir(module_name, fname) - return - # Restore as replica replica = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'replica')) @@ -2952,71 +2907,28 @@ def test_missing_replication_permission(self): 'postgres', 'CREATE DATABASE backupdb') - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # >= 10 - else: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO 
backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if ProbackupTest.enterprise: node.safe_psql( @@ -3083,73 +2995,28 @@ def test_missing_replication_permission_1(self): 'postgres', 'CREATE DATABASE backupdb') - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION 
pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if ProbackupTest.enterprise: node.safe_psql( @@ -3305,18 +3172,9 @@ def test_pg_stop_backup_missing_permissions(self): self.simple_bootstrap(node, 'backup') - if self.get_version(node) < 90600: - node.safe_psql( - 'postgres', - 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() FROM backup') - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'postgres', - 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) FROM backup') - else: - node.safe_psql( - 'postgres', - 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) FROM backup') + node.safe_psql( + 'postgres', + 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) FROM backup') # Full backup in streaming mode try: diff --git a/tests/catchup.py b/tests/catchup.py index a83755c54..ac243da72 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -1231,27 +1231,26 @@ def test_catchup_with_replication_slot(self): ).decode('utf-8').rstrip() self.assertEqual(slot_name, 'pg_probackup_perm_slot', 'Slot name mismatch') - # 5. --perm-slot --temp-slot (PG>=10) - if self.get_version(src_pg) >= self.version_to_num('10.0'): - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_5')) - try: - self.catchup_node( - backup_mode = 'FULL', - source_pgdata = src_pg.data_dir, - destination_node = dst_pg, - options = [ - '-d', 'postgres', '-p', str(src_pg.port), '--stream', - '--perm-slot', - '--temp-slot' - ] - ) - self.assertEqual(1, 0, "Expecting Error because conflicting options --perm-slot and --temp-slot used together\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: You cannot specify "--perm-slot" option with the "--temp-slot" option', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + # 5. 
--perm-slot --temp-slot + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_5')) + try: + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--perm-slot', + '--temp-slot' + ] + ) + self.assertEqual(1, 0, "Expecting Error because conflicting options --perm-slot and --temp-slot used together\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: You cannot specify "--perm-slot" option with the "--temp-slot" option', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) #self.assertEqual(1, 0, 'Stop test') self.del_test_dir(module_name, self.fname) diff --git a/tests/false_positive.py b/tests/false_positive.py index a101f8107..9cff54185 100644 --- a/tests/false_positive.py +++ b/tests/false_positive.py @@ -113,9 +113,6 @@ def test_pg_10_waldir(self): """ test group access for PG >= 11 """ - if self.pg_config_version < self.version_to_num('10.0'): - return unittest.skip('You need PostgreSQL >= 10 for this test') - fname = self.id().split('.')[3] wal_dir = os.path.join( os.path.join(self.tmp_path, module_name, fname), 'wal_dir') diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 0fa252739..8e24dd279 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -547,13 +547,7 @@ def get_md5_per_page_for_fork(self, file, size_in_pages): def get_ptrack_bits_per_page_for_fork(self, node, file, size=[]): - if self.get_pgpro_edition(node) == 'enterprise': - if self.get_version(node) < self.version_to_num('10.0'): - header_size = 48 - else: - header_size = 24 - else: - header_size = 24 + header_size = 24 ptrack_bits_for_fork = [] # TODO: use macro instead of hard coded 8KB @@ -1517,25 +1511,15 @@ def version_to_num(self, version): def switch_wal_segment(self, node): """ - Execute pg_switch_wal/xlog() in given node + Execute pg_switch_wal() in given node Args: node: an instance of PostgresNode or NodeConnection class """ if isinstance(node, testgres.PostgresNode): - if self.version_to_num( - node.safe_psql('postgres', 'show server_version').decode('utf-8') - ) >= self.version_to_num('10.0'): - node.safe_psql('postgres', 'select pg_switch_wal()') - else: - node.safe_psql('postgres', 'select pg_switch_xlog()') + node.safe_psql('postgres', 'select pg_switch_wal()') else: - if self.version_to_num( - node.execute('show server_version')[0][0] - ) >= self.version_to_num('10.0'): - node.execute('select pg_switch_wal()') - else: - node.execute('select pg_switch_xlog()') + node.execute('select pg_switch_wal()') sleep(1) @@ -1545,12 +1529,8 @@ def wait_until_replica_catch_with_master(self, master, replica): 'postgres', 'show server_version').decode('utf-8').rstrip() - if self.version_to_num(version) >= self.version_to_num('10.0'): - master_function = 'pg_catalog.pg_current_wal_lsn()' - replica_function = 'pg_catalog.pg_last_wal_replay_lsn()' - else: - master_function = 'pg_catalog.pg_current_xlog_location()' - replica_function = 'pg_catalog.pg_last_xlog_replay_location()' + master_function = 'pg_catalog.pg_current_wal_lsn()' + replica_function = 'pg_catalog.pg_last_wal_replay_lsn()' lsn = master.safe_psql( 'postgres', diff --git a/tests/incr_restore.py b/tests/incr_restore.py index cb684a23a..b3a2ce4a6 100644 --- a/tests/incr_restore.py +++ b/tests/incr_restore.py @@ -1492,11 +1492,6 @@ def 
test_make_replica_via_incr_checksum_restore(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master, replica=True) @@ -1565,11 +1560,6 @@ def test_make_replica_via_incr_lsn_restore(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master, replica=True) diff --git a/tests/pgpro2068.py b/tests/pgpro2068.py index 3baa0ba0b..454cac532 100644 --- a/tests/pgpro2068.py +++ b/tests/pgpro2068.py @@ -136,29 +136,7 @@ def test_minrecpoint_on_replica(self): recovery_config, "recovery_target_action = 'pause'") replica.slow_start(replica=True) - if self.get_version(node) < 100000: - script = ''' -DO -$$ -relations = plpy.execute("select class.oid from pg_class class WHERE class.relkind IN ('r', 'i', 't', 'm') and class.relpersistence = 'p'") -current_xlog_lsn = plpy.execute("SELECT min_recovery_end_location as lsn FROM pg_control_recovery()")[0]['lsn'] -plpy.notice('CURRENT LSN: {0}'.format(current_xlog_lsn)) -found_corruption = False -for relation in relations: - pages_from_future = plpy.execute("with number_of_blocks as (select blknum from generate_series(0, pg_relation_size({0}) / 8192 -1) as blknum) select blknum, lsn, checksum, flags, lower, upper, special, pagesize, version, prune_xid from number_of_blocks, page_header(get_raw_page('{0}'::oid::regclass::text, number_of_blocks.blknum::int)) where lsn > '{1}'::pg_lsn".format(relation['oid'], current_xlog_lsn)) - - if pages_from_future.nrows() == 0: - continue - - for page in pages_from_future: - plpy.notice('Found page from future. OID: {0}, BLKNUM: {1}, LSN: {2}'.format(relation['oid'], page['blknum'], page['lsn'])) - found_corruption = True -if found_corruption: - plpy.error('Found Corruption') -$$ LANGUAGE plpython3u; -''' - else: - script = ''' + script = ''' DO $$ relations = plpy.execute("select class.oid from pg_class class WHERE class.relkind IN ('r', 'i', 't', 'm') and class.relpersistence = 'p'") diff --git a/tests/pgpro560.py b/tests/pgpro560.py index 53c7914a2..ffda7b5ee 100644 --- a/tests/pgpro560.py +++ b/tests/pgpro560.py @@ -84,20 +84,12 @@ def test_pgpro560_systemid_mismatch(self): "Expecting Error because of SYSTEM ID mismatch.\n " "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) except ProbackupException as e: - if self.get_version(node1) > 90600: - self.assertTrue( - 'ERROR: Backup data directory was ' - 'initialized for system id' in e.message and - 'but connected instance system id is' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - else: - self.assertIn( - 'ERROR: System identifier mismatch. 
' - 'Connected PostgreSQL instance has system id', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.assertTrue( + 'ERROR: Backup data directory was ' + 'initialized for system id' in e.message and + 'but connected instance system id is' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) sleep(1) @@ -111,20 +103,12 @@ def test_pgpro560_systemid_mismatch(self): "Expecting Error because of of SYSTEM ID mismatch.\n " "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) except ProbackupException as e: - if self.get_version(node1) > 90600: - self.assertTrue( - 'ERROR: Backup data directory was initialized ' - 'for system id' in e.message and - 'but connected instance system id is' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - else: - self.assertIn( - 'ERROR: System identifier mismatch. ' - 'Connected PostgreSQL instance has system id', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.assertTrue( + 'ERROR: Backup data directory was initialized ' + 'for system id' in e.message and + 'but connected instance system id is' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) # Clean after yourself self.del_test_dir(module_name, fname) diff --git a/tests/ptrack.py b/tests/ptrack.py index 08ea90f8d..19df9ff16 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -511,114 +511,41 @@ def test_ptrack_unprivileged(self): "postgres", "CREATE DATABASE backupdb") - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON 
FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA 
pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) node.safe_psql( "backupdb", diff --git a/tests/replica.py b/tests/replica.py index acf655aac..4bcfa6083 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ 
-28,11 +28,6 @@ def test_replica_switchover(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node1', node1) @@ -105,10 +100,6 @@ def test_replica_stream_ptrack_backup(self): if not self.ptrack: return unittest.skip('Skipped because ptrack support is disabled') - if self.pg_config_version > self.version_to_num('9.6.0'): - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( @@ -239,11 +230,6 @@ def test_replica_archive_page_backup(self): 'checkpoint_timeout': '30s', 'max_wal_size': '32MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -381,11 +367,6 @@ def test_basic_make_replica_via_restore(self): pg_options={ 'archive_timeout': '10s'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -439,11 +420,6 @@ def test_take_backup_from_delayed_replica(self): initdb_params=['--data-checksums'], pg_options={'archive_timeout': '10s'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -552,11 +528,6 @@ def test_replica_promote(self): 'checkpoint_timeout': '30s', 'max_wal_size': '32MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -643,11 +614,6 @@ def test_replica_stop_lsn_null_offset(self): 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master) @@ -728,11 +694,6 @@ def test_replica_stop_lsn_null_offset_next_record(self): 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -830,11 +791,6 @@ def test_archive_replica_null_offset(self): 'checkpoint_timeout': '1h', 
'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master) @@ -914,11 +870,6 @@ def test_archive_replica_not_null_offset(self): 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master) @@ -1003,11 +954,6 @@ def test_replica_toast(self): 'wal_level': 'replica', 'shared_buffers': '128MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -1105,11 +1051,6 @@ def test_start_stop_lsn_in_the_same_segno(self): 'wal_level': 'replica', 'shared_buffers': '128MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -1183,11 +1124,6 @@ def test_replica_promote_1(self): 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) # set replica True, so archive_mode 'always' is used. 
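# For context: with the xlog-named variants gone, the replica tests in this file
# all wait for catch-up via pg_current_wal_lsn()/pg_last_wal_replay_lsn(), per the
# simplified wait_until_replica_catch_with_master helper above. A minimal
# standalone sketch of that loop, assuming a testgres-style node object exposing
# safe_psql() as used in these tests; the function name and timeout below are
# illustrative, not part of this patch:

from time import sleep

def wait_replica_catchup(master, replica, timeout=60):
    # Capture the master's current write LSN once...
    lsn = master.safe_psql(
        'postgres',
        'SELECT pg_catalog.pg_current_wal_lsn()').decode('utf-8').rstrip()
    # ...then poll the replica until replay reaches it; the comparison is done
    # server-side as pg_lsn, since lexicographic comparison of 'X/Y' strings
    # is unsafe across segment boundaries.
    for _ in range(timeout):
        caught_up = replica.safe_psql(
            'postgres',
            "SELECT '{0}'::pg_lsn <= pg_catalog.pg_last_wal_replay_lsn()"
            .format(lsn)).decode('utf-8').rstrip()
        if caught_up == 't':
            return
        sleep(1)
    raise TimeoutError('replica did not catch up with master')
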
@@ -1310,11 +1246,6 @@ def test_replica_promote_archive_delta(self): 'checkpoint_timeout': '30s', 'archive_timeout': '30s'}) - if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node1) self.set_config( @@ -1435,11 +1366,6 @@ def test_replica_promote_archive_page(self): 'checkpoint_timeout': '30s', 'archive_timeout': '30s'}) - if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node1) self.set_archiving(backup_dir, 'node', node1) @@ -1557,11 +1483,6 @@ def test_parent_choosing(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) @@ -1708,11 +1629,7 @@ def test_replica_via_basebackup(self): # restore stream backup self.restore_node(backup_dir, 'node', node) - xlog_dir = 'pg_wal' - if self.get_version(node) < 100000: - xlog_dir = 'pg_xlog' - - filepath = os.path.join(node.data_dir, xlog_dir, "00000002.history") + filepath = os.path.join(node.data_dir, 'pg_wal', "00000002.history") self.assertTrue( os.path.exists(filepath), "History file do not exists: {0}".format(filepath)) diff --git a/tests/restore.py b/tests/restore.py index 5a00bc23b..9c300d232 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -361,10 +361,6 @@ def test_restore_to_lsn_inclusive(self): base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums']) - if self.get_version(node) < self.version_to_num('10.0'): - self.del_test_dir(module_name, fname) - return - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -432,10 +428,6 @@ def test_restore_to_lsn_not_inclusive(self): base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums']) - if self.get_version(node) < self.version_to_num('10.0'): - self.del_test_dir(module_name, fname) - return - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2146,10 +2138,7 @@ def test_restore_target_new_options(self): with node.connect("postgres") as con: con.execute("INSERT INTO tbl0005 VALUES (1)") con.commit() - if self.get_version(node) > self.version_to_num('10.0'): - res = con.execute("SELECT pg_current_wal_lsn()") - else: - res = con.execute("SELECT pg_current_xlog_location()") + res = con.execute("SELECT pg_current_wal_lsn()") con.commit() con.execute("INSERT INTO tbl0005 VALUES (2)") @@ -2240,33 +2229,32 @@ def test_restore_target_new_options(self): node.slow_start() # Restore with recovery target lsn - if self.get_version(node) >= 100000: - node.cleanup() - self.restore_node( - backup_dir, 'node', node, - options=[ - '--recovery-target-lsn={0}'.format(target_lsn), - "--recovery-target-action=promote", - '--recovery-target-timeline=1', - ]) + node.cleanup() + self.restore_node( + backup_dir, 'node', node, + options=[ + '--recovery-target-lsn={0}'.format(target_lsn), + 
"--recovery-target-action=promote", + '--recovery-target-timeline=1', + ]) - with open(recovery_conf, 'r') as f: - recovery_conf_content = f.read() + with open(recovery_conf, 'r') as f: + recovery_conf_content = f.read() - self.assertIn( - "recovery_target_lsn = '{0}'".format(target_lsn), - recovery_conf_content) + self.assertIn( + "recovery_target_lsn = '{0}'".format(target_lsn), + recovery_conf_content) - self.assertIn( - "recovery_target_action = 'promote'", - recovery_conf_content) + self.assertIn( + "recovery_target_action = 'promote'", + recovery_conf_content) - self.assertIn( - "recovery_target_timeline = '1'", - recovery_conf_content) + self.assertIn( + "recovery_target_timeline = '1'", + recovery_conf_content) - node.slow_start() + node.slow_start() # Clean after yourself self.del_test_dir(module_name, fname) @@ -3197,117 +3185,42 @@ def test_missing_database_map(self): "postgres", "CREATE DATABASE backupdb") - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog 
FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if self.ptrack: # TODO why backup works without these grants ? 
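The version gates removed in the test hunks above and below compare get_version() against PostgreSQL's server_version_num encoding, where 90600 means 9.6.0 and 100000 means 10.0; with 9.5/9.6 support gone, every branch below 100000 collapses into the unconditional "PG >= 10" path that the patches keep. A minimal sketch of that encoding, assuming dotted version strings (this is an illustration, not the test suite's actual version_to_num helper):

# Hypothetical stand-in for the version_to_num helper used by these tests.
def version_to_num(version):
    parts = [int(p) for p in version.split('.')] + [0, 0]
    if parts[0] >= 10:
        # PG 10+: MAJOR * 10000 + MINOR
        return parts[0] * 10000 + parts[1]
    # PG 9.x: MAJOR * 10000 + MINOR * 100 + PATCH
    return parts[0] * 10000 + parts[1] * 100 + parts[2]

assert version_to_num('9.6.0') == 90600    # last release line dropped by this series
assert version_to_num('10.0') == 100000    # new minimum supported server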
diff --git a/tests/retention.py b/tests/retention.py index b0399a239..7bfff6b28 100644 --- a/tests/retention.py +++ b/tests/retention.py @@ -1575,11 +1575,6 @@ def test_window_error_backups_2(self): self.show_pb(backup_dir, 'node')[1]['id'] - if self.get_version(node) < 90600: - node.safe_psql( - 'postgres', - 'SELECT pg_catalog.pg_stop_backup()') - # Take DELTA backup self.backup_node( backup_dir, 'node', node, backup_type='delta', @@ -1599,10 +1594,6 @@ def test_retention_redundancy_overlapping_chains(self): base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums']) - if self.get_version(node) < 90600: - self.del_test_dir(module_name, fname) - return unittest.skip('Skipped because ptrack support is disabled') - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1649,10 +1640,6 @@ def test_retention_redundancy_overlapping_chains_1(self): base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums']) - if self.get_version(node) < 90600: - self.del_test_dir(module_name, fname) - return unittest.skip('Skipped because ptrack support is disabled') - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) diff --git a/tests/validate.py b/tests/validate.py index 22a03c3be..7cdc0e92e 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -1757,14 +1757,9 @@ def test_validate_corrupt_wal_between_backups(self): con.commit() target_xid = res[0][0] - if self.get_version(node) < self.version_to_num('10.0'): - walfile = node.safe_psql( - 'postgres', - 'select pg_xlogfile_name(pg_current_xlog_location())').decode('utf-8').rstrip() - else: - walfile = node.safe_psql( - 'postgres', - 'select pg_walfile_name(pg_current_wal_lsn())').decode('utf-8').rstrip() + walfile = node.safe_psql( + 'postgres', + 'select pg_walfile_name(pg_current_wal_lsn())').decode('utf-8').rstrip() if self.archive_compress: walfile = walfile + '.gz' @@ -3506,12 +3501,8 @@ def test_corrupt_pg_control_via_resetxlog(self): backup_id = self.backup_node(backup_dir, 'node', node) - if self.get_version(node) < 100000: - pg_resetxlog_path = self.get_bin_path('pg_resetxlog') - wal_dir = 'pg_xlog' - else: - pg_resetxlog_path = self.get_bin_path('pg_resetwal') - wal_dir = 'pg_wal' + pg_resetxlog_path = self.get_bin_path('pg_resetwal') + wal_dir = 'pg_wal' os.mkdir( os.path.join( From 5ed469d500969be554bdc906ecfd3cb368d8372d Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Wed, 20 Jul 2022 03:08:01 +0300 Subject: [PATCH 03/17] [PBCKP-232] remove depricated options (master-db, master-host, master-port, master-user, replica-timeout) part 1 --- src/configure.c | 54 ------------------------ src/help.c | 16 +------ src/pg_probackup.h | 3 -- tests/archive.py | 9 ---- tests/ptrack.py | 102 +++++++-------------------------------------- tests/replica.py | 24 ++--------- 6 files changed, 21 insertions(+), 187 deletions(-) diff --git a/src/configure.c b/src/configure.c index 3871aa8b9..47433346f 100644 --- a/src/configure.c +++ b/src/configure.c @@ -90,32 +90,6 @@ ConfigOption instance_options[] = &instance_config.conn_opt.pguser, SOURCE_CMD, 0, OPTION_CONN_GROUP, 0, option_get_value }, - /* Replica options */ - { - 's', 202, "master-db", - &instance_config.master_conn_opt.pgdatabase, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 203, "master-host", - &instance_config.master_conn_opt.pghost, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 204, "master-port", - &instance_config.master_conn_opt.pgport, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 205, "master-user", - &instance_config.master_conn_opt.pguser, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 'u', 206, "replica-timeout", - &instance_config.replica_timeout, SOURCE_CMD, SOURCE_DEFAULT, - OPTION_REPLICA_GROUP, OPTION_UNIT_S, option_get_value - }, /* Archive options */ { 'u', 207, "archive-timeout", @@ -362,8 +336,6 @@ init_config(InstanceConfig *config, const char *instance_name) config->xlog_seg_size = XLOG_SEG_SIZE; #endif - config->replica_timeout = REPLICA_TIMEOUT_DEFAULT; - config->archive_timeout = ARCHIVE_TIMEOUT_DEFAULT; /* Copy logger defaults */ @@ -437,32 +409,6 @@ readInstanceConfigFile(InstanceState *instanceState) &instance->conn_opt.pguser, SOURCE_CMD, 0, OPTION_CONN_GROUP, 0, option_get_value }, - /* Replica options */ - { - 's', 202, "master-db", - &instance->master_conn_opt.pgdatabase, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 203, "master-host", - &instance->master_conn_opt.pghost, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 204, "master-port", - &instance->master_conn_opt.pgport, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 205, "master-user", - &instance->master_conn_opt.pguser, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 'u', 206, "replica-timeout", - &instance->replica_timeout, SOURCE_CMD, SOURCE_DEFAULT, - OPTION_REPLICA_GROUP, OPTION_UNIT_S, option_get_value - }, /* Archive options */ { 'u', 207, "archive-timeout", diff --git a/src/help.c b/src/help.c index b22fa912e..14ed38bc8 100644 --- a/src/help.c +++ b/src/help.c @@ -2,7 +2,7 @@ * * help.c * - * Copyright (c) 2017-2021, Postgres Professional + * Copyright (c) 2017-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -416,13 +416,6 @@ help_backup(void) printf(_(" --remote-user=username user name for ssh connection (default: current user)\n")); printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n")); - - printf(_("\n Replica options:\n")); - printf(_(" --master-user=user_name user name to connect to master (deprecated)\n")); - printf(_(" --master-db=db_name database to connect to master (deprecated)\n")); - printf(_(" --master-host=host_name 
database server host of master (deprecated)\n")); - printf(_(" --master-port=port database server port of master (deprecated)\n")); - printf(_(" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (deprecated)\n\n")); } static void @@ -878,13 +871,6 @@ help_set_config(void) printf(_(" --archive-host=destination address or hostname for ssh connection to archive host\n")); printf(_(" --archive-port=port port for ssh connection to archive host (default: 22)\n")); printf(_(" --archive-user=username user name for ssh connection to archive host (default: PostgreSQL user)\n")); - - printf(_("\n Replica options:\n")); - printf(_(" --master-user=user_name user name to connect to master (deprecated)\n")); - printf(_(" --master-db=db_name database to connect to master (deprecated)\n")); - printf(_(" --master-host=host_name database server host of master (deprecated)\n")); - printf(_(" --master-port=port database server port of master (deprecated)\n")); - printf(_(" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (deprecated)\n\n")); } static void diff --git a/src/pg_probackup.h b/src/pg_probackup.h index eb051065b..8e9d1568f 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -378,9 +378,6 @@ typedef struct InstanceConfig char *external_dir_str; ConnectionOptions conn_opt; - ConnectionOptions master_conn_opt; - - uint32 replica_timeout; //Deprecated. Not used anywhere /* Wait timeout for WAL segment archiving */ uint32 archive_timeout; diff --git a/tests/archive.py b/tests/archive.py index fe3d89b17..be5e33fbc 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -725,9 +725,6 @@ def test_replica_archive(self): backup_dir, 'replica', replica, options=[ '--archive-timeout=30', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), '--stream']) self.validate_pb(backup_dir, 'replica') @@ -764,9 +761,6 @@ def test_replica_archive(self): replica, backup_type='page', options=[ '--archive-timeout=60', - '--master-db=postgres', - '--master-host=localhost', - '--master-port={0}'.format(master.port), '--stream']) self.validate_pb(backup_dir, 'replica') @@ -857,9 +851,6 @@ def test_master_and_replica_parallel_archiving(self): backup_dir, 'replica', replica, options=[ '--archive-timeout=30', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), '--stream']) self.validate_pb(backup_dir, 'replica') diff --git a/tests/ptrack.py b/tests/ptrack.py index 19df9ff16..7dec55cc7 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -1560,13 +1560,7 @@ def test_create_db_on_replica(self): self.backup_node( backup_dir, 'replica', replica, - options=[ - '-j10', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(node.port), - '--stream' - ] + options=['-j10', '--stream'] ) # CREATE DATABASE DB1 @@ -1584,13 +1578,7 @@ def test_create_db_on_replica(self): backup_id = self.backup_node( backup_dir, 'replica', replica, backup_type='ptrack', - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(node.port) - ] + options=['-j10', '--stream'] ) if self.paranoia: @@ -2304,11 +2292,7 @@ def test_ptrack_clean_replica(self): backup_dir, 'replica', replica, - options=[ - '-j10', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) master.safe_psql('postgres', 'checkpoint') for i in 
idx_ptrack: @@ -2335,11 +2319,7 @@ def test_ptrack_clean_replica(self): 'replica', replica, backup_type='ptrack', - options=[ - '-j10', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) master.safe_psql('postgres', 'checkpoint') for i in idx_ptrack: @@ -2367,11 +2347,7 @@ def test_ptrack_clean_replica(self): 'replica', replica, backup_type='page', - options=[ - '-j10', '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) + options=['-j10', '--stream']) master.safe_psql('postgres', 'checkpoint') for i in idx_ptrack: @@ -2437,8 +2413,7 @@ def test_ptrack_cluster_on_btree(self): idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) + self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream']) node.safe_psql('postgres', 'delete from t_heap where id%2 = 1') node.safe_psql('postgres', 'cluster t_heap using t_btree') @@ -2573,11 +2548,7 @@ def test_ptrack_cluster_on_btree_replica(self): master.safe_psql('postgres', 'vacuum t_heap') master.safe_psql('postgres', 'checkpoint') - self.backup_node( - backup_dir, 'replica', replica, options=[ - '-j10', '--stream', '--master-host=localhost', - '--master-db=postgres', '--master-port={0}'.format( - master.port)]) + self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream']) for i in idx_ptrack: # get size of heap and indexes. size calculated in pages @@ -2674,9 +2645,7 @@ def test_ptrack_cluster_on_gist_replica(self): self.backup_node( backup_dir, 'replica', replica, options=[ - '-j10', '--stream', '--master-host=localhost', - '--master-db=postgres', '--master-port={0}'.format( - master.port)]) + '-j10', '--stream']) for i in idx_ptrack: # get size of heap and indexes. 
size calculated in pages @@ -2844,11 +2813,7 @@ def test_ptrack_empty_replica(self): backup_dir, 'replica', replica, - options=[ - '-j10', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) # Create indexes for i in idx_ptrack: @@ -2868,11 +2833,7 @@ def test_ptrack_empty_replica(self): 'replica', replica, backup_type='ptrack', - options=[ - '-j1', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j1', '--stream']) if self.paranoia: pgdata = self.pgdata_content(replica.data_dir) @@ -3041,12 +3002,7 @@ def test_basic_ptrack_truncate_replica(self): # Make backup to clean every ptrack self.backup_node( backup_dir, 'replica', replica, - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3070,12 +3026,7 @@ def test_basic_ptrack_truncate_replica(self): self.backup_node( backup_dir, 'replica', replica, backup_type='ptrack', - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) pgdata = self.pgdata_content(replica.data_dir) @@ -3245,12 +3196,7 @@ def test_ptrack_vacuum_replica(self): replica.safe_psql('postgres', 'checkpoint') # Make FULL backup to clean every ptrack - self.backup_node( - backup_dir, 'replica', replica, options=[ - '-j10', '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) + self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3430,12 +3376,7 @@ def test_ptrack_vacuum_bits_frozen_replica(self): # Take backup to clean every ptrack self.backup_node( backup_dir, 'replica', replica, - options=[ - '-j10', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) + options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3688,12 +3629,7 @@ def test_ptrack_vacuum_full_replica(self): # Take FULL backup to clean every ptrack self.backup_node( backup_dir, 'replica', replica, - options=[ - '-j10', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) + options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3860,13 +3796,7 @@ def test_ptrack_vacuum_truncate_replica(self): # Take FULL backup to clean every ptrack self.backup_node( backup_dir, 'replica', replica, - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port) - ] + options=['-j10', '--stream'] ) if master.major_version < 11: diff --git a/tests/replica.py b/tests/replica.py index 4bcfa6083..4fe009062 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -152,11 +152,7 @@ def test_replica_stream_ptrack_backup(self): backup_id = self.backup_node( backup_dir, 'replica', replica, - options=[ - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['--stream']) self.validate_pb(backup_dir, 'replica') self.assertEqual( 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) @@ -188,11 +184,7 @@ def test_replica_stream_ptrack_backup(self): 
backup_id = self.backup_node( backup_dir, 'replica', replica, backup_type='ptrack', - options=[ - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['--stream']) self.validate_pb(backup_dir, 'replica') self.assertEqual( 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) @@ -279,11 +271,7 @@ def test_replica_archive_page_backup(self): backup_id = self.backup_node( backup_dir, 'replica', replica, - options=[ - '--archive-timeout=60', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['--archive-timeout=60']) self.validate_pb(backup_dir, 'replica') self.assertEqual( @@ -315,11 +303,7 @@ def test_replica_archive_page_backup(self): backup_id = self.backup_node( backup_dir, 'replica', replica, backup_type='page', - options=[ - '--archive-timeout=60', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['--archive-timeout=60']) pgbench.wait() From a880b9165b4e0f89b0ae798f2b15aafadcb02a0b Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Wed, 20 Jul 2022 03:13:35 +0300 Subject: [PATCH 04/17] [PBCKP-232] makefile simplification --- Makefile | 26 ++++++-------------------- get_pg_version.mk | 36 ------------------------------------ 2 files changed, 6 insertions(+), 56 deletions(-) delete mode 100644 get_pg_version.mk diff --git a/Makefile b/Makefile index 3753d9cb7..a1b1ebed3 100644 --- a/Makefile +++ b/Makefile @@ -17,21 +17,17 @@ # git clone https://github.com/postgrespro/pg_probackup postgresql/contrib/pg_probackup # cd postgresql # ./configure ... && make -# make --no-print-directory -C contrib/pg_probackup +# make -C contrib/pg_probackup # # 4. out of PG source and without PGXS # git clone https://git.postgresql.org/git/postgresql.git postgresql-src # git clone https://github.com/postgrespro/pg_probackup postgresql-src/contrib/pg_probackup # mkdir postgresql-build && cd postgresql-build # ../postgresql-src/configure ... 
&& make -# make --no-print-directory -C contrib/pg_probackup +# make -C contrib/pg_probackup # top_pbk_srcdir := $(dir $(realpath $(firstword $(MAKEFILE_LIST)))) -# get postgres version -PG_MAJORVER != $(MAKE) USE_PGXS=$(USE_PGXS) PG_CONFIG=$(PG_CONFIG) --silent --makefile=$(top_pbk_srcdir)get_pg_version.mk -#$(info Making with PG_MAJORVER=$(PG_MAJORVER)) - PROGRAM := pg_probackup # pg_probackup sources @@ -47,18 +43,14 @@ OBJS += src/archive.o src/backup.o src/catalog.o src/checkdb.o src/configure.o s BORROWED_H_SRC := \ src/include/portability/instr_time.h \ src/bin/pg_basebackup/receivelog.h \ - src/bin/pg_basebackup/streamutil.h + src/bin/pg_basebackup/streamutil.h \ + src/bin/pg_basebackup/walmethods.h BORROWED_C_SRC := \ src/backend/access/transam/xlogreader.c \ src/backend/utils/hash/pg_crc.c \ src/bin/pg_basebackup/receivelog.c \ - src/bin/pg_basebackup/streamutil.c -ifneq ($(PG_MAJORVER), $(findstring $(PG_MAJORVER), 9.5 9.6)) -BORROWED_H_SRC += \ - src/bin/pg_basebackup/walmethods.h -BORROWED_C_SRC += \ + src/bin/pg_basebackup/streamutil.c \ src/bin/pg_basebackup/walmethods.c -endif BORROW_DIR := src/borrowed BORROWED_H := $(addprefix $(BORROW_DIR)/, $(notdir $(BORROWED_H_SRC))) @@ -84,9 +76,6 @@ include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk endif -# now we can use standard MAJORVERSION variable instead of calculated PG_MAJORVER -undefine PG_MAJORVER - # PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -I$(top_pbk_srcdir)src -I$(BORROW_DIR) PG_CPPFLAGS += -I$(top_pbk_srcdir)src/fu_util -Wno-declaration-after-statement @@ -99,11 +88,8 @@ PG_LIBS_INTERNAL = $(libpq_pgport) ${PTHREAD_CFLAGS} # additional dependencies on borrowed files src/archive.o: $(BORROW_DIR)/instr_time.h src/backup.o src/catchup.o src/pg_probackup.o: $(BORROW_DIR)/streamutil.h -src/stream.o $(BORROW_DIR)/receivelog.o $(BORROW_DIR)/streamutil.o: $(BORROW_DIR)/receivelog.h -ifneq ($(MAJORVERSION), $(findstring $(MAJORVERSION), 9.5 9.6)) +src/stream.o $(BORROW_DIR)/receivelog.o $(BORROW_DIR)/streamutil.o $(BORROW_DIR)/walmethods.o: $(BORROW_DIR)/receivelog.h $(BORROW_DIR)/receivelog.h: $(BORROW_DIR)/walmethods.h -$(BORROW_DIR)/walmethods.o: $(BORROW_DIR)/receivelog.h -endif # generate separate makefile to handle borrowed files borrowed.mk: $(firstword $(MAKEFILE_LIST)) diff --git a/get_pg_version.mk b/get_pg_version.mk deleted file mode 100644 index d5468c5bb..000000000 --- a/get_pg_version.mk +++ /dev/null @@ -1,36 +0,0 @@ -# pg_probackup build system -# -# When building pg_probackup, there is a chicken and egg problem: -# 1. We have to define the OBJS list before including the PG makefiles. -# 2. To define this list, we need to know the PG major version. -# 3. But we can find out the postgres version only after including makefiles. -# -# This minimal makefile solves this problem, its only purpose is to -# calculate the version number from which the main build will occur next. -# -# Usage: -# include this line into main makefile -# PG_MAJORVER != $(MAKE) USE_PGXS=$(USE_PGXS) PG_CONFIG=$(PG_CONFIG) --silent --makefile=get_pg_version.mk -# -# Known issues: -# When parent make called with -C and without --no-print-directory, then -# 'make: Leaving directory ...' string will be added (by caller make process) to PG_MAJORVER -# (at least with GNU Make 4.2.1) -# -.PHONY: get_pg_version -get_pg_version: - -ifdef USE_PGXS -PG_CONFIG = pg_config -PGXS := $(shell $(PG_CONFIG) --pgxs) -include $(PGXS) -else -subdir = contrib/pg_probackup -top_builddir = ../.. 
-include $(top_builddir)/src/Makefile.global -include $(top_srcdir)/contrib/contrib-global.mk -endif - -get_pg_version: - $(info $(MAJORVERSION)) - From 249876ad2b6a1b74e175ae4585e1c99fc29f3378 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Fri, 26 Aug 2022 17:16:38 +0300 Subject: [PATCH 05/17] [PBCKP-232] remove deprecated options (master-db, master-host, master-port, master-user, replica-timeout) part 2 --- src/configure.c | 25 +++++++++++++++++++++- tests/compatibility.py | 48 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+), 1 deletion(-) diff --git a/src/configure.c b/src/configure.c index 47433346f..b828a30e5 100644 --- a/src/configure.c +++ b/src/configure.c @@ -34,13 +34,15 @@ static void show_configure_json(ConfigOption *opt); #define OPTION_INSTANCE_GROUP "Backup instance information" #define OPTION_CONN_GROUP "Connection parameters" -#define OPTION_REPLICA_GROUP "Replica parameters" #define OPTION_ARCHIVE_GROUP "Archive parameters" #define OPTION_LOG_GROUP "Logging parameters" #define OPTION_RETENTION_GROUP "Retention parameters" #define OPTION_COMPRESS_GROUP "Compression parameters" #define OPTION_REMOTE_GROUP "Remote access parameters" +/* dummy placeholder used to absorb the obsolete options declared in instance_options[] below */ +static char *obsolete_option_placeholder = NULL; + /* * Short name should be non-printable ASCII character. */ @@ -90,6 +92,27 @@ ConfigOption instance_options[] = &instance_config.conn_opt.pguser, SOURCE_CMD, 0, OPTION_CONN_GROUP, 0, option_get_value }, + /* Obsolete options */ + { + 's', 202, "master-db", + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value + }, + { + 's', 203, "master-host", + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value + }, + { + 's', 204, "master-port", + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value + }, + { + 's', 205, "master-user", + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value + }, + { + 's', 206, "replica-timeout", + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value + }, /* Archive options */ { 'u', 207, "archive-timeout", diff --git a/tests/compatibility.py b/tests/compatibility.py index e274c22be..e3aab15e0 100644 --- a/tests/compatibility.py +++ b/tests/compatibility.py @@ -1482,3 +1482,51 @@ def test_compatibility_tablespace(self): # Clean after yourself self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_compatibility_master_options(self): + """ + Test correct handling of the removed master-db, master-host, master-port, + master-user and replica-timeout options + """ + self.assertTrue( + self.version_to_num(self.old_probackup_version) <= self.version_to_num('2.6.0'), + 'You need pg_probackup old_binary <= 2.6.0 for this test') + + fname = self.id().split('.')[3] + node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node')) + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + + self.init_pb(backup_dir, old_binary=True) + self.add_instance(backup_dir, 'node', node, old_binary=True) + + # add deprecated options (using probackup < 2.6) into pg_probackup.conf + # don't care about option values, we can use random values here + self.set_config( + backup_dir, 'node', + options=[ + '--master-db=postgres', + '--master-host=localhost', + '--master-port=5432', + '--master-user={0}'.format(self.user), + '--replica-timeout=100500'],
+ old_binary=True) + + # and try to show config with new binary (those options must be silently skipped) + self.show_config(backup_dir, 'node', old_binary=False) + + # store config with new version (those options must disappear from config) + self.set_config( + backup_dir, 'node', + options=[], + old_binary=False) + + # and check absence + config_options = self.show_config(backup_dir, 'node', old_binary=False) + self.assertFalse( + ['master-db', 'master-host', 'master-port', 'master-user', 'replica-timeout'] & config_options.keys(), + 'Obsolete options found in new config') + + # Clean after yourself + self.del_test_dir(module_name, fname) + From dcf52ee5c826216b994ab4d83ff6fe4483656000 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Fri, 26 Aug 2022 19:52:27 +0300 Subject: [PATCH 06/17] [PBCKP-230] remove instr_time.h from borrowed filelist --- Makefile | 2 -- src/archive.c | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 2b663f59b..e9ed487cc 100644 --- a/Makefile +++ b/Makefile @@ -40,7 +40,6 @@ OBJS += src/archive.o src/backup.o src/catalog.o src/checkdb.o src/configure.o s # sources borrowed from postgresql (paths are relative to pg top dir) BORROWED_H_SRC := \ - src/include/portability/instr_time.h \ src/bin/pg_basebackup/receivelog.h \ src/bin/pg_basebackup/streamutil.h \ src/bin/pg_basebackup/walmethods.h @@ -87,7 +86,6 @@ override CPPFLAGS := -DFRONTEND $(CPPFLAGS) $(PG_CPPFLAGS) PG_LIBS_INTERNAL = $(libpq_pgport) ${PTHREAD_CFLAGS} # additional dependencies on borrowed files -src/archive.o: $(BORROW_DIR)/instr_time.h src/backup.o src/catchup.o src/pg_probackup.o: $(BORROW_DIR)/streamutil.h src/stream.o $(BORROW_DIR)/receivelog.o $(BORROW_DIR)/streamutil.o $(BORROW_DIR)/walmethods.o: $(BORROW_DIR)/receivelog.h $(BORROW_DIR)/receivelog.h: $(BORROW_DIR)/walmethods.h diff --git a/src/archive.c b/src/archive.c index 0ebe5e504..b552689cd 100644 --- a/src/archive.c +++ b/src/archive.c @@ -11,7 +11,7 @@ #include #include "pg_probackup.h" #include "utils/thread.h" -#include "instr_time.h" +#include "portability/instr_time.h" static int push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, From 491452ae614d144a71c352de51b3efeccc1ce493 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Sat, 27 Aug 2022 13:33:42 +0300 Subject: [PATCH 07/17] [PBCKP-256] remove src/backend/utils/hash/pg_crc.c borrowing for this * use of crc macros has been split * places where _TRADITIONAL_CRC32 and _CRC32C are used are explicitly marked * added two files to ensure compatibility with pg-11 * at the end of pg-11 support, these new files and places where _TRADITIONAL_CRC32 is used can be deleted --- Makefile | 4 +- src/backup.c | 2 +- src/catalog.c | 12 ++--- src/compatibility/pg-11.c | 98 +++++++++++++++++++++++++++++++++++ src/compatibility/pg-11.h | 67 ++++++++++++++++++++++++ src/data.c | 54 +++++++++++--------- src/dir.c | 104 ++++++++++++++++++++++++++++---------- src/pg_probackup.h | 29 ++--------- src/restore.c | 8 --- src/stream.c | 8 +-- src/util.c | 2 +- src/utils/file.c | 18 +++---- src/utils/pgut.h | 3 +- src/validate.c | 35 +++++++++---- 14 files changed, 325 insertions(+), 119 deletions(-) create mode 100644 src/compatibility/pg-11.c create mode 100644 src/compatibility/pg-11.h diff --git a/Makefile b/Makefile index e9ed487cc..5c50c6d5e 100644 --- a/Makefile +++ b/Makefile @@ -36,7 +36,8 @@ OBJS := src/utils/configuration.o src/utils/json.o src/utils/logger.o \ OBJS += src/archive.o src/backup.o src/catalog.o src/checkdb.o src/configure.o src/data.o \ src/delete.o src/dir.o src/fetch.o src/help.o src/init.o src/merge.o \ src/parsexlog.o src/ptrack.o src/pg_probackup.o src/restore.o src/show.o src/stream.o \ - src/util.o src/validate.o src/datapagemap.o src/catchup.o + src/util.o src/validate.o src/datapagemap.o src/catchup.o \ + src/compatibility/pg-11.o # sources borrowed from postgresql (paths are relative to pg top dir) BORROWED_H_SRC := \ @@ -45,7 +46,6 @@ BORROWED_H_SRC := \ src/bin/pg_basebackup/walmethods.h BORROWED_C_SRC := \ src/backend/access/transam/xlogreader.c \ - src/backend/utils/hash/pg_crc.c \ src/bin/pg_basebackup/receivelog.c \ src/bin/pg_basebackup/streamutil.c \ src/bin/pg_basebackup/walmethods.c diff --git a/src/backup.c b/src/backup.c index 15f1a4d1c..b98935b8e 100644 --- a/src/backup.c +++ b/src/backup.c @@ -1725,7 +1725,7 @@ pg_stop_backup_write_file_helper(const char *path, const char *filename, const c if (S_ISREG(file->mode)) { - file->crc = pgFileGetCRC(full_filename, true, false); + file->crc = pgFileGetCRC32C(full_filename, false); file->write_size = file->size; file->uncompressed_size = file->size; diff --git a/src/catalog.c b/src/catalog.c index b93564f7e..4e132438e 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1067,7 +1067,7 @@ get_backup_filelist(pgBackup *backup, bool strict) files = parray_new(); - INIT_FILE_CRC32(true, content_crc); + INIT_CRC32C(content_crc); while (fgets(buf, lengthof(buf), fp)) { @@ -1089,7 +1089,7 @@ get_backup_filelist(pgBackup *backup, bool strict) hdr_size; pgFile *file; - COMP_FILE_CRC32(true, content_crc, buf, strlen(buf)); + COMP_CRC32C(content_crc, buf, strlen(buf)); get_control_value_str(buf, "path", path, sizeof(path),true); get_control_value_int64(buf, "size", &write_size, true); @@ -1141,7 +1141,7 @@ get_backup_filelist(pgBackup *backup, bool strict) parray_append(files, file); } - FIN_FILE_CRC32(true, content_crc); + FIN_CRC32C(content_crc); if (ferror(fp)) elog(ERROR, "Failed to read from file: \"%s\"", backup_filelist_path); @@ -2538,7 +2538,7 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, setvbuf(out, buf, _IOFBF, BUFFERSZ); if (sync) - INIT_FILE_CRC32(true, backup->content_crc); + INIT_CRC32C(backup->content_crc); /* print each file in the list */ for (i = 
0; i < parray_num(files); i++) @@ -2606,13 +2606,13 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, sprintf(line+len, "}\n"); if (sync) - COMP_FILE_CRC32(true, backup->content_crc, line, strlen(line)); + COMP_CRC32C(backup->content_crc, line, strlen(line)); fprintf(out, "%s", line); } if (sync) - FIN_FILE_CRC32(true, backup->content_crc); + FIN_CRC32C(backup->content_crc); if (fflush(out) != 0) elog(ERROR, "Cannot flush file list \"%s\": %s", diff --git a/src/compatibility/pg-11.c b/src/compatibility/pg-11.c new file mode 100644 index 000000000..52f4b551c --- /dev/null +++ b/src/compatibility/pg-11.c @@ -0,0 +1,98 @@ +/*------------------------------------------------------------------------- + * + * pg-11.c + * PostgreSQL <= 11 compatibility + * + * Portions Copyright (c) 2022, Postgres Professional + * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + *------------------------------------------------------------------------- + */ + +#include + +#if PG_VERSION_NUM < 120000 + +#include "c.h" +#include "utils/pg_crc.h" + +/* From postgresql src/backend/utils/hash/pg_crc.c */ + +/* + * Lookup table for calculating CRC-32 using Sarwate's algorithm. + * + * This table is based on the polynomial + * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x+1. + * (This is the same polynomial used in Ethernet checksums, for instance.) + * Using Williams' terms, this is the "normal", not "reflected" version. + */ + +const uint32 pg_crc32_table[256] = { + 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, + 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, + 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, + 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, + 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, + 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, + 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, + 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, + 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, + 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, + 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, + 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, + 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, + 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, + 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, + 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, + 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, + 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, + 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, + 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, + 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, + 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, + 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, + 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, + 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, + 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, + 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, + 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, + 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, + 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, + 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, + 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, + 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, + 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, + 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, + 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, + 
0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, + 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, + 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, + 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, + 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, + 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, + 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, + 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, + 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, + 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, + 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, + 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, + 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, + 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, + 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, + 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, + 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, + 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, + 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, + 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, + 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, + 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D +}; + +#endif diff --git a/src/compatibility/pg-11.h b/src/compatibility/pg-11.h new file mode 100644 index 000000000..63a83070a --- /dev/null +++ b/src/compatibility/pg-11.h @@ -0,0 +1,67 @@ +/*------------------------------------------------------------------------- + * + * pg-11.h + * PostgreSQL <= 11 compatibility + * + * Copyright (c) 2022, Postgres Professional + * + * When PG-11 reaches the end of support, we will need to remove + * *_CRC32_COMPAT macros and use *_CRC32C instead. + * And this file will be removed. + *------------------------------------------------------------------------- + */ + +#ifndef PG11_COMPAT_H +#define PG11_COMPAT_H + +#include "utils/pgut.h" + +#if PG_VERSION_NUM >= 120000 + +#define INIT_CRC32_COMPAT(backup_version, crc) \ +do { \ + Assert(backup_version >= 20025); \ + INIT_CRC32C(crc); \ +} while (0) + +#define COMP_CRC32_COMPAT(backup_version, crc, data, len) \ +do { \ + Assert(backup_version >= 20025); \ + COMP_CRC32C((crc), (data), (len)); \ +} while (0) + +#define FIN_CRC32_COMPAT(backup_version, crc) \ +do { \ + Assert(backup_version >= 20025); \ + FIN_CRC32C(crc); \ +} while (0) + +#else /* PG_VERSION_NUM < 120000 */ + +#define INIT_CRC32_COMPAT(backup_version, crc) \ +do { \ + if (backup_version <= 20021 || backup_version >= 20025) \ + INIT_CRC32C(crc); \ + else \ + INIT_TRADITIONAL_CRC32(crc); \ +} while (0) + +#define COMP_CRC32_COMPAT(backup_version, crc, data, len) \ +do { \ + if (backup_version <= 20021 || backup_version >= 20025) \ + COMP_CRC32C((crc), (data), (len)); \ + else \ + COMP_TRADITIONAL_CRC32(crc, data, len); \ +} while (0) + +#define FIN_CRC32_COMPAT(backup_version, crc) \ +do { \ + if (backup_version <= 20021 || backup_version >= 20025) \ + FIN_CRC32C(crc); \ + else \ + FIN_TRADITIONAL_CRC32(crc); \ +} while (0) + +#endif /* PG_VERSION_NUM < 120000 */ + +#endif /* PG11_COMPAT_H */ diff --git a/src/data.c b/src/data.c index 17ae4b91a..f50749497 100644 --- a/src/data.c +++ b/src/data.c @@ -24,6 +24,9 @@ #include "utils/thread.h" +/* for crc32_compat macros */ +#include "compatibility/pg-11.h" + /* Union to ease operations on relation pages */ typedef struct DataPage { @@ -32,7 +35,7 @@ typedef struct DataPage } DataPage; static bool get_page_header(FILE *in, const char *fullpath, BackupPageHeader *bph, - 
pg_crc32 *crc, bool use_crc32c); + pg_crc32 *crc, uint32 backup_version); #ifdef HAVE_LIBZ /* Implementation of zlib compression method */ @@ -448,7 +451,7 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum, write_buffer_size = compressed_size + sizeof(BackupPageHeader); /* Update CRC */ - COMP_FILE_CRC32(true, *crc, write_buffer, write_buffer_size); + COMP_CRC32C(*crc, write_buffer, write_buffer_size); /* write data page */ if (fio_fwrite(out, write_buffer, write_buffer_size) != write_buffer_size) @@ -529,7 +532,7 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat file->read_size = 0; file->write_size = 0; file->uncompressed_size = 0; - INIT_FILE_CRC32(true, file->crc); + INIT_CRC32C(file->crc); /* * Read each page, verify checksum and write it to backup. @@ -628,7 +631,7 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat cleanup: /* finish CRC calculation */ - FIN_FILE_CRC32(true, file->crc); + FIN_CRC32C(file->crc); /* dump page headers */ write_page_headers(headers, file, hdr_map, is_merge); @@ -805,7 +808,7 @@ backup_non_data_file(pgFile *file, pgFile *prev_file, file->crc = fio_get_crc32(FIO_DB_HOST, from_fullpath, false); /* ...and checksum is the same... */ - if (EQ_TRADITIONAL_CRC32(file->crc, prev_file->crc)) + if (EQ_CRC32C(file->crc, prev_file->crc)) { file->write_size = BYTES_INVALID; return; /* ...skip copying file. */ @@ -1018,7 +1021,7 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers * or when merging something. Align read_len only when restoring * or merging old backups. */ - if (get_page_header(in, from_fullpath, &(page).bph, NULL, false)) + if (get_page_header(in, from_fullpath, &(page).bph, NULL, backup_version)) { cur_pos_in += sizeof(BackupPageHeader); @@ -1389,7 +1392,7 @@ backup_non_data_file_internal(const char *from_fullpath, ssize_t read_len = 0; char *buf = NULL; - INIT_FILE_CRC32(true, file->crc); + INIT_CRC32C(file->crc); /* reset size summary */ file->read_size = 0; @@ -1485,7 +1488,7 @@ backup_non_data_file_internal(const char *from_fullpath, strerror(errno)); /* update CRC */ - COMP_FILE_CRC32(true, file->crc, buf, read_len); + COMP_CRC32C(file->crc, buf, read_len); file->read_size += read_len; } @@ -1501,7 +1504,7 @@ backup_non_data_file_internal(const char *from_fullpath, cleanup: /* finish CRC calculation and store into pgFile */ - FIN_FILE_CRC32(true, file->crc); + FIN_CRC32C(file->crc); if (in && fclose(in)) elog(ERROR, "Cannot close the file \"%s\": %s", from_fullpath, strerror(errno)); @@ -1678,7 +1681,6 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, bool is_valid = true; FILE *in; pg_crc32 crc; - bool use_crc32c = backup_version <= 20021 || backup_version >= 20025; BackupPageHeader2 *headers = NULL; int n_hdr = -1; off_t cur_pos_in = 0; @@ -1702,7 +1704,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, } /* calc CRC of backup file */ - INIT_FILE_CRC32(use_crc32c, crc); + INIT_CRC32_COMPAT(backup_version, crc); /* read and validate pages one by one */ while (true) @@ -1718,7 +1720,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, if (interrupted || thread_interrupted) elog(ERROR, "Interrupted during data file validation"); - /* newer backups have page headers in separate storage */ + /* newer backups (post 2.4.0) have page headers in separate storage */ if (headers) { n_hdr++; @@ -1747,10 +1749,10 @@ validate_file_pages(pgFile *file, const char 
*fullpath, XLogRecPtr stop_lsn, cur_pos_in = headers[n_hdr].pos; } } - /* old backups rely on header located directly in data file */ + /* old backups (pre 2.4.0) rely on header located directly in data file */ else { - if (get_page_header(in, fullpath, &(compressed_page).bph, &crc, use_crc32c)) + if (get_page_header(in, fullpath, &(compressed_page).bph, &crc, backup_version)) { /* Backward compatibility kludge, TODO: remove in 3.0 * for some reason we padded compressed pages in old versions @@ -1790,9 +1792,9 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, cur_pos_in += read_len; if (headers) - COMP_FILE_CRC32(use_crc32c, crc, &compressed_page, read_len); + COMP_CRC32_COMPAT(backup_version, crc, &compressed_page, read_len); else - COMP_FILE_CRC32(use_crc32c, crc, compressed_page.data, read_len); + COMP_CRC32_COMPAT(backup_version, crc, compressed_page.data, read_len); if (compressed_size != BLCKSZ || page_may_be_compressed(compressed_page.data, file->compress_alg, @@ -1861,7 +1863,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, } } - FIN_FILE_CRC32(use_crc32c, crc); + FIN_CRC32_COMPAT(backup_version, crc); fclose(in); if (crc != file->crc) @@ -2017,7 +2019,7 @@ get_lsn_map(const char *fullpath, uint32 checksum_version, /* Every page in data file contains BackupPageHeader, extract it */ bool get_page_header(FILE *in, const char *fullpath, BackupPageHeader* bph, - pg_crc32 *crc, bool use_crc32c) + pg_crc32 *crc, uint32 backup_version) { /* read BackupPageHeader */ size_t read_len = fread(bph, 1, sizeof(BackupPageHeader), in); @@ -2044,7 +2046,7 @@ get_page_header(FILE *in, const char *fullpath, BackupPageHeader* bph, * the problem of backward compatibility for backups of old versions */ if (crc) - COMP_FILE_CRC32(use_crc32c, *crc, bph, read_len); + COMP_CRC32_COMPAT(backup_version, *crc, bph, read_len); if (bph->block == 0 && bph->compressed_size == 0) elog(ERROR, "Empty block in file \"%s\"", fullpath); @@ -2363,6 +2365,8 @@ copy_pages(const char *to_fullpath, const char *from_fullpath, * array of headers. * TODO: some access optimizations would be great here: * less fseeks, buffering, descriptor sharing, etc. + * + * Used for post 2.4.0 backups */ BackupPageHeader2* get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, bool strict) @@ -2437,9 +2441,9 @@ get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, b } /* validate checksum */ - INIT_FILE_CRC32(true, hdr_crc); - COMP_FILE_CRC32(true, hdr_crc, headers, read_len); - FIN_FILE_CRC32(true, hdr_crc); + INIT_CRC32C(hdr_crc); + COMP_CRC32C(hdr_crc, headers, read_len); + FIN_CRC32C(hdr_crc); if (hdr_crc != file->hdr_crc) { @@ -2486,9 +2490,9 @@ write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map, read_len = (file->n_headers + 1) * sizeof(BackupPageHeader2); /* calculate checksums */ - INIT_FILE_CRC32(true, file->hdr_crc); - COMP_FILE_CRC32(true, file->hdr_crc, headers, read_len); - FIN_FILE_CRC32(true, file->hdr_crc); + INIT_CRC32C(file->hdr_crc); + COMP_CRC32C(file->hdr_crc, headers, read_len); + FIN_CRC32C(file->hdr_crc); zheaders = pgut_malloc(read_len * 2); memset(zheaders, 0, read_len * 2); diff --git a/src/dir.c b/src/dir.c index 0bcd60169..53f92ef74 100644 --- a/src/dir.c +++ b/src/dir.c @@ -203,26 +203,23 @@ pgFileInit(const char *rel_path) * obvious about it. 
*/ pg_crc32 -pgFileGetCRC(const char *file_path, bool use_crc32c, bool missing_ok) +pgFileGetCRC32C(const char *file_path, bool missing_ok) { FILE *fp; pg_crc32 crc = 0; char *buf; size_t len = 0; - INIT_FILE_CRC32(use_crc32c, crc); + INIT_CRC32C(crc); /* open file in binary read mode */ fp = fopen(file_path, PG_BINARY_R); if (fp == NULL) { - if (errno == ENOENT) + if (missing_ok && errno == ENOENT) { - if (missing_ok) - { - FIN_FILE_CRC32(use_crc32c, crc); - return crc; - } + FIN_CRC32C(crc); + return crc; } elog(ERROR, "Cannot open file \"%s\": %s", @@ -234,7 +231,7 @@ pgFileGetCRC(const char *file_path, bool use_crc32c, bool missing_ok) buf = pgut_malloc(STDIO_BUFSIZE); /* calc CRC of file */ - for (;;) + do { if (interrupted) elog(ERROR, "interrupted during CRC calculation"); @@ -244,19 +241,75 @@ pgFileGetCRC(const char *file_path, bool use_crc32c, bool missing_ok) if (ferror(fp)) elog(ERROR, "Cannot read \"%s\": %s", file_path, strerror(errno)); - /* update CRC */ - COMP_FILE_CRC32(use_crc32c, crc, buf, len); + COMP_CRC32C(crc, buf, len); + } + while (!feof(fp)); + + FIN_CRC32C(crc); + fclose(fp); + pg_free(buf); + + return crc; +} + +#if PG_VERSION_NUM < 120000 +/* + * Read the local file to compute its CRC using traditional algorithm. + * (*_TRADITIONAL_CRC32 macros) + * This was used only in version 2.0.22--2.0.24 + * And never used for PG >= 12 + * To be removed with end of PG-11 support + */ +pg_crc32 +pgFileGetCRC32(const char *file_path, bool missing_ok) +{ + FILE *fp; + pg_crc32 crc = 0; + char *buf; + size_t len = 0; + + INIT_TRADITIONAL_CRC32(crc); + + /* open file in binary read mode */ + fp = fopen(file_path, PG_BINARY_R); + if (fp == NULL) + { + if (missing_ok && errno == ENOENT) + { + FIN_TRADITIONAL_CRC32(crc); + return crc; + } - if (feof(fp)) - break; + elog(ERROR, "Cannot open file \"%s\": %s", + file_path, strerror(errno)); } - FIN_FILE_CRC32(use_crc32c, crc); + /* disable stdio buffering */ + setvbuf(fp, NULL, _IONBF, BUFSIZ); + buf = pgut_malloc(STDIO_BUFSIZE); + + /* calc CRC of file */ + do + { + if (interrupted) + elog(ERROR, "interrupted during CRC calculation"); + + len = fread(buf, 1, STDIO_BUFSIZE, fp); + + if (ferror(fp)) + elog(ERROR, "Cannot read \"%s\": %s", file_path, strerror(errno)); + + COMP_TRADITIONAL_CRC32(crc, buf, len); + } + while (!feof(fp)); + + FIN_TRADITIONAL_CRC32(crc); fclose(fp); pg_free(buf); return crc; } +#endif /* PG_VERSION_NUM < 120000 */ /* * Read the local file to compute its CRC. @@ -265,7 +318,7 @@ pgFileGetCRC(const char *file_path, bool use_crc32c, bool missing_ok) * obvious about it. 
*/ pg_crc32 -pgFileGetCRCgz(const char *file_path, bool use_crc32c, bool missing_ok) +pgFileGetCRC32Cgz(const char *file_path, bool missing_ok) { gzFile fp; pg_crc32 crc = 0; @@ -273,19 +326,16 @@ pgFileGetCRCgz(const char *file_path, bool use_crc32c, bool missing_ok) int err; char *buf; - INIT_FILE_CRC32(use_crc32c, crc); + INIT_CRC32C(crc); /* open file in binary read mode */ fp = gzopen(file_path, PG_BINARY_R); if (fp == NULL) { - if (errno == ENOENT) + if (missing_ok && errno == ENOENT) { - if (missing_ok) - { - FIN_FILE_CRC32(use_crc32c, crc); - return crc; - } + FIN_CRC32C(crc); + return crc; } elog(ERROR, "Cannot open file \"%s\": %s", @@ -311,16 +361,16 @@ pgFileGetCRCgz(const char *file_path, bool use_crc32c, bool missing_ok) { const char *err_str = NULL; - err_str = gzerror(fp, &err); - elog(ERROR, "Cannot read from compressed file %s", err_str); + err_str = gzerror(fp, &err); + elog(ERROR, "Cannot read from compressed file %s", err_str); } } /* update CRC */ - COMP_FILE_CRC32(use_crc32c, crc, buf, len); + COMP_CRC32C(crc, buf, len); } - FIN_FILE_CRC32(use_crc32c, crc); + FIN_CRC32C(crc); gzclose(fp); pg_free(buf); @@ -1758,7 +1808,7 @@ write_database_map(pgBackup *backup, parray *database_map, parray *backup_files_ /* Add metadata to backup_content.control */ file = pgFileNew(database_map_path, DATABASE_MAP, true, 0, FIO_BACKUP_HOST); - file->crc = pgFileGetCRC(database_map_path, true, false); + file->crc = pgFileGetCRC32C(database_map_path, false); file->write_size = file->size; file->uncompressed_size = file->read_size; diff --git a/src/pg_probackup.h b/src/pg_probackup.h index aeb55f83e..a52942d06 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -208,28 +208,6 @@ typedef enum ForkName ptrack } ForkName; -#define INIT_FILE_CRC32(use_crc32c, crc) \ -do { \ - if (use_crc32c) \ - INIT_CRC32C(crc); \ - else \ - INIT_TRADITIONAL_CRC32(crc); \ -} while (0) -#define COMP_FILE_CRC32(use_crc32c, crc, data, len) \ -do { \ - if (use_crc32c) \ - COMP_CRC32C((crc), (data), (len)); \ - else \ - COMP_TRADITIONAL_CRC32(crc, data, len); \ -} while (0) -#define FIN_FILE_CRC32(use_crc32c, crc) \ -do { \ - if (use_crc32c) \ - FIN_CRC32C(crc); \ - else \ - FIN_TRADITIONAL_CRC32(crc); \ -} while (0) - #define pg_off_t unsigned long long @@ -1046,8 +1024,11 @@ extern pgFile *pgFileNew(const char *path, const char *rel_path, extern pgFile *pgFileInit(const char *rel_path); extern void pgFileFree(void *file); -extern pg_crc32 pgFileGetCRC(const char *file_path, bool use_crc32c, bool missing_ok); -extern pg_crc32 pgFileGetCRCgz(const char *file_path, bool use_crc32c, bool missing_ok); +extern pg_crc32 pgFileGetCRC32C(const char *file_path, bool missing_ok); +#if PG_VERSION_NUM < 120000 +extern pg_crc32 pgFileGetCRC32(const char *file_path, bool missing_ok); +#endif +extern pg_crc32 pgFileGetCRC32Cgz(const char *file_path, bool missing_ok); extern int pgFileMapComparePath(const void *f1, const void *f2); extern int pgFileCompareName(const void *f1, const void *f2); diff --git a/src/restore.c b/src/restore.c index 28a79f1ed..e4479a242 100644 --- a/src/restore.c +++ b/src/restore.c @@ -2010,7 +2010,6 @@ get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, { int i; int j; -// pg_crc32 crc; parray *database_map = NULL; parray *dbOid_exclude_list = NULL; pgFile *database_map_file = NULL; @@ -2040,13 +2039,6 @@ get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, join_path_components(path, backup->root_dir, DATABASE_DIR); join_path_components(database_map_path, path, 
DATABASE_MAP); - /* check database_map CRC */ -// crc = pgFileGetCRC(database_map_path, true, true, NULL, FIO_LOCAL_HOST); -// -// if (crc != database_map_file->crc) -// elog(ERROR, "Invalid CRC of backup file \"%s\" : %X. Expected %X", -// database_map_file->path, crc, database_map_file->crc); - /* get database_map from file */ database_map = read_database_map(backup); diff --git a/src/stream.c b/src/stream.c index b10eb7308..e2e016f4d 100644 --- a/src/stream.c +++ b/src/stream.c @@ -2,7 +2,7 @@ * * stream.c: pg_probackup specific code for WAL streaming * - * Portions Copyright (c) 2015-2022, Postgres Professional + * Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -689,7 +689,7 @@ add_walsegment_to_filelist(parray *filelist, uint32 timeline, XLogRecPtr xlogpos if (existing_file) { if (do_crc) - (*existing_file)->crc = pgFileGetCRC(wal_segment_fullpath, true, false); + (*existing_file)->crc = pgFileGetCRC32C(wal_segment_fullpath, false); (*existing_file)->write_size = xlog_seg_size; (*existing_file)->uncompressed_size = xlog_seg_size; @@ -697,7 +697,7 @@ add_walsegment_to_filelist(parray *filelist, uint32 timeline, XLogRecPtr xlogpos } if (do_crc) - file->crc = pgFileGetCRC(wal_segment_fullpath, true, false); + file->crc = pgFileGetCRC32C(wal_segment_fullpath, false); /* Should we recheck it using stat? */ file->write_size = xlog_seg_size; @@ -728,7 +728,7 @@ add_history_file_to_filelist(parray *filelist, uint32 timeline, char *basedir) /* calculate crc */ if (do_crc) - file->crc = pgFileGetCRC(fullpath, true, false); + file->crc = pgFileGetCRC32C(fullpath, false); file->write_size = file->size; file->uncompressed_size = file->size; diff --git a/src/util.c b/src/util.c index b58d88f96..e16241a70 100644 --- a/src/util.c +++ b/src/util.c @@ -304,7 +304,7 @@ get_pgcontrol_checksum(const char *pgdata_path) /* First fetch file... 
*/ buffer = slurpFile(FIO_BACKUP_HOST, pgdata_path, XLOG_CONTROL_FILE, &size, false); - + elog(WARNING, "checking %s", pgdata_path); digestControlFile(&ControlFile, buffer, size); pg_free(buffer); diff --git a/src/utils/file.c b/src/utils/file.c index 86977a19a..27b5edf86 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1428,9 +1428,9 @@ fio_get_crc32(fio_location location, const char *file_path, bool decompress) else { if (decompress) - return pgFileGetCRCgz(file_path, true, true); + return pgFileGetCRC32Cgz(file_path, true); else - return pgFileGetCRC(file_path, true, true); + return pgFileGetCRC32C(file_path, true); } } @@ -2082,7 +2082,7 @@ fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, Assert(hdr.size <= sizeof(buf)); IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); - COMP_FILE_CRC32(true, file->crc, buf, hdr.size); + COMP_CRC32C(file->crc, buf, hdr.size); /* lazily open backup file */ if (!out) @@ -2270,8 +2270,6 @@ fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, Assert(hdr.size <= sizeof(buf)); IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); - COMP_FILE_CRC32(true, file->crc, buf, hdr.size); - if (fio_fseek(out, blknum * BLCKSZ) < 0) { elog(ERROR, "Cannot seek block %u of \"%s\": %s", @@ -2635,7 +2633,7 @@ fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, if (file) { file->read_size += hdr.size; - COMP_FILE_CRC32(true, file->crc, buf, hdr.size); + COMP_CRC32C(file->crc, buf, hdr.size); } } else @@ -3366,9 +3364,9 @@ fio_communicate(int in, int out) case FIO_GET_CRC32: /* calculate crc32 for a file */ if (hdr.arg == 1) - crc = pgFileGetCRCgz(buf, true, true); + crc = pgFileGetCRC32Cgz(buf, true); else - crc = pgFileGetCRC(buf, true, true); + crc = pgFileGetCRC32C(buf, true); IO_CHECK(fio_write_all(out, &crc, sizeof(crc)), sizeof(crc)); break; case FIO_GET_CHECKSUM_MAP: @@ -3606,9 +3604,9 @@ pioLocalDrive_pioGetCRC32(VSelf, path_t path, bool compressed, err_i *err) elog(VERBOSE, "Local Drive calculate crc32 for '%s', compressed=%d", path, compressed); if (compressed) - return pgFileGetCRCgz(path, true, true); + return pgFileGetCRC32Cgz(path, true); else - return pgFileGetCRC(path, true, true); + return pgFileGetCRC32C(path, true); } static bool diff --git a/src/utils/pgut.h b/src/utils/pgut.h index 638259a3c..72ac20379 100644 --- a/src/utils/pgut.h +++ b/src/utils/pgut.h @@ -3,7 +3,7 @@ * pgut.h * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2017-2021, Postgres Professional + * Portions Copyright (c) 2017-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -11,6 +11,7 @@ #ifndef PGUT_H #define PGUT_H +#include #include "postgres_fe.h" #include "libpq-fe.h" diff --git a/src/validate.c b/src/validate.c index d88de5583..f76def06f 100644 --- a/src/validate.c +++ b/src/validate.c @@ -3,7 +3,7 @@ * validate.c: validate backup files. * * Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -341,14 +341,22 @@ pgBackupValidateFiles(void *arg) * Starting from 2.0.25 we calculate crc of pg_control differently. 
*/ if (arguments->backup_version >= 20025 && - strcmp(file->name, "pg_control") == 0 && - !file->external_dir_num) + strcmp(file->rel_path, XLOG_CONTROL_FILE) == 0 && + file->external_dir_num == 0) crc = get_pgcontrol_checksum(arguments->base_path); else - crc = pgFileGetCRC(file_fullpath, - arguments->backup_version <= 20021 || - arguments->backup_version >= 20025, - false); +#if PG_VERSION_NUM >= 120000 + { + Assert(arguments->backup_version >= 20025); + crc = pgFileGetCRC32C(file_fullpath, false); + } +#else /* PG_VERSION_NUM < 120000 */ + if (arguments->backup_version <= 20021 || arguments->backup_version >= 20025) + crc = pgFileGetCRC32C(file_fullpath, false); + else + crc = pgFileGetCRC32(file_fullpath, false); +#endif /* PG_VERSION_NUM < 120000 */ + if (crc != file->crc) { elog(WARNING, "Invalid CRC of backup file \"%s\" : %X. Expected %X", @@ -720,8 +728,6 @@ validate_tablespace_map(pgBackup *backup, bool no_validate) pgFile **tablespace_map = NULL; pg_crc32 crc; parray *files = get_backup_filelist(backup, true); - bool use_crc32c = parse_program_version(backup->program_version) <= 20021 || - parse_program_version(backup->program_version) >= 20025; parray_qsort(files, pgFileCompareRelPathWithExternal); join_path_components(map_path, backup->database_dir, PG_TABLESPACE_MAP_FILE); @@ -746,7 +752,16 @@ validate_tablespace_map(pgBackup *backup, bool no_validate) /* check tablespace map checksumms */ if (!no_validate) { - crc = pgFileGetCRC(map_path, use_crc32c, false); +#if PG_VERSION_NUM >= 120000 + Assert(parse_program_version(backup->program_version) >= 20025); + crc = pgFileGetCRC32C(map_path, false); +#else /* PG_VERSION_NUM < 120000 */ + if (parse_program_version(backup->program_version) <= 20021 + || parse_program_version(backup->program_version) >= 20025) + crc = pgFileGetCRC32C(map_path, false); + else + crc = pgFileGetCRC32(map_path, false); +#endif /* PG_VERSION_NUM < 120000 */ if ((*tablespace_map)->crc != crc) elog(ERROR, "Invalid CRC of tablespace map file \"%s\" : %X. 
Expected %X, " From 4b08603d5a42a9d40b7e6b805f7c85b92593850d Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 12 Sep 2022 12:24:25 +0300 Subject: [PATCH 08/17] [PBCKP-245] fu_utils: compilable under MinGW --- Makefile | 4 +- src/fu_util/CMakeLists.txt | 24 ++++++++- src/fu_util/fm_util.h | 86 ++++++++++++++---------------- src/fu_util/fo_obj.h | 1 - src/fu_util/ft_array.inc.h | 11 +++- src/fu_util/ft_search.inc.h | 7 ++- src/fu_util/ft_util.h | 25 +++++---- src/fu_util/impl/fo_impl.c | 30 +++++------ src/fu_util/impl/fo_impl2.h | 5 +- src/fu_util/impl/ft_impl.c | 82 +++++++++++++++++++++------- src/fu_util/test/CMakeLists.txt | 7 +++ src/fu_util/test/obj1.c | 1 + src/fu_util/test/qsort/qsort.inc.c | 1 - 13 files changed, 179 insertions(+), 105 deletions(-) diff --git a/Makefile b/Makefile index 2b663f59b..21553f97c 100644 --- a/Makefile +++ b/Makefile @@ -51,14 +51,14 @@ BORROWED_C_SRC := \ src/bin/pg_basebackup/streamutil.c \ src/bin/pg_basebackup/walmethods.c -OBJS += src/fu_util/impl/ft_impl.o src/fu_util/impl/fo_impl.o - BORROW_DIR := src/borrowed BORROWED_H := $(addprefix $(BORROW_DIR)/, $(notdir $(BORROWED_H_SRC))) BORROWED_C := $(addprefix $(BORROW_DIR)/, $(notdir $(BORROWED_C_SRC))) OBJS += $(patsubst %.c, %.o, $(BORROWED_C)) EXTRA_CLEAN := $(BORROWED_H) $(BORROWED_C) $(BORROW_DIR) borrowed.mk +OBJS += src/fu_util/impl/ft_impl.o src/fu_util/impl/fo_impl.o + # off-source build support ifneq ($(abspath $(CURDIR))/, $(top_pbk_srcdir)) VPATH := $(top_pbk_srcdir) diff --git a/src/fu_util/CMakeLists.txt b/src/fu_util/CMakeLists.txt index 6752d5dd2..a5426df42 100644 --- a/src/fu_util/CMakeLists.txt +++ b/src/fu_util/CMakeLists.txt @@ -5,6 +5,7 @@ set(CMAKE_C_STANDARD 99) set(CMAKE_C_EXTENSIONS true) include(CheckCSourceCompiles) +include(CheckFunctionExists) add_library(fu_utils impl/ft_impl.c impl/fo_impl.c) @@ -12,12 +13,21 @@ set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) target_link_libraries(fu_utils PRIVATE Threads::Threads) +if(CMAKE_USE_PTHREADS_INIT) + target_compile_definitions(fu_utils PRIVATE USE_PTHREADS) +else() + message(FATAL_ERROR "Need pthread support to build") +endif() + +CHECK_FUNCTION_EXISTS(strerror_r HAVE_STRERROR_R) + # Detect for installed beautiful https://github.com/ianlancetaylor/libbacktrace include_directories(.) 
if(NOT CMAKE_C_COMPILER MATCHES tcc) find_library(LIBBACKTRACE backtrace) if(LIBBACKTRACE) set(CMAKE_REQUIRED_LIBRARIES backtrace) + target_link_libraries(fu_utils PRIVATE backtrace) check_c_source_compiles(" #include int main(void) { @@ -30,11 +40,21 @@ if(NOT CMAKE_C_COMPILER MATCHES tcc) endif() endif() endif() +check_include_file(execinfo.h HAVE_EXECINFO_H) + +set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fexceptions") + +if(HAVE_EXECINFO_H) + target_compile_definitions(fu_utils PRIVATE HAVE_EXECINFO_H) +endif() +if(HAVE_STRERROR_R) + target_compile_definitions(fu_utils PRIVATE HAVE_STRERROR_R) +endif() + configure_file(fu_utils_cfg.h.in fu_utils_cfg.h) target_include_directories(fu_utils INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}") target_include_directories(fu_utils PRIVATE "${PROJECT_BINARY_DIR}") -target_link_libraries(fu_utils PUBLIC backtrace) install(TARGETS fu_utils DESTINATION lib) install(FILES fm_util.h ft_util.h fo_obj.h @@ -43,4 +63,4 @@ install(FILES fm_util.h ft_util.h fo_obj.h DESTINATION include/fu_utils) install(FILES impl/ft_impl.h impl/fo_impl.h DESTINATION include/fu_utils/impl) -add_subdirectory(test) \ No newline at end of file +add_subdirectory(test) diff --git a/src/fu_util/fm_util.h b/src/fu_util/fm_util.h index 11d96682d..18a971aa7 100644 --- a/src/fu_util/fm_util.h +++ b/src/fu_util/fm_util.h @@ -24,6 +24,9 @@ /****************************************/ // LOGIC +#define fm_true 1 +#define fm_false 0 + #define fm_compl(v) fm_cat(fm_compl_, v) #define fm_compl_0 1 #define fm_compl_1 0 @@ -81,38 +84,56 @@ #define fm_tail(...) fm__tail(__VA_ARGS__) #define fm__tail(x, ...) __VA_ARGS__ -#define fm_or_default(...) \ - fm_iif(fm_va_01(__VA_ARGS__))(__VA_ARGS__) #define fm_va_single(...) fm__va_single(__VA_ARGS__, fm__comma) #define fm_va_many(...) fm__va_many(__VA_ARGS__, fm__comma) #define fm__va_single(x, y, ...) fm__va_result(y, 1, 0) #define fm__va_many(x, y, ...) fm__va_result(y, 0, 1) -#define fm__va_result(x, y, res, ...) res +#define fm__va_result(...) fm__va_result_fin(__VA_ARGS__) +#define fm__va_result_fin(x, y, res, ...) res #define fm_no_va fm_is_empty #define fm_va_01 fm_isnt_empty -#define fm_va_01n(...) fm_cat3(fm__va_01n_, fm__isnt_empty(__VA_ARGS__), fm_va_many(__VA_ARGS__)) -#define fm__va_01n_00 0 -#define fm__va_01n_10 1 -#define fm__va_01n_11 n -#if !__STRICT_ANSI__ +#ifndef FM_USE_STRICT + #if defined(__STRICT_ANSI__) || defined(_MSC_VER) /* well, clang-cl doesn't allow to distinguish std mode */ + #define FM_USE_STRICT + #endif +#endif + +#ifndef FM_USE_STRICT #define fm_is_empty(...) fm__is_empty(__VA_ARGS__) #define fm__is_empty(...) fm_va_single(~, ##__VA_ARGS__) #define fm_isnt_empty(...) fm__isnt_empty(__VA_ARGS__) #define fm__isnt_empty(...) fm_va_many(~, ##__VA_ARGS__) + +#define fm_va_01n(...) fm_cat3(fm__va_01n_, fm__isnt_empty(__VA_ARGS__), fm_va_many(__VA_ARGS__)) +#define fm__va_01n_00 0 +#define fm__va_01n_10 1 +#define fm__va_01n_11 n + +#define fm_when_isnt_empty(...) fm_cat(fm__when_, fm__isnt_empty(__VA_ARGS__)) #else #define fm_is_empty(...) fm_and(fm__is_emptyfirst(__VA_ARGS__), fm_va_single(__VA_ARGS__)) #define fm_isnt_empty(...) fm_nand(fm__is_emptyfirst(__VA_ARGS__), fm_va_single(__VA_ARGS__)) #define fm__is_emptyfirst(x, ...) fm_iif(fm_is_tuple(x))(0)(fm__is_emptyfirst_impl(x)) -#define fm__is_emptyfirst_impl(x,...) fm_tuple_2((\ - fm__is_emptyfirst_do1 x (fm__is_emptyfirst_do2), 1, 0)) +#define fm__is_emptyfirst_impl(x,...) 
fm__va_result(\ + fm__is_emptyfirst_do1 x (fm__is_emptyfirst_do2), 1, 0) #define fm__is_emptyfirst_do1(F) F() #define fm__is_emptyfirst_do2(...) , + +#define fm_when_isnt_empty(...) fm_cat(fm__when_, fm_isnt_empty(__VA_ARGS__)) + +#define fm_va_01n(...) fm_cat3(fm__va_01n_, fm__is_emptyfirst(__VA_ARGS__), fm_va_many(__VA_ARGS__)) +#define fm__va_01n_10 0 +#define fm__va_01n_00 1 +#define fm__va_01n_01 n +#define fm__va_01n_11 n #endif -#define fm_when_isnt_empty(...) fm_cat(fm__when_, fm__isnt_empty(__VA_ARGS__)) +#define fm_or_default(...) \ + fm_iif(fm_va_01(__VA_ARGS__))(__VA_ARGS__) + #define fm_va_comma(...) \ fm_when_isnt_empty(__VA_ARGS__)(fm__comma) #define fm_va_comma_fun(...) \ @@ -127,23 +148,6 @@ #define fm__is_tuple_help(...) , #define fm__is_tuple_(...) fm__is_tuple_choose(__VA_ARGS__) -#define fm_tuple_expand(x) fm_expand x -#define fm_tuple_tag(x) fm_head x -#define fm_tuple_data(x) fm_tail x -#define fm_tuple_0(x) fm_head x -#define fm_tuple_1(x) fm__tuple_1 x -#define fm__tuple_1(_0, _1, ...) _1 -#define fm_tuple_2(x) fm__tuple_2 x -#define fm__tuple_2(_0, _1, _2, ...) _2 - -#define fm_tuple_tag_or_0(x) fm__tuple_tag_or_0_(fm__tuple_tag_or_0_help x, 0) -#define fm__tuple_tag_or_0_(...) fm__tuple_tag_or_0_choose(__VA_ARGS__) -#define fm__tuple_tag_or_0_choose(a,x,...) x -#define fm__tuple_tag_or_0_help(tag, ...) , tag - -#define fm_dispatch_tag_or_0(prefix, x) \ - fm_cat(prefix, fm_tuple_tag_or_0(x)) - /****************************************/ // Iteration @@ -160,20 +164,18 @@ // recursion handle : delay macro expansion to next recursion iteration #define fm_recurs(id) id fm_empty fm_empty() () -#define fm_recurs2(a,b) fm_cat fm_empty fm_empty() () (a,b) +#define fm_recurs2(a,b) fm_cat fm_empty() (a,b) #define fm_defer(id) id fm_empty() #define fm_foreach_join(join, macro, ...) \ - fm_foreach_join_(fm_empty, join, macro, __VA_ARGS__) -#define fm_foreach_join_(join1, join2, macro, ...) \ - fm_cat(fm_foreach_join_, fm_va_01n(__VA_ARGS__))(join1, join2, macro, __VA_ARGS__) + fm_cat(fm_foreach_join_, fm_va_01n(__VA_ARGS__))(fm_empty, join, macro, __VA_ARGS__) #define fm_foreach_join_0(join1, join2, macro, ...) #define fm_foreach_join_1(join1, join2, macro, x) \ join1() macro(x) #define fm_foreach_join_n(join1, join2, macro, x, y, ...) \ join1() macro(x) \ join2() macro(y) \ - fm_recurs2(fm_, foreach_join_) (join2, join2, macro, __VA_ARGS__) + fm_recurs2(fm_foreach_join_, fm_va_01n(__VA_ARGS__))(join2, join2, macro, __VA_ARGS__) #define fm_foreach(macro, ...) \ fm_foreach_join(fm_empty, macro, __VA_ARGS__) @@ -181,16 +183,14 @@ fm_foreach_join(fm_comma, macro, __VA_ARGS__) #define fm_foreach_arg_join(join, macro, arg, ...) \ - fm_foreach_arg_join_(fm_empty, join, macro, arg, __VA_ARGS__) -#define fm_foreach_arg_join_(join1, join2, macro, arg, ...) \ - fm_cat(fm_foreach_arg_join_, fm_va_01n(__VA_ARGS__))(join1, join2, macro, arg, __VA_ARGS__) + fm_cat(fm_foreach_arg_join_, fm_va_01n(__VA_ARGS__))(fm_empty, join, macro, arg, __VA_ARGS__) #define fm_foreach_arg_join_0(join1, join2, macro, ...) #define fm_foreach_arg_join_1(join1, join2, macro, arg, x) \ join1() macro(arg, x) #define fm_foreach_arg_join_n(join1, join2, macro, arg, x, y, ...) \ join1() macro(arg, x) \ join2() macro(arg, y) \ - fm_recurs2(fm_, foreach_arg_join_) (join2, join2, macro, arg, __VA_ARGS__) + fm_recurs2(fm_foreach_arg_join_, fm_va_01n(__VA_ARGS__))(join1, join2, macro, arg, __VA_ARGS__) #define fm_foreach_arg(macro, arg, ...) 
\ fm_foreach_arg_join(fm_empty, macro, arg, __VA_ARGS__) @@ -198,16 +198,14 @@ fm_foreach_arg_join(fm_comma, macro, arg, __VA_ARGS__) #define fm_foreach_tuple_join(join, macro, ...) \ - fm_foreach_tuple_join_(fm_empty, join, macro, __VA_ARGS__) -#define fm_foreach_tuple_join_(join1, join2, macro, ...) \ - fm_cat(fm_foreach_tuple_join_, fm_va_01n(__VA_ARGS__))(join1, join2, macro, __VA_ARGS__) + fm_cat(fm_foreach_tuple_join_, fm_va_01n(__VA_ARGS__))(fm_empty, join, macro, __VA_ARGS__) #define fm_foreach_tuple_join_0(join1, join2, macro, ...) #define fm_foreach_tuple_join_1(join1, join2, macro, x) \ join1() macro x #define fm_foreach_tuple_join_n(join1, join2, macro, x, y, ...) \ join1() macro x \ join2() macro y \ - fm_recurs2(fm_, foreach_tuple_join_) (join2, join2, macro, __VA_ARGS__) + fm_recurs2(fm_foreach_tuple_join_, fm_va_01n(__VA_ARGS__))(join2, join2, macro, __VA_ARGS__) #define fm_foreach_tuple(macro, ...) \ fm_foreach_tuple_join(fm_empty, macro, __VA_ARGS__) @@ -215,16 +213,14 @@ fm_foreach_tuple_join(fm_comma, macro, __VA_ARGS__) #define fm_foreach_tuple_arg_join(join, macro, arg, ...) \ - fm_foreach_tuple_arg_join_(fm_empty, join, macro, arg, __VA_ARGS__) -#define fm_foreach_tuple_arg_join_(join1, join2, macro, arg, ...) \ - fm_cat(fm_foreach_tuple_arg_join_, fm_va_01n(__VA_ARGS__))(join1, join2, macro, arg, __VA_ARGS__) + fm_cat(fm_foreach_tuple_arg_join_, fm_va_01n(__VA_ARGS__))(fm_empty, join, macro, arg, __VA_ARGS__) #define fm_foreach_tuple_arg_join_0(join1, join2, macro, ...) #define fm_foreach_tuple_arg_join_1(join1, join2, macro, arg, x) \ join1() fm_apply(macro, arg, fm_expand x) #define fm_foreach_tuple_arg_join_n(join1, join2, macro, arg, x, y, ...) \ join1() fm_apply(macro, arg, fm_expand x) \ join2() fm_apply(macro, arg, fm_expand y) \ - fm_recurs2(fm_, foreach_tuple_arg_join_) (join2, join2, macro, arg, __VA_ARGS__) + fm_recurs2(fm_foreach_tuple_arg_join_, fm_va_01n(__VA_ARGS__))(join1, join2, macro, arg, __VA_ARGS__) #define fm_foreach_tuple_arg(macro, arg, ...) \ fm_foreach_tuple_arg_join(fm_empty, macro, arg, __VA_ARGS__) diff --git a/src/fu_util/fo_obj.h b/src/fu_util/fo_obj.h index 70d4ee6b9..6ad423dc6 100644 --- a/src/fu_util/fo_obj.h +++ b/src/fu_util/fo_obj.h @@ -7,7 +7,6 @@ typedef void* fobj_t; #include -#include /* * Pointer to "object*. diff --git a/src/fu_util/ft_array.inc.h b/src/fu_util/ft_array.inc.h index 57d7cad42..847a6393d 100644 --- a/src/fu_util/ft_array.inc.h +++ b/src/fu_util/ft_array.inc.h @@ -1,5 +1,7 @@ /* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ -#include +#ifndef FU_UTIL_H +#error "ft_util.h should be included" +#endif /* * Accepts 2 macroses: @@ -176,7 +178,12 @@ #define ft_array_walk fm_cat(ft_array_pref, _walk) #define ft_array_walk_r fm_cat(ft_array_pref, _walk_r) -#define HUGE_SIZE ((uint64_t)UINT_MAX << 16) +#if __SIZEOF_SIZE_T__ < 8 +#define HUGE_SIZE ((size_t)UINT_MAX >> 2) +#else +#define HUGE_SIZE ((size_t)UINT_MAX << 16) +#endif + #ifndef NDEBUG /* try to catch uninitialized vars */ #define ft_slice_invariants(slc) \ diff --git a/src/fu_util/ft_search.inc.h b/src/fu_util/ft_search.inc.h index b567e11bf..149874cd6 100644 --- a/src/fu_util/ft_search.inc.h +++ b/src/fu_util/ft_search.inc.h @@ -1,3 +1,8 @@ +/* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ +#ifndef FU_UTIL_H +#error "ft_util.h should be included" +#endif + /* * Sort template. 
* Accepts four macrosses: @@ -39,8 +44,6 @@ * */ -#include - #define ft_func_bsearch fm_cat(ft_bsearch_, FT_SEARCH) #define ft_func_bsearch_r fm_cat3(ft_bsearch_, FT_SEARCH, _r) #define ft_func_search fm_cat(ft_search_, FT_SEARCH) diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index 56a0d05d2..084eabf9b 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -1,24 +1,29 @@ /* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ #ifndef FU_UTIL_H -#define FU_UTIL_H +#define FU_UTIL_H 1 #include #include +#include #include #include #include +#include +#include /* trick to find ssize_t even on windows and strict ansi mode */ #if defined(_MSC_VER) #include typedef SSIZE_T ssize_t; -#else -#include +#define SSIZE_MAX ((ssize_t)((SIZE_MAX) >> 1)) + +#if !defined(WIN32) && defined(_WIN32) +#define WIN32 _WIN32 +#endif + #endif #include #include #include -#include - #ifdef __GNUC__ #define ft_gcc_const __attribute__((const)) @@ -29,7 +34,7 @@ typedef SSIZE_T ssize_t; #define ft_gcc_malloc(free, idx) __attribute__((malloc)) #endif #define ft_unused __attribute__((unused)) -#define ft_gnu_printf(fmt, arg) __attribute__((format(printf,fmt,arg))) +#define ft_gnu_printf(fmt, arg) __attribute__((format(gnu_printf,fmt,arg))) #define ft_likely(x) __builtin_expect(!!(x), 1) #define ft_unlikely(x) __builtin_expect(!!(x), 0) #define ft_always_inline __attribute__((always_inline)) @@ -103,6 +108,7 @@ typedef void ft_gnu_printf(4, 0) (*ft_log_hook_t)(enum FT_LOG_LEVEL, /* * Initialize logging in main executable file. * Pass custom hook or NULL. + * In MinGW if built with libbacktrace, pass executable path (argv[0]). */ #define ft_init_log(hook) ft__init_log(hook, __FILE__) @@ -135,7 +141,7 @@ const char* ft__truncate_log_filename(const char *file); #define ft_dbg_enabled() ft__dbg_enabled() #define ft_dbg_assert(x, ...) ft__dbg_assert(x, #x, __VA_ARGS__) -#define ft_assert(x, ...) ft__assert(x, #x, __VA_ARGS__) +#define ft_assert(x, ...) ft__assert(x, #x, ##__VA_ARGS__) #define ft_assyscall(syscall, ...) ft__assyscall(syscall, fm_uniq(res), __VA_ARGS__) /* threadsafe strerror */ @@ -305,13 +311,14 @@ typedef struct ft_bytes_t { } ft_bytes_t; ft_inline ft_bytes_t ft_bytes(void* ptr, size_t len) { - return (ft_bytes_t){.ptr = ptr, .len = len}; + return (ft_bytes_t){.ptr = (char*)ptr, .len = len}; } ft_inline void ft_bytes_consume(ft_bytes_t *bytes, size_t cut); ft_inline void ft_bytes_move(ft_bytes_t *dest, ft_bytes_t *src); // String utils +extern size_t ft_strlcpy(char *dest, const char* src, size_t dest_size); /* * Concat strings regarding destination buffer size. * Note: if dest already full and doesn't contain \0n character, then fatal log is issued. @@ -411,7 +418,7 @@ extern bool ft_strbuf_vcatf (ft_strbuf_t *buf, const char *fmt, va_list * Use it if format string comes from user. */ ft_gnu_printf(3, 0) -extern bool ft_strbuf_vcatf_err (ft_strbuf_t *buf, bool err[static 1], +extern bool ft_strbuf_vcatf_err (ft_strbuf_t *buf, bool err[1], const char *fmt, va_list args); /* * Returns string which points into the buffer. diff --git a/src/fu_util/impl/fo_impl.c b/src/fu_util/impl/fo_impl.c index bbc49ab7f..63fa372fb 100644 --- a/src/fu_util/impl/fo_impl.c +++ b/src/fu_util/impl/fo_impl.c @@ -5,13 +5,9 @@ #include #include -#ifdef WIN32 -#define __thread __declspec(thread) -#endif -#include - #include -#include + +#include /* * We limits total number of methods, klasses and method implementations. 
@@ -650,7 +646,7 @@ fobjStr* fobj_newstr(ft_str_t s, enum FOBJ_STR_ALLOC ownership) { fobjStr *str; #if __SIZEOF_POINTER__ < 8 - ft_assert(size < (1<<30)-2); + ft_assert(s.len < (1<<30)-2); #else ft_assert(s.len < UINT32_MAX-2); #endif @@ -871,13 +867,13 @@ fobj_format_int(ft_strbuf_t *buf, uint64_t i, bool _signed, const char *fmt) { /* now add real suitable format */ switch (base) { - case 'x': strcat(tfmt + fmtlen, PRIx64); break; - case 'X': strcat(tfmt + fmtlen, PRIX64); break; - case 'o': strcat(tfmt + fmtlen, PRIo64); break; - case 'u': strcat(tfmt + fmtlen, PRIu64); break; - case 'd': strcat(tfmt + fmtlen, PRId64); break; + case 'x': ft_strlcat(tfmt, PRIx64, sizeof(tfmt)); break; + case 'X': ft_strlcat(tfmt, PRIX64, sizeof(tfmt)); break; + case 'o': ft_strlcat(tfmt, PRIo64, sizeof(tfmt)); break; + case 'u': ft_strlcat(tfmt, PRIu64, sizeof(tfmt)); break; + case 'd': ft_strlcat(tfmt, PRId64, sizeof(tfmt)); break; default: - case 'i': strcat(tfmt + fmtlen, PRIi64); break; + case 'i': ft_strlcat(tfmt, PRIi64, sizeof(tfmt)); break; } switch (base) { @@ -1082,11 +1078,11 @@ fobj__format_errmsg(const char* msg, fobj_err_kv_t *kvs) { "ident is too long in message \"%s\"", msg); ft_assert(formatdelim == NULL || closebrace - formatdelim <= 31, "format is too long in message \"%s\"", msg); - strncpy(ident, cur, identlen); + memcpy(ident, cur, identlen); ident[identlen] = 0; formatlen = formatdelim ? closebrace - (formatdelim+1) : 0; if (formatlen > 0) { - strncpy(format, formatdelim + 1, formatlen); + memcpy(format, formatdelim + 1, formatlen); } format[formatlen] = 0; kv = kvs; @@ -1293,11 +1289,11 @@ fobj_printkv(const char *fmt, ft_slc_fokv_t kvs) { "ident is too long in format \"%s\"", fmt); ft_assert(formatdelim == NULL || closebrace - formatdelim <= 31, "format is too long in format \"%s\"", fmt); - strncpy(ident, cur, identlen); + memcpy(ident, cur, identlen); ident[identlen] = 0; formatlen = formatdelim ? 
closebrace - (formatdelim+1) : 0; if (formatlen > 0) { - strncpy(format, formatdelim + 1, formatlen); + memcpy(format, formatdelim + 1, formatlen); } format[formatlen] = 0; i = ft_search_fokv(kvs.ptr, kvs.len, ident, fobj_fokv_cmpc); diff --git a/src/fu_util/impl/fo_impl2.h b/src/fu_util/impl/fo_impl2.h index 916714997..1cac933a0 100644 --- a/src/fu_util/impl/fo_impl2.h +++ b/src/fu_util/impl/fo_impl2.h @@ -2,10 +2,6 @@ #ifndef FOBJ_OBJ_PRIV2_H #define FOBJ_OBJ_PRIV2_H -#include -#include -#include - enum fobjStrType { FOBJ_STR_SMALL = 1, FOBJ_STR_UNOWNED, @@ -57,6 +53,7 @@ fobj_getstr(fobjStr *str) { return ft_str(str->ptr.ptr, str->ptr.len); default: ft_log(FT_FATAL, "Unknown fobj_str type %d", str->type); + return ft_str(NULL, 0); } } diff --git a/src/fu_util/impl/ft_impl.c b/src/fu_util/impl/ft_impl.c index 097171e86..1897e6bec 100644 --- a/src/fu_util/impl/ft_impl.c +++ b/src/fu_util/impl/ft_impl.c @@ -1,25 +1,35 @@ /* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ -#include +#include + #include -#include -#include #include +#include +#if !defined(WIN32) || defined(__MINGW64__) || defined(__MINGW32__) #include #include -#include +#else +#define WIN32_LEAN_AND_MEAN + +#include +#include +#include +#undef small +#include +#include +#include +#undef near +#endif + #ifdef HAVE_LIBBACKTRACE #include -#else +#if defined(__MINGW32__) || defined(__MINGW64__) +#include +#endif +#elif HAVE_EXECINFO_H #include #endif -#ifdef WIN32 -#define __thread __declspec(thread) -#else #include -#endif - -#include #define FT_LOG_MAX_FILES (1<<12) @@ -100,12 +110,22 @@ ft_strlcat(char *dest, const char* src, size_t dest_size) { ft_assert(dest_null, "destination has no zero byte"); if (dest_len < dest_size-1) { size_t cpy_len = dest_size - dest_len - 1; - strncpy(dest+dest_len, src, cpy_len); + cpy_len = ft_min(cpy_len, strlen(src)); + memcpy(dest+dest_len, src, cpy_len); dest[dest_len + cpy_len] = '\0'; } return dest_len + strlen(src); } +size_t +ft_strlcpy(char *dest, const char* src, size_t dest_size) { + size_t cpy_len = dest_size - 1; + cpy_len = ft_min(cpy_len, strlen(src)); + memcpy(dest, src, cpy_len); + dest[cpy_len] = '\0'; + return strlen(src); +} + ft_str_t ft_vasprintf(const char *fmt, va_list args) { ft_strbuf_t buf = ft_strbuf_zero(); @@ -302,9 +322,23 @@ ft__base_log_filename(const char *file) { static struct backtrace_state * volatile ft_btstate = NULL; static pthread_once_t ft_btstate_once = PTHREAD_ONCE_INIT; + +static void +ft_backtrace_err(void *data, const char *msg, int errnum) +{ + fprintf(stderr, "ft_backtrace_err %s %d\n", msg, errnum); +} + static void ft_backtrace_init(void) { - __atomic_store_n(&ft_btstate, backtrace_create_state(NULL, 0, NULL, NULL), + const char *app = NULL; +#if defined(__MINGW32__) || defined(__MINGW64__) + static char appbuf[2048] = {0}; + /* 2048 should be enough, don't check error */ + GetModuleFileNameA(0, appbuf, sizeof(appbuf)-1); + app = appbuf; +#endif + __atomic_store_n(&ft_btstate, backtrace_create_state(app, 1, ft_backtrace_err, NULL), __ATOMIC_RELEASE); } @@ -315,9 +349,9 @@ ft_backtrace_add(void *data, uintptr_t pc, struct ft_strbuf_t *buf = data; ssize_t sz; if (filename == NULL) - return 1; - return ft_strbuf_catf(buf, "\n%s:%-4d %s", - ft__truncate_log_filename(filename), lineno, function); + return 0; + return !ft_strbuf_catf(buf, "\n\t%s:%-4d\t%s", + ft__truncate_log_filename(filename), lineno, function ? 
function : "(unknown)");
 }
 #endif
 
@@ -355,9 +389,9 @@ ft_default_log(enum FT_LOG_LEVEL level, ft_source_position_t srcpos,
 #ifdef HAVE_LIBBACKTRACE
     if (__atomic_load_n(&ft_btstate, __ATOMIC_ACQUIRE) == NULL)
         pthread_once(&ft_btstate_once, ft_backtrace_init);
-
-    backtrace_full(ft_btstate, 1, ft_backtrace_add, NULL, &buf);
-#else
+    if (ft_btstate)
+        backtrace_full(ft_btstate, 0, ft_backtrace_add, NULL, &buf);
+#elif defined(HAVE_EXECINFO_H)
     void *backtr[32] = {0};
     char **syms = NULL;
     int i, n;
@@ -414,7 +448,15 @@ ft__log_fatal(ft_source_position_t srcpos, const char* error,
 const char*
 ft__strerror(int eno, char *buf, size_t len)
 {
-#if !_GNU_SOURCE && (_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600)
+#ifndef HAVE_STRERROR_R
+    char *sbuf = strerror(eno);
+
+    if (sbuf == NULL) /* can this still happen anywhere? */
+        return NULL;
+    /* To minimize thread-unsafety hazard, copy into caller's buffer */
+    ft_strlcpy(buf, sbuf, len);
+    return buf;
+#elif !_GNU_SOURCE && (_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600)
     int saveno = errno;
     int e = strerror_r(eno, buf, len);
     if (e != 0) {
diff --git a/src/fu_util/test/CMakeLists.txt b/src/fu_util/test/CMakeLists.txt
index 06f86effc..05eea86c6 100644
--- a/src/fu_util/test/CMakeLists.txt
+++ b/src/fu_util/test/CMakeLists.txt
@@ -1,5 +1,10 @@
 cmake_minimum_required(VERSION 3.11)
 
+add_executable(fm fm.c)
+add_executable(fm1 fm.c)
+
+target_compile_options(fm1 PRIVATE -DFM_USE_STRICT=1)
+
 add_executable(array array.c)
 target_link_libraries(array fu_utils)
 
@@ -20,6 +25,8 @@ target_link_libraries(obj1 fu_utils)
 
 enable_testing()
 
+add_test(NAME fm COMMAND fm)
+add_test(NAME fm1 COMMAND fm1)
 add_test(NAME array COMMAND array)
 add_test(NAME bsearch COMMAND bsearch)
 add_test(NAME fuprintf COMMAND fuprintf)
diff --git a/src/fu_util/test/obj1.c b/src/fu_util/test/obj1.c
index db5f9b0b0..faa7aafa5 100644
--- a/src/fu_util/test/obj1.c
+++ b/src/fu_util/test/obj1.c
@@ -285,6 +285,7 @@ int main(int argc, char** argv) {
     ft_assert(fobj_streq_c(strf, "Some scary things cost > $$12.4800 $$"),
               "String is '%s'", $tostr(strf));
 
+    ft_log(FT_ERROR, "and try backtrace");
     logf("BEFORE EXIT");
 }
diff --git a/src/fu_util/test/qsort/qsort.inc.c b/src/fu_util/test/qsort/qsort.inc.c
index c801ae52a..2a53ae93f 100644
--- a/src/fu_util/test/qsort/qsort.inc.c
+++ b/src/fu_util/test/qsort/qsort.inc.c
@@ -20,7 +20,6 @@
    Engineering a sort function;
    Jon Bentley and M. Douglas McIlroy;
    Software - Practice and Experience;
    Vol. 23 (11), 1249-1265, 1993.  */
-#include
 #include
 #include
 #include

From 5e1dd7ee4227739479a0e83092bd1ae8899fcc86 Mon Sep 17 00:00:00 2001
From: Yura Sokolov
Date: Mon, 15 Aug 2022 15:40:14 +0300
Subject: [PATCH 09/17] [PBCKP-245] don't include libpq-int.h

There's really no need to, and including it pulls in an unwanted
dependency on libpq internals.
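For illustration only (not part of this patch): a minimal standalone sketch
of the pattern the hunks below switch to. The row count comes from the
public PQntuples() accessor instead of the private res->ntups field, so
libpq-fe.h alone is enough. The helper name and error handling here are
hypothetical.

    #include <stdio.h>
    #include "libpq-fe.h"

    /* Hypothetical helper: list tablespace paths via public libpq API only. */
    static void
    print_tablespace_paths(PGconn *conn)
    {
        PGresult   *res = PQexec(conn,
                                 "SELECT pg_catalog.pg_tablespace_location(oid) "
                                 "FROM pg_catalog.pg_tablespace");
        int         ntups;
        int         i;

        if (PQresultStatus(res) != PGRES_TUPLES_OK)
        {
            fprintf(stderr, "query failed: %s", PQerrorMessage(conn));
            PQclear(res);
            return;
        }

        /* PQntuples() replaces the res->ntups peek that needed libpq-int.h */
        ntups = PQntuples(res);
        for (i = 0; i < ntups; i++)
            printf("tablespace path: %s\n", PQgetvalue(res, i, 0));

        PQclear(res);
    }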
---
 src/backup.c       | 4 +++-
 src/catchup.c      | 4 +++-
 src/pg_probackup.h | 1 -
 src/utils/file.c   | 1 +
 4 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/src/backup.c b/src/backup.c
index 15f1a4d1c..0363d7721 100644
--- a/src/backup.c
+++ b/src/backup.c
@@ -2179,6 +2179,7 @@ check_external_for_tablespaces(parray *external_list, PGconn *backup_conn)
 	PGresult   *res;
 	int 		i = 0;
 	int 		j = 0;
+	int 		ntups;
 	char	   *tablespace_path = NULL;
 	char	   *query = "SELECT pg_catalog.pg_tablespace_location(oid) "
 						"FROM pg_catalog.pg_tablespace "
@@ -2190,7 +2191,8 @@ check_external_for_tablespaces(parray *external_list, PGconn *backup_conn)
 	if (!res)
 		elog(ERROR, "Failed to get list of tablespaces");
 
-	for (i = 0; i < res->ntups; i++)
+	ntups = PQntuples(res);
+	for (i = 0; i < ntups; i++)
 	{
 		tablespace_path = PQgetvalue(res, i, 0);
 		Assert (strlen(tablespace_path) > 0);
diff --git a/src/catchup.c b/src/catchup.c
index 0f6e36b13..f91d199b3 100644
--- a/src/catchup.c
+++ b/src/catchup.c
@@ -230,6 +230,7 @@ catchup_check_tablespaces_existance_in_tbsmapping(PGconn *conn)
 {
 	PGresult	*res;
 	int		i;
+	int		ntups;
 	char		*tablespace_path = NULL;
 	const char	*linked_path = NULL;
 	char		*query = "SELECT pg_catalog.pg_tablespace_location(oid) "
@@ -241,7 +242,8 @@ catchup_check_tablespaces_existance_in_tbsmapping(PGconn *conn)
 	if (!res)
 		elog(ERROR, "Failed to get list of tablespaces");
 
-	for (i = 0; i < res->ntups; i++)
+	ntups = PQntuples(res);
+	for (i = 0; i < ntups; i++)
 	{
 		tablespace_path = PQgetvalue(res, i, 0);
 		Assert (strlen(tablespace_path) > 0);
diff --git a/src/pg_probackup.h b/src/pg_probackup.h
index 7ce455459..e8d1968e4 100644
--- a/src/pg_probackup.h
+++ b/src/pg_probackup.h
@@ -14,7 +14,6 @@
 #include "postgres_fe.h"
 #include "libpq-fe.h"
-#include "libpq-int.h"
 
 #include "access/xlog_internal.h"
 #include "utils/pg_crc.h"
diff --git a/src/utils/file.c b/src/utils/file.c
index 86977a19a..6e8b5e9f3 100644
--- a/src/utils/file.c
+++ b/src/utils/file.c
@@ -4,6 +4,7 @@
 #include "pg_probackup.h"
 /* sys/stat.h must be included after pg_probackup.h (see problems with compilation for windows described in PGPRO-5750) */
 #include
+#include
 #include "file.h"
 #include "storage/checksum.h"

From 3fa33c83dd05d6f166082d175f8a0f522009f66f Mon Sep 17 00:00:00 2001
From: Yura Sokolov
Date: Mon, 12 Sep 2022 16:20:03 +0300
Subject: [PATCH 10/17] [PBCKP-245] mingw: no need to define custom sleep/usleep

MinGW already defines usable versions of them.
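Sketch only (nothing below adds this code): if a Windows toolchain that
really lacks these functions, such as bare MSVC, ever needs the shims back,
they could be scoped to that toolchain instead of all of WIN32, since MinGW
ships working declarations in <unistd.h>.

    #if defined(_MSC_VER)
    #include <windows.h>

    /* Same bodies the hunk below deletes, but guarded by _MSC_VER only. */
    static int
    sleep(unsigned int seconds)
    {
        Sleep(seconds * 1000);          /* Sleep() takes milliseconds */
        return 0;
    }

    static int
    usleep(unsigned int usec)
    {
        Sleep((usec + 999) / 1000);     /* round up so short waits still sleep */
        return 0;
    }
    #endif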
--- src/utils/pgut.c | 14 -------------- src/utils/pgut.h | 5 ----- 2 files changed, 19 deletions(-) diff --git a/src/utils/pgut.c b/src/utils/pgut.c index f1b8da0b2..9a7b465ee 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -1061,20 +1061,6 @@ init_cancel_handler(void) SetConsoleCtrlHandler(consoleHandler, TRUE); } -int -sleep(unsigned int seconds) -{ - Sleep(seconds * 1000); - return 0; -} - -int -usleep(unsigned int usec) -{ - Sleep((usec + 999) / 1000); /* rounded up */ - return 0; -} - #undef select static int select_win32(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, const struct timeval * timeout) diff --git a/src/utils/pgut.h b/src/utils/pgut.h index 638259a3c..d8b5fec85 100644 --- a/src/utils/pgut.h +++ b/src/utils/pgut.h @@ -98,9 +98,4 @@ extern char *pgut_str_strip_trailing_filename(const char *filepath, const char * extern int wait_for_socket(int sock, struct timeval *timeout); extern int wait_for_sockets(int nfds, fd_set *fds, struct timeval *timeout); -#ifdef WIN32 -extern int sleep(unsigned int seconds); -extern int usleep(unsigned int usec); -#endif - #endif /* PGUT_H */ From 2fd47579a36db47c618d9824a309be02d8645e34 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Sep 2022 11:11:48 +0300 Subject: [PATCH 11/17] [PBCKP-248] a bit more accurate ifdef just small refactoring. --- src/archive.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/archive.c b/src/archive.c index 0ebe5e504..782ef1b21 100644 --- a/src/archive.c +++ b/src/archive.c @@ -553,17 +553,17 @@ push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, /* enable streaming compression */ if (is_compress) -#ifdef HAVE_LIBZ { +#ifdef HAVE_LIBZ pioFilter_i flt = pioGZCompressFilter(compress_level); err = pioCopy($reduce(pioWriteFlush, out), $reduce(pioRead, in), flt); - } - else #else - elog(ERROR, "Compression is requested, but not compiled it"); + elog(ERROR, "Compression is requested, but not compiled it"); #endif + } + else { err = pioCopy($reduce(pioWriteFlush, out), $reduce(pioRead, in)); From e180a360312bf4710eecd574a1169dc865080841 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Sep 2022 11:11:57 +0300 Subject: [PATCH 12/17] [PBCKP-248] Use native mingw pthread Mingw pthread "implementation" works reasonably well. There's no need to use emulation. Mingw's gcc links with winpthread.dll automatically, no need to force flags. 
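For reference, a usage sketch (hypothetical worker, mirroring what the
archive.c and checkdb.c hunks below do): each thread records its ordinal
through the new setter on entry instead of assigning to a shared __thread
global declared elsewhere.

    #include <pthread.h>
    #include <stddef.h>

    /* accessors added by this patch in src/utils/thread.c */
    extern void set_my_thread_num(int);
    extern int  my_thread_num(void);

    typedef struct worker_arg { int thread_num; } worker_arg;

    static void *
    worker(void *arg)
    {
        worker_arg *wa = (worker_arg *) arg;

        /* stored in a thread-local variable private to thread.c */
        set_my_thread_num(wa->thread_num);

        /* ... per-thread work; logging code can call my_thread_num() ... */
        return NULL;
    }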
--- src/archive.c | 4 +- src/checkdb.c | 3 +- src/pg_probackup.c | 1 - src/pg_probackup.h | 8 ---- src/utils/logger.c | 6 +-- src/utils/thread.c | 92 +++------------------------------------------- src/utils/thread.h | 26 +++---------- 7 files changed, 19 insertions(+), 121 deletions(-) diff --git a/src/archive.c b/src/archive.c index 782ef1b21..693270fce 100644 --- a/src/archive.c +++ b/src/archive.c @@ -279,7 +279,7 @@ push_files(void *arg) int rc; archive_push_arg *args = (archive_push_arg *) arg; - my_thread_num = args->thread_num; + set_my_thread_num(args->thread_num); for (i = 0; i < parray_num(args->files); i++) { @@ -1011,7 +1011,7 @@ get_files(void *arg) char from_fullpath[MAXPGPATH]; archive_get_arg *args = (archive_get_arg *) arg; - my_thread_num = args->thread_num; + set_my_thread_num(args->thread_num); for (i = 0; i < parray_num(args->files); i++) { diff --git a/src/checkdb.c b/src/checkdb.c index 177fc3cc7..f344d29b4 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -293,7 +293,8 @@ check_indexes(void *arg) int i; check_indexes_arg *arguments = (check_indexes_arg *) arg; int n_indexes = 0; - my_thread_num = arguments->thread_num; + + set_my_thread_num(arguments->thread_num); if (arguments->index_list) n_indexes = parray_num(arguments->index_list); diff --git a/src/pg_probackup.c b/src/pg_probackup.c index b7308405c..a580fb3c2 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -75,7 +75,6 @@ bool no_color = false; bool show_color = true; bool is_archive_cmd = false; pid_t my_pid = 0; -__thread int my_thread_num = 1; bool progress = false; bool no_sync = false; time_t start_time = 0; diff --git a/src/pg_probackup.h b/src/pg_probackup.h index e8d1968e4..13d4b06f0 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -43,13 +43,6 @@ #include "pg_probackup_state.h" - -#ifdef WIN32 -#define __thread __declspec(thread) -#else -#include -#endif - /* Wrap the code that we're going to delete after refactoring in this define*/ #define REFACTORE_ME @@ -759,7 +752,6 @@ typedef struct StopBackupCallbackParams /* common options */ extern pid_t my_pid; -extern __thread int my_thread_num; extern int num_threads; extern bool stream_wal; extern bool show_color; diff --git a/src/utils/logger.c b/src/utils/logger.c index e58802e28..e49012368 100644 --- a/src/utils/logger.c +++ b/src/utils/logger.c @@ -344,7 +344,7 @@ elog_internal(int elevel, bool file_only, const char *message) if (format_file == JSON || format_console == JSON) { snprintf(str_pid_json, sizeof(str_pid_json), "%d", my_pid); - snprintf(str_thread_json, sizeof(str_thread_json), "[%d-1]", my_thread_num); + snprintf(str_thread_json, sizeof(str_thread_json), "[%d-1]", my_thread_num()); initPQExpBuffer(&show_buf); json_add_min(buf_json, JT_BEGIN_OBJECT); @@ -424,7 +424,7 @@ elog_internal(int elevel, bool file_only, const char *message) { char str_thread[64]; /* [Issue #213] fix pgbadger parsing */ - snprintf(str_thread, sizeof(str_thread), "[%d-1]:", my_thread_num); + snprintf(str_thread, sizeof(str_thread), "[%d-1]:", my_thread_num()); fprintf(stderr, "%s ", strfbuf); fprintf(stderr, "%s ", str_pid); @@ -498,7 +498,7 @@ elog_stderr(int elevel, const char *fmt, ...) 
strftime(strfbuf, sizeof(strfbuf), "%Y-%m-%d %H:%M:%S %Z", localtime(&log_time)); snprintf(str_pid, sizeof(str_pid), "%d", my_pid); - snprintf(str_thread_json, sizeof(str_thread_json), "[%d-1]", my_thread_num); + snprintf(str_thread_json, sizeof(str_thread_json), "[%d-1]", my_thread_num()); initPQExpBuffer(&show_buf); json_add_min(buf_json, JT_BEGIN_OBJECT); diff --git a/src/utils/thread.c b/src/utils/thread.c index 1c469bd29..1ad1e772e 100644 --- a/src/utils/thread.c +++ b/src/utils/thread.c @@ -17,97 +17,17 @@ */ bool thread_interrupted = false; -#ifdef WIN32 -DWORD main_tid = 0; -#else pthread_t main_tid = 0; -#endif -#ifdef WIN32 -#include - -typedef struct win32_pthread -{ - HANDLE handle; - void *(*routine) (void *); - void *arg; - void *result; -} win32_pthread; - -static long mutex_initlock = 0; - -static unsigned __stdcall -win32_pthread_run(void *arg) -{ - win32_pthread *th = (win32_pthread *)arg; - - th->result = th->routine(th->arg); - - return 0; -} - -int -pthread_create(pthread_t *thread, - pthread_attr_t *attr, - void *(*start_routine) (void *), - void *arg) -{ - int save_errno; - win32_pthread *th; - - th = (win32_pthread *)pg_malloc(sizeof(win32_pthread)); - th->routine = start_routine; - th->arg = arg; - th->result = NULL; - - th->handle = (HANDLE)_beginthreadex(NULL, 0, win32_pthread_run, th, 0, NULL); - if (th->handle == NULL) - { - save_errno = errno; - free(th); - return save_errno; - } - - *thread = th; - return 0; -} +static __thread int my_thread_num_var = 1; int -pthread_join(pthread_t th, void **thread_return) +my_thread_num(void) { - if (th == NULL || th->handle == NULL) - return errno = EINVAL; - - if (WaitForSingleObject(th->handle, INFINITE) != WAIT_OBJECT_0) - { - _dosmaperr(GetLastError()); - return errno; - } - - if (thread_return) - *thread_return = th->result; - - CloseHandle(th->handle); - free(th); - return 0; + return my_thread_num_var; } -#endif /* WIN32 */ - -int -pthread_lock(pthread_mutex_t *mp) +void +set_my_thread_num(int th) { -#ifdef WIN32 - if (*mp == NULL) - { - while (InterlockedExchange(&mutex_initlock, 1) == 1) - /* loop, another thread own the lock */ ; - if (*mp == NULL) - { - if (pthread_mutex_init(mp, NULL)) - return -1; - } - InterlockedExchange(&mutex_initlock, 0); - } -#endif - return pthread_mutex_lock(mp); + my_thread_num_var = th; } diff --git a/src/utils/thread.h b/src/utils/thread.h index 2eaa5fb45..a6c58f70e 100644 --- a/src/utils/thread.h +++ b/src/utils/thread.h @@ -10,32 +10,18 @@ #ifndef PROBACKUP_THREAD_H #define PROBACKUP_THREAD_H -#ifdef WIN32 -#include "postgres_fe.h" -#include "port/pthread-win32.h" - -/* Use native win32 threads on Windows */ -typedef struct win32_pthread *pthread_t; -typedef int pthread_attr_t; - -#define PTHREAD_MUTEX_INITIALIZER NULL //{ NULL, 0 } -#define PTHREAD_ONCE_INIT false +#if defined(WIN32) && !(defined(__MINGW64__) || defined(__MINGW32__) || defined(HAVE_PTHREAD)) +#error "Windows build supports only 'pthread' threading" +#endif -extern int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg); -extern int pthread_join(pthread_t th, void **thread_return); -#else /* Use platform-dependent pthread capability */ #include -#endif - -#ifdef WIN32 -extern DWORD main_tid; -#else extern pthread_t main_tid; -#endif +#define pthread_lock(mp) pthread_mutex_lock(mp) extern bool thread_interrupted; -extern int pthread_lock(pthread_mutex_t *mp); +int my_thread_num(void); +void set_my_thread_num(int); #endif /* PROBACKUP_THREAD_H */ From 
3acd1a72b421169ce316d0ed9301b94f557a6f14 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 13 Sep 2022 13:21:13 +0300 Subject: [PATCH 13/17] [PBCKP-245] don't include sys/stat.h so often Postgresql's 'port' library defines custom 'stat' and 'struct stat'. It conflicts with system one in MinGW. We had to include either one or another, but not both. It is easier to do if we include 'sys/stat.h' only once and only in non-win32 environment. --- src/backup.c | 1 - src/catalog.c | 1 - src/catchup.c | 1 - src/checkdb.c | 1 - src/data.c | 1 - src/dir.c | 1 - src/fetch.c | 1 - src/init.c | 1 - src/merge.c | 1 - src/pg_probackup.c | 2 -- src/restore.c | 1 - src/show.c | 1 - src/utils/file.c | 2 -- src/utils/file.h | 6 +++++- src/validate.c | 1 - 15 files changed, 5 insertions(+), 17 deletions(-) diff --git a/src/backup.c b/src/backup.c index 0363d7721..f28ff1abc 100644 --- a/src/backup.c +++ b/src/backup.c @@ -17,7 +17,6 @@ #include "pgtar.h" #include "streamutil.h" -#include #include #include diff --git a/src/catalog.c b/src/catalog.c index b4be159d1..212add4ca 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -13,7 +13,6 @@ #include #include -#include #include #include "utils/file.h" diff --git a/src/catchup.c b/src/catchup.c index f91d199b3..fa126b884 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -17,7 +17,6 @@ #include "pgtar.h" #include "streamutil.h" -#include #include #include diff --git a/src/checkdb.c b/src/checkdb.c index f344d29b4..f1a5fcf78 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -16,7 +16,6 @@ #include "pg_probackup.h" -#include #include #include diff --git a/src/data.c b/src/data.c index 17ae4b91a..6308b110c 100644 --- a/src/data.c +++ b/src/data.c @@ -16,7 +16,6 @@ #include "utils/file.h" #include -#include #ifdef HAVE_LIBZ #include diff --git a/src/dir.c b/src/dir.c index 0bcd60169..8704a8d2f 100644 --- a/src/dir.c +++ b/src/dir.c @@ -19,7 +19,6 @@ #include "catalog/pg_tablespace.h" #include -#include #include #include "utils/configuration.h" diff --git a/src/fetch.c b/src/fetch.c index bbea7bffe..980bf531b 100644 --- a/src/fetch.c +++ b/src/fetch.c @@ -10,7 +10,6 @@ #include "pg_probackup.h" -#include #include /* diff --git a/src/init.c b/src/init.c index 41ee2e3c9..511256aa3 100644 --- a/src/init.c +++ b/src/init.c @@ -11,7 +11,6 @@ #include "pg_probackup.h" #include -#include /* * Initialize backup catalog. 
diff --git a/src/merge.c b/src/merge.c index 03698e92d..62ce3c300 100644 --- a/src/merge.c +++ b/src/merge.c @@ -9,7 +9,6 @@ #include "pg_probackup.h" -#include #include #include "utils/thread.h" diff --git a/src/pg_probackup.c b/src/pg_probackup.c index a580fb3c2..dda5cf65a 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -47,8 +47,6 @@ #include "streamutil.h" #include "utils/file.h" -#include - #include "utils/configuration.h" #include "utils/thread.h" #include diff --git a/src/restore.c b/src/restore.c index 28a79f1ed..7b37b2306 100644 --- a/src/restore.c +++ b/src/restore.c @@ -12,7 +12,6 @@ #include "access/timeline.h" -#include #include #include "utils/thread.h" diff --git a/src/show.c b/src/show.c index db8a9e225..46002198d 100644 --- a/src/show.c +++ b/src/show.c @@ -13,7 +13,6 @@ #include #include #include -#include #include "utils/json.h" diff --git a/src/utils/file.c b/src/utils/file.c index 6e8b5e9f3..d4282b8fc 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2,8 +2,6 @@ #include #include "pg_probackup.h" -/* sys/stat.h must be included after pg_probackup.h (see problems with compilation for windows described in PGPRO-5750) */ -#include #include #include "file.h" diff --git a/src/utils/file.h b/src/utils/file.h index 7fd1e7919..79e86ee20 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -3,7 +3,9 @@ #include "storage/bufpage.h" #include +#ifndef WIN32 #include +#endif #include #ifdef HAVE_LIBZ @@ -223,11 +225,13 @@ fobj_iface(pioWriteFlush); fobj_iface(pioWriteCloser); fobj_iface(pioReadCloser); +typedef struct stat stat_t; + // Drive #define mth__pioOpen pioFile_i, (path_t, path), (int, flags), \ (int, permissions), (err_i *, err) #define mth__pioOpen__optional() (permissions, FILE_PERMISSION) -#define mth__pioStat struct stat, (path_t, path), (bool, follow_symlink), \ +#define mth__pioStat stat_t, (path_t, path), (bool, follow_symlink), \ (err_i *, err) #define mth__pioRemove err_i, (path_t, path), (bool, missing_ok) #define mth__pioRename err_i, (path_t, old_path), (path_t, new_path) diff --git a/src/validate.c b/src/validate.c index 79a450ac8..8e402a1c5 100644 --- a/src/validate.c +++ b/src/validate.c @@ -10,7 +10,6 @@ #include "pg_probackup.h" -#include #include #include "utils/thread.h" From ca6e3942a5ad6795fadc5da444000b341b9ee824 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 13 Sep 2022 13:14:29 +0300 Subject: [PATCH 14/17] [PBCKP-245] make Makefile more portable It is hard to consider all 'echo in makefile shell' variants. Mingw's one doesn't process escape sequence. That is why it is better to use raw TAB symbol. --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 21553f97c..2c126af29 100644 --- a/Makefile +++ b/Makefile @@ -97,7 +97,7 @@ borrowed.mk: $(firstword $(MAKEFILE_LIST)) $(file >$@,# This file is autogenerated. Do not edit!) $(foreach borrowed_file, $(BORROWED_H_SRC) $(BORROWED_C_SRC), \ $(file >>$@,$(addprefix $(BORROW_DIR)/, $(notdir $(borrowed_file))): | $(CURDIR)/$(BORROW_DIR)/ $(realpath $(top_srcdir)/$(borrowed_file))) \ - $(file >>$@,$(shell echo "\t"'$$(LN_S) $(realpath $(top_srcdir)/$(borrowed_file)) $$@')) \ + $(file >>$@,$(shell echo " "'$$(LN_S) $(realpath $(top_srcdir)/$(borrowed_file)) $$@')) \ ) include borrowed.mk From 085b99fc7c09cbf59cbddd2e485e8eebfe00277d Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 15 Sep 2022 11:18:39 +0300 Subject: [PATCH 15/17] [PBCKP-245] more mingw compatibility - pid_t is 64bit in mingw. 
  Let's cast it to long long in most places on I/O.
- int64 should be cast to long long as well
- size_t should be printed as %zu/%zd
- the stat function is imported from pgport, so there's no need to
  redefine it again
- no need to redeclare `__thread`, since gcc works with it well
- fixed arguments and types in the launch agent
---
 src/archive.c      |  2 +-
 src/catalog.c      | 36 ++++++++++++++---------------
 src/catchup.c      | 14 ++++++------
 src/data.c         | 28 +++++++++++------------
 src/merge.c        |  2 +-
 src/restore.c      | 16 ++++++-------
 src/show.c         |  8 +++----
 src/utils/file.c   | 56 ++++++++++------------------------------------
 src/utils/logger.c |  8 +++----
 src/utils/remote.c | 18 ++++++---------
 src/validate.c     |  4 ++--
 11 files changed, 78 insertions(+), 114 deletions(-)

diff --git a/src/archive.c b/src/archive.c
index 693270fce..a930ff557 100644
--- a/src/archive.c
+++ b/src/archive.c
@@ -148,7 +148,7 @@ do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *pg
 	n_threads = parray_num(batch_files);
 
 	elog(INFO, "pg_probackup archive-push WAL file: %s, "
-				"threads: %i/%i, batch: %lu/%i, compression: %s",
+				"threads: %i/%i, batch: %zu/%i, compression: %s",
 		 wal_file_name, n_threads, num_threads,
 		 parray_num(batch_files), batch_size,
 		 is_compress ? "zlib" : "none");
diff --git a/src/catalog.c b/src/catalog.c
index 212add4ca..5c44a9940 100644
--- a/src/catalog.c
+++ b/src/catalog.c
@@ -422,8 +422,8 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict)
 		/* complain every fifth interval */
 		if ((ntries % LOG_FREQ) == 0)
 		{
-			elog(WARNING, "Process %d is using backup %s, and is still running",
-				 encoded_pid, backup_id);
+			elog(WARNING, "Process %lld is using backup %s, and is still running",
+				 (long long)encoded_pid, backup_id);
 
 			elog(WARNING, "Waiting %u seconds on exclusive lock for backup %s",
 				 ntries, backup_id);
@@ -437,8 +437,8 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict)
 		else
 		{
 			if (errno == ESRCH)
-				elog(WARNING, "Process %d which used backup %s no longer exists",
-					 encoded_pid, backup_id);
+				elog(WARNING, "Process %lld which used backup %s no longer exists",
+					 (long long)encoded_pid, backup_id);
 			else
 				elog(ERROR, "Failed to send signal 0 to a process %d: %s",
 					 encoded_pid, strerror(errno));
@@ -467,7 +467,7 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict)
 	/*
	 * Successfully created the file, now fill it.
*/ - snprintf(buffer, sizeof(buffer), "%d\n", my_pid); + snprintf(buffer, sizeof(buffer), "%lld\n", (long long)my_pid); errno = 0; if (fio_write(fd, buffer, strlen(buffer)) != strlen(buffer)) @@ -574,8 +574,8 @@ wait_shared_owners(pgBackup *backup) /* complain from time to time */ if ((ntries % LOG_FREQ) == 0) { - elog(WARNING, "Process %d is using backup %s in shared mode, and is still running", - encoded_pid, base36enc(backup->start_time)); + elog(WARNING, "Process %lld is using backup %s in shared mode, and is still running", + (long long)encoded_pid, base36enc(backup->start_time)); elog(WARNING, "Waiting %u seconds on lock for backup %s", ntries, base36enc(backup->start_time)); @@ -587,8 +587,8 @@ wait_shared_owners(pgBackup *backup) continue; } else if (errno != ESRCH) - elog(ERROR, "Failed to send signal 0 to a process %d: %s", - encoded_pid, strerror(errno)); + elog(ERROR, "Failed to send signal 0 to a process %lld: %s", + (long long)encoded_pid, strerror(errno)); /* locker is dead */ break; @@ -605,8 +605,8 @@ wait_shared_owners(pgBackup *backup) /* some shared owners are still alive */ if (ntries <= 0) { - elog(WARNING, "Cannot to lock backup %s in exclusive mode, because process %u owns shared lock", - base36enc(backup->start_time), encoded_pid); + elog(WARNING, "Cannot to lock backup %s in exclusive mode, because process %llu owns shared lock", + base36enc(backup->start_time), (long long)encoded_pid); return 1; } @@ -661,11 +661,11 @@ grab_shared_lock_file(pgBackup *backup) * Somebody is still using this backup in shared mode, * copy this pid into a new file. */ - buffer_len += snprintf(buffer+buffer_len, 4096, "%u\n", encoded_pid); + buffer_len += snprintf(buffer+buffer_len, 4096, "%llu\n", (long long)encoded_pid); } else if (errno != ESRCH) - elog(ERROR, "Failed to send signal 0 to a process %d: %s", - encoded_pid, strerror(errno)); + elog(ERROR, "Failed to send signal 0 to a process %lld: %s", + (long long)encoded_pid, strerror(errno)); } if (fp_in) @@ -685,7 +685,7 @@ grab_shared_lock_file(pgBackup *backup) } /* add my own pid */ - buffer_len += snprintf(buffer+buffer_len, sizeof(buffer), "%u\n", my_pid); + buffer_len += snprintf(buffer+buffer_len, sizeof(buffer), "%llu\n", (long long)my_pid); /* write out the collected PIDs to temp lock file */ fwrite(buffer, 1, buffer_len, fp_out); @@ -783,11 +783,11 @@ release_shared_lock_file(const char *backup_dir) * Somebody is still using this backup in shared mode, * copy this pid into a new file. 
 			 */
-			buffer_len += snprintf(buffer+buffer_len, 4096, "%u\n", encoded_pid);
+			buffer_len += snprintf(buffer+buffer_len, 4096, "%lld\n", (long long)encoded_pid);
 		}
 		else if (errno != ESRCH)
-			elog(ERROR, "Failed to send signal 0 to a process %d: %s",
-				 encoded_pid, strerror(errno));
+			elog(ERROR, "Failed to send signal 0 to a process %lld: %s",
+				 (long long)encoded_pid, strerror(errno));
 	}

 	if (ferror(fp_in))
diff --git a/src/catchup.c b/src/catchup.c
index fa126b884..08bc039f9 100644
--- a/src/catchup.c
+++ b/src/catchup.c
@@ -137,8 +137,8 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn,
 		}
 		else if (pid > 1) /* postmaster is up */
 		{
-			elog(ERROR, "Postmaster with pid %u is running in destination directory \"%s\"",
-				 pid, dest_pgdata);
+			elog(ERROR, "Postmaster with pid %lld is running in destination directory \"%s\"",
+				 (long long)pid, dest_pgdata);
 		}
 	}

@@ -160,15 +160,15 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn,
 		source_id = get_system_identifier(FIO_DB_HOST, source_pgdata, false); /* same as instance_config.system_identifier */

 		if (source_conn_id != source_id)
-			elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu",
-				 source_conn_id, source_pgdata, source_id);
+			elog(ERROR, "Database identifiers mismatch: we connected to DB id %llu, but in \"%s\" we found id %llu",
+				 (unsigned long long)source_conn_id, source_pgdata, (unsigned long long)source_id);

 		if (current.backup_mode != BACKUP_MODE_FULL)
 		{
 			dest_id = get_system_identifier(FIO_LOCAL_HOST, dest_pgdata, false);
 			if (source_conn_id != dest_id)
-				elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu",
-					 source_conn_id, dest_pgdata, dest_id);
+				elog(ERROR, "Database identifiers mismatch: we connected to DB id %llu, but in \"%s\" we found id %llu",
+					 (unsigned long long)source_conn_id, dest_pgdata, (unsigned long long)dest_id);
 		}
 	}

@@ -439,7 +439,7 @@ catchup_thread_runner(void *arg)

 		if (file->write_size == BYTES_INVALID)
 		{
-			elog(LOG, "Skipping the unchanged file: \"%s\", read %li bytes", from_fullpath, file->read_size);
+			elog(LOG, "Skipping the unchanged file: \"%s\", read %lld bytes", from_fullpath, (long long)file->read_size);
 			continue;
 		}
diff --git a/src/data.c b/src/data.c
index 6308b110c..fdc7a8918 100644
--- a/src/data.c
+++ b/src/data.c
@@ -204,12 +204,12 @@ get_header_errormsg(Page page, char **errormsg)
 	if (PageGetPageSize(phdr) != BLCKSZ)
 		snprintf(*errormsg, ERRMSG_MAX_LEN, "page header invalid, "
-				"page size %lu is not equal to block size %u",
+				"page size %zu is not equal to block size %u",
 				PageGetPageSize(phdr), BLCKSZ);

 	else if (phdr->pd_lower < SizeOfPageHeaderData)
 		snprintf(*errormsg, ERRMSG_MAX_LEN, "page header invalid, "
-				"pd_lower %i is less than page header size %lu",
+				"pd_lower %i is less than page header size %zu",
 				phdr->pd_lower, SizeOfPageHeaderData);

 	else if (phdr->pd_lower > phdr->pd_upper)
@@ -229,7 +229,7 @@ get_header_errormsg(Page page, char **errormsg)

 	else if (phdr->pd_special != MAXALIGN(phdr->pd_special))
 		snprintf(*errormsg, ERRMSG_MAX_LEN, "page header invalid, "
-				"pd_special %i is misaligned, expected %lu",
+				"pd_special %i is misaligned, expected %zu",
 				phdr->pd_special, MAXALIGN(phdr->pd_special));

 	else if (phdr->pd_flags & ~PD_VALID_FLAG_BITS)
@@ -1196,7 +1196,7 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers
 			datapagemap_add(map, blknum);
 	}

-	elog(LOG, "Copied file \"%s\": %lu bytes", from_fullpath, write_len);
+	elog(LOG, "Copied file \"%s\": %zu bytes", from_fullpath, write_len);
 	return write_len;
 }
@@ -1240,7 +1240,7 @@ restore_non_data_file_internal(FILE *in, FILE *out, pgFile *file,
 	pg_free(buf);

-	elog(LOG, "Copied file \"%s\": %lu bytes", from_fullpath, file->write_size);
+	elog(LOG, "Copied file \"%s\": %lld bytes", from_fullpath, (long long)file->write_size);
 }

 size_t
@@ -1317,9 +1317,9 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup,
 		elog(ERROR, "Failed to locate a full copy of non-data file \"%s\"", to_fullpath);

 	if (tmp_file->write_size <= 0)
-		elog(ERROR, "Full copy of non-data file has invalid size: %li. "
+		elog(ERROR, "Full copy of non-data file has invalid size: %lli. "
 				"Metadata corruption in backup %s in file: \"%s\"",
-				tmp_file->write_size, base36enc(tmp_backup->start_time),
+				(long long)tmp_file->write_size, base36enc(tmp_backup->start_time),
 				to_fullpath);

 	/* incremental restore */
@@ -2031,11 +2031,11 @@ get_page_header(FILE *in, const char *fullpath, BackupPageHeader* bph,
 			return false;	/* EOF found */
 		else if (read_len != 0 && feof(in))
 			elog(ERROR,
-				 "Odd size page found at offset %ld of \"%s\"",
-				 ftello(in), fullpath);
+				 "Odd size page found at offset %lld of \"%s\"",
+				 (long long)ftello(in), fullpath);
 		else
-			elog(ERROR, "Cannot read header at offset %ld of \"%s\": %s",
-				 ftello(in), fullpath, strerror(errno));
+			elog(ERROR, "Cannot read header at offset %lld of \"%s\": %s",
+				 (long long)ftello(in), fullpath, strerror(errno));
 	}

 	/* In older versions < 2.4.0, when crc for file was calculated, header was
@@ -2335,8 +2335,8 @@ copy_pages(const char *to_fullpath, const char *from_fullpath,
 					 to_fullpath, strerror(errno));

 		if (ftruncate(fileno(out), file->size) == -1)
-			elog(ERROR, "Cannot ftruncate file \"%s\" to size %lu: %s",
-				 to_fullpath, file->size, strerror(errno));
+			elog(ERROR, "Cannot ftruncate file \"%s\" to size %lld: %s",
+				 to_fullpath, (long long)file->size, strerror(errno));
 	}
 }
@@ -2443,7 +2443,7 @@ get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, b
 	if (hdr_crc != file->hdr_crc)
 	{
 		elog(strict ? ERROR : WARNING, "Header map for file \"%s\" crc mismatch \"%s\" "
-				"offset: %llu, len: %lu, current: %u, expected: %u",
+				"offset: %llu, len: %zu, current: %u, expected: %u",
 				file->rel_path, hdr_map->path, file->hdr_off, read_len, hdr_crc, file->hdr_crc);
 		goto cleanup;
 	}
diff --git a/src/merge.c b/src/merge.c
index 62ce3c300..f64b72611 100644
--- a/src/merge.c
+++ b/src/merge.c
@@ -957,7 +957,7 @@ merge_files(void *arg)
 		if (S_ISDIR(dest_file->mode))
 			goto done;

-		elog(progress ? INFO : LOG, "Progress: (%d/%lu). Merging file \"%s\"",
+		elog(progress ? INFO : LOG, "Progress: (%d/%zu). Merging file \"%s\"",
 			 i + 1, n_files, dest_file->rel_path);

 		if (dest_file->is_datafile && !dest_file->is_cfs)
diff --git a/src/restore.c b/src/restore.c
index 7b37b2306..ce0604b0a 100644
--- a/src/restore.c
+++ b/src/restore.c
@@ -1104,14 +1104,14 @@ static void *
 restore_files(void *arg)
 {
 	int			i;
-	uint64		n_files;
+	size_t		n_files;
 	char		to_fullpath[MAXPGPATH];
 	FILE		*out = NULL;
 	char		*out_buf = pgut_malloc(STDIO_BUFSIZE);

 	restore_files_arg *arguments = (restore_files_arg *) arg;

-	n_files = (unsigned long) parray_num(arguments->dest_files);
+	n_files = parray_num(arguments->dest_files);

 	for (i = 0; i < parray_num(arguments->dest_files); i++)
 	{
@@ -1132,7 +1132,7 @@ restore_files(void *arg)
 		if (interrupted || thread_interrupted)
 			elog(ERROR, "Interrupted during restore");

-		elog(progress ? INFO : LOG, "Progress: (%d/%lu). Restore file \"%s\"",
+		elog(progress ? INFO : LOG, "Progress: (%d/%zu). Restore file \"%s\"",
 			 i + 1, n_files, dest_file->rel_path);

 		/* Only files from pgdata can be skipped by partial restore */
@@ -2173,8 +2173,8 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier,
 	}
 	else if (pid > 1) /* postmaster is up */
 	{
-		elog(WARNING, "Postmaster with pid %u is running in destination directory \"%s\"",
-			 pid, pgdata);
+		elog(WARNING, "Postmaster with pid %lld is running in destination directory \"%s\"",
+			 (long long)pid, pgdata);
 		success = false;
 		postmaster_is_up = true;
 	}
@@ -2197,9 +2197,9 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier,
 	if (system_id_pgdata == instance_config.system_identifier)
 		system_id_match = true;
 	else
-		elog(WARNING, "Backup catalog was initialized for system id %lu, "
-			 "but destination directory system id is %lu",
-			 system_identifier, system_id_pgdata);
+		elog(WARNING, "Backup catalog was initialized for system id %llu, "
+			 "but destination directory system id is %llu",
+			 (unsigned long long)system_identifier, (unsigned long long)system_id_pgdata);

 	/*
 	 * TODO: maybe there should be some other signs, pointing to pg_control
diff --git a/src/show.c b/src/show.c
index 46002198d..5440e28a2 100644
--- a/src/show.c
+++ b/src/show.c
@@ -910,7 +910,7 @@ show_archive_plain(const char *instance_name, uint32 xlog_seg_size,
 		cur++;

 		/* N files */
-		snprintf(row->n_segments, lengthof(row->n_segments), "%lu",
+		snprintf(row->n_segments, lengthof(row->n_segments), "%zu",
 				 tlinfo->n_xlog_files);
 		widths[cur] = Max(widths[cur], strlen(row->n_segments));
 		cur++;
@@ -930,7 +930,7 @@ show_archive_plain(const char *instance_name, uint32 xlog_seg_size,
 		cur++;

 		/* N backups */
-		snprintf(row->n_backups, lengthof(row->n_backups), "%lu",
+		snprintf(row->n_backups, lengthof(row->n_backups), "%zu",
 				 tlinfo->backups?parray_num(tlinfo->backups):0);
 		widths[cur] = Max(widths[cur], strlen(row->n_backups));
 		cur++;
@@ -1086,10 +1086,10 @@ show_archive_json(const char *instance_name, uint32 xlog_seg_size,
 		json_add_value(buf, "max-segno", tmp_buf, json_level, true);

 		json_add_key(buf, "n-segments", json_level);
-		appendPQExpBuffer(buf, "%lu", tlinfo->n_xlog_files);
+		appendPQExpBuffer(buf, "%zu", tlinfo->n_xlog_files);

 		json_add_key(buf, "size", json_level);
-		appendPQExpBuffer(buf, "%lu", tlinfo->size);
+		appendPQExpBuffer(buf, "%zu", tlinfo->size);

 		json_add_key(buf, "zratio", json_level);
diff --git a/src/utils/file.c b/src/utils/file.c
index d4282b8fc..fc97ab810 100644
--- a/src/utils/file.c
+++ b/src/utils/file.c
@@ -77,8 +77,8 @@ typedef struct
 #define fio_fileno(f) (((size_t)f - 1) | FIO_PIPE_MARKER)

 #if defined(WIN32)
-#undef open(a, b, c)
-#undef fopen(a, b)
+#undef open
+#undef fopen
 #endif

 void
@@ -138,43 +138,6 @@ fio_is_remote_fd(int fd)
 	return (fd & FIO_PIPE_MARKER) != 0;
 }

-#ifdef WIN32
-
-#undef stat
-
-/*
- * The stat() function in win32 is not guaranteed to update the st_size
- * field when run. So we define our own version that uses the Win32 API
- * to update this field.
- */
-static int
-fio_safestat(const char *path, struct stat *buf)
-{
-	int			r;
-	WIN32_FILE_ATTRIBUTE_DATA attr;
-
-	r = stat(path, buf);
-	if (r < 0)
-		return r;
-
-	if (!GetFileAttributesEx(path, GetFileExInfoStandard, &attr))
-	{
-		errno = ENOENT;
-		return -1;
-	}
-
-	/*
-	 * XXX no support for large files here, but we don't do that in general on
-	 * Win32 yet.
-	 */
-	buf->st_size = attr.nFileSizeLow;
-
-	return 0;
-}
-
-#define stat(x, y) fio_safestat(x, y)
-#endif /* WIN32 */
-
 #ifdef WIN32
 /* TODO: use real pread on Linux */
 static ssize_t
@@ -2202,10 +2165,10 @@ fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file,
 			elog(ERROR, "Cannot change mode of \"%s\": %s", to_fullpath,
 				 strerror(errno));

-		elog(VERBOSE, "ftruncate file \"%s\" to size %lu",
+		elog(VERBOSE, "ftruncate file \"%s\" to size %zu",
 			 to_fullpath, file->size);
 		if (fio_ftruncate(out, file->size) == -1)
-			elog(ERROR, "Cannot ftruncate file \"%s\" to size %lu: %s",
+			elog(ERROR, "Cannot ftruncate file \"%s\" to size %zu: %s",
 				 to_fullpath, file->size, strerror(errno));

 		if (!fio_is_remote_file(out))
@@ -3099,6 +3062,7 @@ local_check_postmaster(const char *pgdata)
 {
 	FILE		*fp;
 	pid_t		pid;
+	long long	lpid;
 	char		pid_file[MAXPGPATH];

 	join_path_components(pid_file, pgdata, "postmaster.pid");
@@ -3114,7 +3078,11 @@ local_check_postmaster(const char *pgdata)
 				 pid_file, strerror(errno));
 	}

-	if (fscanf(fp, "%i", &pid) != 1)
+	if (fscanf(fp, "%lli", &lpid) == 1)
+	{
+		pid = lpid;
+	}
+	else
 	{
 		/* something is wrong with the file content */
 		pid = 1;
@@ -3128,8 +3096,8 @@ local_check_postmaster(const char *pgdata)
 		if (errno == ESRCH)
 			pid = 0;
 		else
-			elog(ERROR, "Failed to send signal 0 to a process %d: %s",
-				 pid, strerror(errno));
+			elog(ERROR, "Failed to send signal 0 to a process %lld: %s",
+				 (long long)pid, strerror(errno));
 	}
 }
diff --git a/src/utils/logger.c b/src/utils/logger.c
index e49012368..ec9194ec0 100644
--- a/src/utils/logger.c
+++ b/src/utils/logger.c
@@ -343,7 +343,7 @@ elog_internal(int elevel, bool file_only, const char *message)
 	if (format_file == JSON || format_console == JSON)
 	{
-		snprintf(str_pid_json, sizeof(str_pid_json), "%d", my_pid);
+		snprintf(str_pid_json, sizeof(str_pid_json), "%lld", (long long)my_pid);
 		snprintf(str_thread_json, sizeof(str_thread_json), "[%d-1]", my_thread_num());

 		initPQExpBuffer(&show_buf);
@@ -357,7 +357,7 @@ elog_internal(int elevel, bool file_only, const char *message)
 		json_add_min(buf_json, JT_END_OBJECT);
 	}

-	snprintf(str_pid, sizeof(str_pid), "[%d]:", my_pid);
+	snprintf(str_pid, sizeof(str_pid), "[%lld]:", (long long)my_pid);

 	/*
 	 * Write message to log file.
@@ -497,7 +497,7 @@ elog_stderr(int elevel, const char *fmt, ...)
 {
 	strftime(strfbuf, sizeof(strfbuf), "%Y-%m-%d %H:%M:%S %Z",
 			 localtime(&log_time));

-	snprintf(str_pid, sizeof(str_pid), "%d", my_pid);
+	snprintf(str_pid, sizeof(str_pid), "%lld", (long long)my_pid);
 	snprintf(str_thread_json, sizeof(str_thread_json), "[%d-1]", my_thread_num());

 	initPQExpBuffer(&show_buf);
@@ -971,7 +971,7 @@ open_logfile(FILE **file, const char *filename_format)
 			elog_stderr(ERROR, "cannot open rotation file \"%s\": %s",
 						control, strerror(errno));

-		fprintf(control_file, "%ld", timestamp);
+		fprintf(control_file, "%lld", (long long)timestamp);
 		fclose(control_file);
 	}
diff --git a/src/utils/remote.c b/src/utils/remote.c
index 3286052a5..bceccc26a 100644
--- a/src/utils/remote.c
+++ b/src/utils/remote.c
@@ -5,12 +5,6 @@
 #include
 #include

-#ifdef WIN32
-#define __thread __declspec(thread)
-#else
-#include <pthread.h>
-#endif
-
 #include "pg_probackup.h"
 #include "file.h"

@@ -113,14 +107,14 @@ bool launch_agent(void)
 	char		cmd[MAX_CMDLINE_LENGTH];
 	char*		ssh_argv[MAX_CMDLINE_OPTIONS];
 	int			ssh_argc;
-	int			outfd[2];
-	int			infd[2];
-	int			errfd[2];
+	int			outfd[2] = {0, 0};
+	int			infd[2] = {0, 0};
+	int			errfd[2] = {0, 0};
 	int			agent_version;

 	ssh_argc = 0;
 #ifdef WIN32
-	ssh_argv[ssh_argc++] = PROGRAM_NAME_FULL;
+	ssh_argv[ssh_argc++] = (char *) PROGRAM_NAME_FULL;
 	ssh_argv[ssh_argc++] = "ssh";
 	ssh_argc += 2; /* reserve space for pipe descriptors */
 #endif
@@ -198,7 +192,9 @@ bool launch_agent(void)
 		ssh_argv[2] = psprintf("%d", outfd[0]);
 		ssh_argv[3] = psprintf("%d", infd[1]);
 		{
-			intptr_t pid = _spawnvp(_P_NOWAIT, ssh_argv[0], ssh_argv);
+			intptr_t pid = _spawnvp(_P_NOWAIT,
+									(const char*)ssh_argv[0],
+									(const char * const *) ssh_argv);
 			if (pid < 0)
 				return false;
 			child_pid = GetProcessId((HANDLE)pid);
diff --git a/src/validate.c b/src/validate.c
index 8e402a1c5..f2b99e3a9 100644
--- a/src/validate.c
+++ b/src/validate.c
@@ -312,8 +312,8 @@ pgBackupValidateFiles(void *arg)

 			if (file->write_size != st.st_size)
 			{
-				elog(WARNING, "Invalid size of backup file \"%s\" : " INT64_FORMAT ". Expected %lu",
-					 file_fullpath, (unsigned long) st.st_size, file->write_size);
+				elog(WARNING, "Invalid size of backup file \"%s\" : %lld. Expected %lld",
+					 file_fullpath, (long long) st.st_size, (long long)file->write_size);
 				arguments->corrupted = true;
 				break;
 			}

From 0e85c52a983e62276b9ab985a17da2d42a7d562a Mon Sep 17 00:00:00 2001
From: Yura Sokolov
Date: Mon, 19 Sep 2022 21:47:38 +0300
Subject: [PATCH 16/17] [PBCKP-245] mingw: ucrt strftime is good enough

msvcrt's strftime is quite limited, so we had to use pg_strftime.
ucrt's strftime, on the other hand, supports most of the SU (Single
UNIX) extensions, so we can safely use it.

This means the Windows port now requires the MinGW64 UCRT environment.
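Editor's note (illustrative sketch, not part of the original commit):
the kind of Single UNIX conversions this change relies on, e.g. %e
(space-padded day of month) and %T (HH:MM:SS), are handled by the UCRT
strftime but missing from the old msvcrt one. A minimal standalone
check, assuming a MinGW64 UCRT (or any C99/POSIX) toolchain:

    /* strftime_check.c - probes for SU/C99 strftime conversions */
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        char    buf[64];
        time_t  now = time(NULL);

        /* %T and %e are absent from msvcrt; strftime reports failure
         * by returning 0, which is what we test for here. */
        if (strftime(buf, sizeof(buf), "%Y-%m-%d %T (day %e)",
                     localtime(&now)) == 0)
        {
            fprintf(stderr, "strftime failed: unsupported conversion?\n");
            return 1;
        }
        printf("%s\n", buf);
        return 0;
    }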
---
 src/pg_probackup.h | 4 ++++
 src/utils/logger.c | 4 ----
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/pg_probackup.h b/src/pg_probackup.h
index 13d4b06f0..dc7effd9d 100644
--- a/src/pg_probackup.h
+++ b/src/pg_probackup.h
@@ -43,6 +43,10 @@
 #include "pg_probackup_state.h"

+#if defined(WIN32) && !(defined(_UCRT) && defined(__MINGW64__))
+#error Windows port requires compilation in MinGW64 UCRT environment
+#endif
+
 /* Wrap the code that we're going to delete after refactoring in this define*/
 #define REFACTORE_ME

diff --git a/src/utils/logger.c b/src/utils/logger.c
index ec9194ec0..57b96e020 100644
--- a/src/utils/logger.c
+++ b/src/utils/logger.c
@@ -811,11 +811,7 @@ logfile_getname(const char *format, time_t timestamp)
 	len = strlen(filename);

 	/* Treat log_filename as a strftime pattern */
-#ifdef WIN32
-	if (pg_strftime(filename + len, MAXPGPATH - len, format, tm) <= 0)
-#else
 	if (strftime(filename + len, MAXPGPATH - len, format, tm) <= 0)
-#endif
 		elog_stderr(ERROR, "strftime(%s) failed: %s", format, strerror(errno));

 	return filename;

From 0e0027bd37adbf87d70473bc45442f009003b908 Mon Sep 17 00:00:00 2001
From: Yura Sokolov
Date: Tue, 20 Sep 2022 14:15:04 +0300
Subject: [PATCH 17/17] [PBCKP-245] fix EACCES usage

I can't find where EACCESS is used in postgres, nor can I find where it
is defined on Windows. It is quite strange that it worked before; I
can't explain it.
---
 src/utils/file.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/utils/file.c b/src/utils/file.c
index fc97ab810..9f8301c56 100644
--- a/src/utils/file.c
+++ b/src/utils/file.c
@@ -156,7 +156,7 @@ remove_file_or_dir(const char* path)
 {
 	int rc = remove(path);

-	if (rc < 0 && errno == EACCESS)
+	if (rc < 0 && errno == EACCES)
 		rc = rmdir(path);
 	return rc;
 }
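Editor's note: taken together, the first patch in this series settles
on one portable printing convention. A standalone sketch of that
convention (illustrative only; the helper and its names are not part
of pg_probackup):

    /* format_conv.c - the width of pid_t, time_t and int64 varies
     * across platforms, so they are cast to (long long) or
     * (unsigned long long) and printed with %lld/%llu; size_t needs
     * no cast because %zu matches it exactly. */
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static void
    report(long long pid, int64_t written, size_t n_files, time_t ts)
    {
        printf("pid: %lld\n", pid);
        printf("written: %lld bytes\n", (long long) written);
        printf("files: %zu\n", n_files);
        printf("timestamp: %lld\n", (long long) ts);
    }

    int main(void)
    {
        report(12345, INT64_C(5242880), (size_t) 42, time(NULL));
        return 0;
    }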