diff --git a/src/backup.c b/src/backup.c
index 9a451e72a..03ff7b72b 100644
--- a/src/backup.c
+++ b/src/backup.c
@@ -692,6 +692,8 @@ pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo)
 
 /*
  * Entry point of pg_probackup BACKUP subcommand.
+ *
+ * If start_time == INVALID_BACKUP_ID, a backup_id is generated automatically.
  */
 int
 do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params,
@@ -699,8 +701,13 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params,
 {
 	PGconn	   *backup_conn = NULL;
 	PGNodeInfo	nodeInfo;
+	time_t		latest_backup_id = INVALID_BACKUP_ID;
 	char		pretty_bytes[20];
 
+	if (!instance_config.pgdata)
+		elog(ERROR, "required parameter not specified: PGDATA "
+					"(-D, --pgdata)");
+
 	/* Initialize PGInfonode */
 	pgNodeInit(&nodeInfo);
 
@@ -709,12 +716,55 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params,
 		(pg_strcasecmp(instance_config.external_dir_str, "none") != 0))
 		current.external_dir_str = instance_config.external_dir_str;
 
-	/* Create backup directory and BACKUP_CONTROL_FILE */
-	pgBackupCreateDir(&current, instanceState, start_time);
+	/* Find the latest backup_id */
+	{
+		parray	   *backup_list = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID);
 
-	if (!instance_config.pgdata)
-		elog(ERROR, "required parameter not specified: PGDATA "
-					"(-D, --pgdata)");
+		if (parray_num(backup_list) > 0)
+			latest_backup_id = ((pgBackup *) parray_get(backup_list, 0))->backup_id;
+
+		parray_walk(backup_list, pgBackupFree);
+		parray_free(backup_list);
+	}
+
+	/* Pick a backup_id and create the backup directory with BACKUP_CONTROL_FILE */
+	if (start_time != INVALID_BACKUP_ID)
+	{
+		/* If the user has already chosen a backup_id for us, try to use it. */
+		if (start_time <= latest_backup_id)
+			/* don't care about freeing base36enc_dup memory, we exit anyway */
+			elog(ERROR, "Can't assign backup_id from requested start_time (%s), "
+						"this time must be later than backup %s",
+				 base36enc_dup(start_time), base36enc_dup(latest_backup_id));
+
+		current.backup_id = start_time;
+		pgBackupInitDir(&current, instanceState->instance_backup_subdir_path);
+	}
+	else
+	{
+		/* We can generate our own unique backup_id.
+		 * Sometimes (when we try to back up twice within one second)
+		 * the backup_id is duplicated -> retry a few times.
+		 */
+		int			attempts = 10;
+
+		if (time(NULL) < latest_backup_id)
+			elog(ERROR, "Can't assign backup_id, there is already a backup in the future (%s)",
+				 base36enc(latest_backup_id));
+
+		do
+		{
+			current.backup_id = time(NULL);
+			pgBackupInitDir(&current, instanceState->instance_backup_subdir_path);
+			if (current.backup_id == INVALID_BACKUP_ID)
+				sleep(1);
+		}
+		while (current.backup_id == INVALID_BACKUP_ID && attempts-- > 0);
+	}
+
+	/* If the backup directory could not be created, WARNINGs are already in the log */
+	if (current.backup_id == INVALID_BACKUP_ID)
+		elog(ERROR, "Can't create backup directory");
 
 	/* Update backup status and other metainfo. */
 	current.status = BACKUP_STATUS_RUNNING;
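The id-selection flow added to do_backup() above is the heart of this patch, so here is a compact model of it. This is a minimal Python sketch, not project code: pick_backup_id() and init_dir() are hypothetical stand-ins for the inlined logic and for pgBackupInitDir(), where init_dir() returns the id on success and INVALID_BACKUP_ID when the directory could not be created.

```python
import time

INVALID_BACKUP_ID = 0  # mirrors the C sentinel


def pick_backup_id(start_time, latest_backup_id, init_dir, attempts=10):
    """Model of do_backup()'s new backup_id selection (illustrative only)."""
    if start_time != INVALID_BACKUP_ID:
        # A caller-chosen id must sort strictly after every existing backup.
        if start_time <= latest_backup_id:
            raise RuntimeError('requested start_time must be later than '
                               'the latest existing backup')
        return init_dir(start_time)

    # Otherwise generate the id from the clock; two backups within the
    # same second collide on the directory name, so pause and retry.
    if time.time() < latest_backup_id:
        raise RuntimeError('there is already a backup in the future')

    backup_id = INVALID_BACKUP_ID
    while backup_id == INVALID_BACKUP_ID and attempts > 0:
        backup_id = init_dir(int(time.time()))
        if backup_id == INVALID_BACKUP_ID:
            time.sleep(1)
        attempts -= 1
    return backup_id
```

Moving the retry out of create_backup_dir() (see catalog.c below) lets each retry pick a fresh timestamp; the old in-place loop retried the same id and could only outlast transient filesystem errors.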
diff --git a/src/catalog.c b/src/catalog.c
index c118e954a..47513096c 100644
--- a/src/catalog.c
+++ b/src/catalog.c
@@ -23,7 +23,7 @@ static pgBackup* get_closest_backup(timelineInfo *tlinfo);
 static pgBackup* get_oldest_backup(timelineInfo *tlinfo);
 static const char *backupModes[] = {"", "PAGE", "PTRACK", "DELTA", "FULL"};
 static pgBackup *readBackupControlFile(const char *path);
-static void create_backup_dir(pgBackup *backup, const char *backup_instance_path);
+static int create_backup_dir(pgBackup *backup, const char *backup_instance_path);
 
 static bool backup_lock_exit_hook_registered = false;
 static parray *locks = NULL;
@@ -969,6 +969,7 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id
 		}
 		else if (strcmp(base36enc(backup->start_time), data_ent->d_name) != 0)
 		{
+			/* TODO: there is no such guarantee */
 			elog(WARNING, "backup ID in control file \"%s\" doesn't match name of the backup folder \"%s\"",
 				 base36enc(backup->start_time), backup_conf_path);
 		}
@@ -1411,22 +1412,34 @@ get_multi_timeline_parent(parray *backup_list, parray *tli_list,
 	return NULL;
 }
 
-/* Create backup directory in $BACKUP_PATH
- * Note, that backup_id attribute is updated,
- * so it is possible to get diffrent values in
+/*
+ * Create backup directory in $BACKUP_PATH
+ * (with the proposed backup->backup_id)
+ * and initialize this directory.
+ * If creation of the directory fails, then
+ * backup_id will be cleared (set to INVALID_BACKUP_ID).
+ * It is possible to get different values in
  * pgBackup.start_time and pgBackup.backup_id.
  * It may be ok or maybe not, so it's up to the caller
  * to fix it or let it be.
  */
 void
-pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_time)
+pgBackupInitDir(pgBackup *backup, const char *backup_instance_path)
 {
-	int	i;
-	parray *subdirs = parray_new();
-	parray * backups;
-	pgBackup *target_backup;
+	int			i;
+	char		temp[MAXPGPATH];
+	parray	   *subdirs;
 
+	/* Try to create the backup directory first */
+	if (create_backup_dir(backup, backup_instance_path) != 0)
+	{
+		/* Clear backup_id as an indication of error */
+		backup->backup_id = INVALID_BACKUP_ID;
+		return;
+	}
+
+	subdirs = parray_new();
 	parray_append(subdirs, pg_strdup(DATABASE_DIR));
 
 	/* Add external dirs containers */
@@ -1438,7 +1451,6 @@ pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_t
 						 false);
 		for (i = 0; i < parray_num(external_list); i++)
 		{
-			char		temp[MAXPGPATH];
 			/* Numeration of externaldirs starts with 1 */
 			makeExternalDirPathByNum(temp, EXTERNAL_DIR, i+1);
 			parray_append(subdirs, pg_strdup(temp));
@@ -1446,30 +1458,6 @@ pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_t
 		free_dir_list(external_list);
 	}
 
-	/* Get list of all backups*/
-	backups = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID);
-	if (parray_num(backups) > 0)
-	{
-		target_backup = (pgBackup *) parray_get(backups, 0);
-		if (start_time > target_backup->backup_id)
-		{
-			backup->backup_id = start_time;
-			create_backup_dir(backup, instanceState->instance_backup_subdir_path);
-		}
-		else
-		{
-			elog(ERROR, "Cannot create directory for older backup");
-		}
-	}
-	else
-	{
-		backup->backup_id = start_time;
-		create_backup_dir(backup, instanceState->instance_backup_subdir_path);
-	}
-
-	if (backup->backup_id == 0)
-		elog(ERROR, "Cannot create backup directory: %s", strerror(errno));
-
 	backup->database_dir = pgut_malloc(MAXPGPATH);
 	join_path_components(backup->database_dir, backup->root_dir, DATABASE_DIR);
@@ -1479,10 +1467,8 @@ pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_t
 	/* create directories for actual backup files */
 	for (i = 0; i < parray_num(subdirs); i++)
 	{
-		char	path[MAXPGPATH];
-
-		join_path_components(path, backup->root_dir, parray_get(subdirs, i));
-		fio_mkdir(path, DIR_PERMISSION, FIO_BACKUP_HOST);
+		join_path_components(temp, backup->root_dir, parray_get(subdirs, i));
+		fio_mkdir(temp, DIR_PERMISSION, FIO_BACKUP_HOST);
 	}
 
 	free_dir_list(subdirs);
@@ -1491,34 +1477,26 @@ pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_t
 /*
  * Create root directory for backup,
  * update pgBackup.root_dir if directory creation was a success
+ * Return values (same as dir_create_dir()):
+ *  0 - ok
+ * -1 - error (warning message already emitted)
  */
-void
+int
 create_backup_dir(pgBackup *backup, const char *backup_instance_path)
 {
-	int	attempts = 10;
+	int			rc;
+	char		path[MAXPGPATH];
 
-	while (attempts--)
-	{
-		int	rc;
-		char	path[MAXPGPATH];
-
-		join_path_components(path, backup_instance_path, base36enc(backup->backup_id));
+	join_path_components(path, backup_instance_path, base36enc(backup->backup_id));
 
-		/* TODO: add wrapper for remote mode */
-		rc = dir_create_dir(path, DIR_PERMISSION, true);
-
-		if (rc == 0)
-		{
-			backup->root_dir = pgut_strdup(path);
-			return;
-		}
-		else
-		{
-			elog(WARNING, "Cannot create directory \"%s\": %s", path, strerror(errno));
-			sleep(1);
-		}
-	}
+	/* TODO: add wrapper for remote mode */
+	rc = dir_create_dir(path, DIR_PERMISSION, true);
+	if (rc == 0)
+		backup->root_dir = pgut_strdup(path);
+	else
+		elog(WARNING, "Cannot create directory \"%s\": %s", path, strerror(errno));
+	return rc;
 }
 
 /*
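To restate the new division of labor: create_backup_dir() makes a single attempt and reports the result, while pgBackupInitDir() turns a failure into backup_id == INVALID_BACKUP_ID so the caller decides whether to retry. A rough Python equivalent of that contract — function names mirror the C ones, os.mkdir() stands in for dir_create_dir(), and base36enc() is the helper added to ptrack_helpers.py further down:

```python
import os

INVALID_BACKUP_ID = 0


def create_backup_dir(backup, backup_instance_path):
    """One attempt; 0 on success, -1 on error, like dir_create_dir()."""
    path = os.path.join(backup_instance_path, base36enc(backup['backup_id']))
    try:
        os.mkdir(path)  # fails if a backup with this id already exists
        backup['root_dir'] = path
        return 0
    except OSError as e:
        print('WARNING: Cannot create directory "{0}": {1}'.format(path, e))
        return -1


def pgBackupInitDir(backup, backup_instance_path):
    if create_backup_dir(backup, backup_instance_path) != 0:
        # Clearing the id is the error signal the caller checks for.
        backup['backup_id'] = INVALID_BACKUP_ID
        return
    # ... then create the database/ and external_N/ subdirectories ...
```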
" "We do not guarantee future support for this flag."); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 802cbb5c0..1885a191e 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -450,7 +450,10 @@ struct pgBackup { BackupMode backup_mode; /* Mode - one of BACKUP_MODE_xxx above*/ time_t backup_id; /* Identifier of the backup. - * Currently it's the same as start_time */ + * By default it's the same as start_time + * but can be increased if same backup_id + * already exists. It can be also set by + * start_time parameter */ BackupStatus status; /* Status - one of BACKUP_STATUS_xxx above*/ TimeLineID tli; /* timeline of start and stop backup lsns */ XLogRecPtr start_lsn; /* backup's starting transaction log location */ @@ -985,7 +988,7 @@ extern void write_backup_filelist(pgBackup *backup, parray *files, const char *root, parray *external_list, bool sync); -extern void pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_time); +extern void pgBackupInitDir(pgBackup *backup, const char *backup_instance_path); extern void pgNodeInit(PGNodeInfo *node); extern void pgBackupInit(pgBackup *backup); extern void pgBackupFree(void *backup); diff --git a/tests/backup.py b/tests/backup.py index c5235120e..0cba8fe79 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -1,7 +1,7 @@ import unittest import os from time import sleep, time -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import base36enc, ProbackupTest, ProbackupException import shutil from distutils.dir_util import copy_tree from testgres import ProcessType, QueryException @@ -313,7 +313,7 @@ def test_backup_detect_corruption(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if self.ptrack and node.major_version > 11: + if self.ptrack: node.safe_psql( "postgres", "create extension ptrack") @@ -459,7 +459,7 @@ def test_backup_detect_invalid_block_header(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if self.ptrack and node.major_version > 11: + if self.ptrack: node.safe_psql( "postgres", "create extension ptrack") @@ -600,7 +600,7 @@ def test_backup_detect_missing_permissions(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if self.ptrack and node.major_version > 11: + if self.ptrack: node.safe_psql( "postgres", "create extension ptrack") @@ -3402,10 +3402,11 @@ def test_pg_stop_backup_missing_permissions(self): # @unittest.skip("skip") def test_start_time(self): - + """Test, that option --start-time allows to set backup_id and restore""" fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -3418,138 +3419,81 @@ def test_start_time(self): # FULL backup startTime = int(time()) self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=['--stream', '--start-time', str(startTime)]) - - # DELTA backup - startTime = int(time()) - self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=['--stream', '--start-time', str(startTime)]) - - # PAGE backup - startTime = int(time()) - self.backup_node( - backup_dir, 'node', node, backup_type="page", - options=['--stream', '--start-time', str(startTime)]) - - if self.ptrack and node.major_version > 11: - node.safe_psql( - "postgres", - "create extension ptrack") - - # PTRACK backup - startTime = int(time()) - self.backup_node( - backup_dir, 'node', node, 
backup_type="ptrack", - options=['--stream', '--start-time', str(startTime)]) - - # Clean after yourself - self.del_test_dir(module_name, fname) - - # @unittest.skip("skip") - def test_start_time_incorrect_time(self): - - fname = self.id().split('.')[3] - node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() + backup_dir, 'node', node, backup_type='full', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore FULL backup by backup_id calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, module_name, fname, 'node_restored_full'), + backup_id=base36enc(startTime)) - startTime = int(time()) - #backup with correct start time - self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--start-time', str(startTime)]) - #backups with incorrect start time + #FULL backup with incorrect start time try: + startTime = str(int(time()-100000)) self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=['--stream', '--start-time', str(startTime-10000)]) + backup_dir, 'node', node, backup_type='full', + options=['--stream', '--start-time={0}'.format(startTime)]) # we should die here because exception is what we expect to happen self.assertEqual( 1, 0, - "Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( + 'Expecting Error because start time for new backup must be newer ' + '\n Output: {0} \n CMD: {1}'.format( repr(self.output), self.cmd)) except ProbackupException as e: self.assertRegex( e.message, - "ERROR: Cannot create directory for older backup", + r"ERROR: Can't assign backup_id from requested start_time \(\w*\), this time must be later that backup \w*\n", "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - try: - self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=['--stream', '--start-time', str(startTime-10000)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - "ERROR: Cannot create directory for older backup", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + # DELTA backup + startTime = int(time()) + self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore DELTA backup by backup_id calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, module_name, fname, 'node_restored_delta'), + backup_id=base36enc(startTime)) - try: - self.backup_node( - backup_dir, 'node', node, backup_type="page", - options=['--stream', '--start-time', str(startTime-10000)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, 
- "ERROR: Cannot create directory for older backup", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + # PAGE backup + startTime = int(time()) + self.backup_node( + backup_dir, 'node', node, backup_type='page', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore PAGE backup by backup_id calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, module_name, fname, 'node_restored_page'), + backup_id=base36enc(startTime)) - if self.ptrack and node.major_version > 11: + # PTRACK backup + if self.ptrack: node.safe_psql( - "postgres", - "create extension ptrack") + 'postgres', + 'create extension ptrack') - try: - self.backup_node( - backup_dir, 'node', node, backup_type="page", - options=['--stream', '--start-time', str(startTime-10000)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - "ERROR: Cannot create directory for older backup", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + startTime = int(time()) + self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore PTRACK backup by backup_id calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, module_name, fname, 'node_restored_ptrack'), + backup_id=base36enc(startTime)) # Clean after yourself self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_start_time_few_nodes(self): - + """Test, that we can synchronize backup_id's for different DBs""" fname = self.id().split('.')[3] node1 = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node1'), + set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -3561,6 +3505,7 @@ def test_start_time_few_nodes(self): node2 = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node2'), + set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -3571,200 +3516,61 @@ def test_start_time_few_nodes(self): node2.slow_start() # FULL backup - startTime = int(time()) + startTime = str(int(time())) self.backup_node( - backup_dir1, 'node1', node1, backup_type="full", - options=['--stream', '--start-time', str(startTime)]) + backup_dir1, 'node1', node1, backup_type='full', + options=['--stream', '--start-time={0}'.format(startTime)]) self.backup_node( - backup_dir2, 'node2', node2, backup_type="full", - options=['--stream', '--start-time', str(startTime)]) - + backup_dir2, 'node2', node2, backup_type='full', + options=['--stream', '--start-time={0}'.format(startTime)]) show_backup1 = self.show_pb(backup_dir1, 'node1')[0] show_backup2 = self.show_pb(backup_dir2, 'node2')[0] self.assertEqual(show_backup1['id'], show_backup2['id']) # DELTA backup - startTime = int(time()) + startTime = str(int(time())) self.backup_node( - backup_dir1, 'node1', node1, backup_type="delta", - options=['--stream', '--start-time', str(startTime)]) + backup_dir1, 'node1', node1, backup_type='delta', + options=['--stream', '--start-time={0}'.format(startTime)]) self.backup_node( - backup_dir2, 'node2', node2, backup_type="delta", - options=['--stream', '--start-time', str(startTime)]) + 
@@ -3561,6 +3505,7 @@ def test_start_time_few_nodes(self):
 
         node2 = self.make_simple_node(
             base_dir=os.path.join(module_name, fname, 'node2'),
+            set_replication=True,
             ptrack_enable=self.ptrack,
             initdb_params=['--data-checksums'])
 
@@ -3571,200 +3516,61 @@ def test_start_time_few_nodes(self):
         node2.slow_start()
 
         # FULL backup
-        startTime = int(time())
+        startTime = str(int(time()))
         self.backup_node(
-            backup_dir1, 'node1', node1, backup_type="full",
-            options=['--stream', '--start-time', str(startTime)])
+            backup_dir1, 'node1', node1, backup_type='full',
+            options=['--stream', '--start-time={0}'.format(startTime)])
         self.backup_node(
-            backup_dir2, 'node2', node2, backup_type="full",
-            options=['--stream', '--start-time', str(startTime)])
-
+            backup_dir2, 'node2', node2, backup_type='full',
+            options=['--stream', '--start-time={0}'.format(startTime)])
         show_backup1 = self.show_pb(backup_dir1, 'node1')[0]
         show_backup2 = self.show_pb(backup_dir2, 'node2')[0]
         self.assertEqual(show_backup1['id'], show_backup2['id'])
 
         # DELTA backup
-        startTime = int(time())
+        startTime = str(int(time()))
         self.backup_node(
-            backup_dir1, 'node1', node1, backup_type="delta",
-            options=['--stream', '--start-time', str(startTime)])
+            backup_dir1, 'node1', node1, backup_type='delta',
+            options=['--stream', '--start-time={0}'.format(startTime)])
         self.backup_node(
-            backup_dir2, 'node2', node2, backup_type="delta",
-            options=['--stream', '--start-time', str(startTime)])
+            backup_dir2, 'node2', node2, backup_type='delta',
+            options=['--stream', '--start-time={0}'.format(startTime)])
         show_backup1 = self.show_pb(backup_dir1, 'node1')[1]
         show_backup2 = self.show_pb(backup_dir2, 'node2')[1]
         self.assertEqual(show_backup1['id'], show_backup2['id'])
 
         # PAGE backup
-        startTime = int(time())
+        startTime = str(int(time()))
         self.backup_node(
-            backup_dir1, 'node1', node1, backup_type="page",
-            options=['--stream', '--start-time', str(startTime)])
+            backup_dir1, 'node1', node1, backup_type='page',
+            options=['--stream', '--start-time={0}'.format(startTime)])
         self.backup_node(
-            backup_dir2, 'node2', node2, backup_type="page",
-            options=['--stream', '--start-time', str(startTime)])
+            backup_dir2, 'node2', node2, backup_type='page',
+            options=['--stream', '--start-time={0}'.format(startTime)])
         show_backup1 = self.show_pb(backup_dir1, 'node1')[2]
         show_backup2 = self.show_pb(backup_dir2, 'node2')[2]
         self.assertEqual(show_backup1['id'], show_backup2['id'])
 
         # PTRACK backup
-        startTime = int(time())
-        if self.ptrack and node1.major_version > 11:
+        if self.ptrack:
             node1.safe_psql(
-                "postgres",
-                "create extension ptrack")
-            self.backup_node(
-                backup_dir1, 'node1', node1, backup_type="ptrack",
-                options=['--stream', '--start-time', str(startTime)])
-
-        if self.ptrack and node2.major_version > 11:
+                'postgres',
+                'create extension ptrack')
             node2.safe_psql(
-                "postgres",
-                "create extension ptrack")
-            self.backup_node(
-                backup_dir2, 'node2', node2, backup_type="ptrack",
-                options=['--stream', '--start-time', str(startTime)])
-        show_backup1 = self.show_pb(backup_dir1, 'node1')[3]
-        show_backup2 = self.show_pb(backup_dir2, 'node2')[3]
-        self.assertEqual(show_backup1['id'], show_backup2['id'])
-
-        # Clean after yourself
-        self.del_test_dir(module_name, fname)
-
-    # @unittest.skip("skip")
-    def test_start_time_few_nodes_incorrect_time(self):
-
-        fname = self.id().split('.')[3]
-        node1 = self.make_simple_node(
-            base_dir=os.path.join(module_name, fname, 'node1'),
-            ptrack_enable=self.ptrack,
-            initdb_params=['--data-checksums'])
-
-        backup_dir1 = os.path.join(self.tmp_path, module_name, fname, 'backup1')
-        self.init_pb(backup_dir1)
-        self.add_instance(backup_dir1, 'node1', node1)
-        self.set_archiving(backup_dir1, 'node1', node1)
-        node1.slow_start()
-
-        node2 = self.make_simple_node(
-            base_dir=os.path.join(module_name, fname, 'node2'),
-            ptrack_enable=self.ptrack,
-            initdb_params=['--data-checksums'])
-
-        backup_dir2 = os.path.join(self.tmp_path, module_name, fname, 'backup2')
-        self.init_pb(backup_dir2)
-        self.add_instance(backup_dir2, 'node2', node2)
-        self.set_archiving(backup_dir2, 'node2', node2)
-        node2.slow_start()
-
-        # FULL backup
-        startTime = int(time())
-        self.backup_node(
-            backup_dir1, 'node1', node1, backup_type="full",
-            options=['--stream', '--start-time', str(startTime)])
-        self.backup_node(
-            backup_dir2, 'node2', node2, backup_type="full",
-            options=['--stream', '--start-time', str(startTime-10000)])
-
-        show_backup1 = self.show_pb(backup_dir1, 'node1')[0]
-        show_backup2 = self.show_pb(backup_dir2, 'node2')[0]
-        self.assertGreater(show_backup1['id'], show_backup2['id'])
-
-        # DELTA backup
-        startTime = int(time())
-        self.backup_node(
-            backup_dir1, 'node1', node1, backup_type="delta",
-            options=['--stream', '--start-time', str(startTime)])
-        # make backup with start time definitelly earlier, than existing
-        try:
-            self.backup_node(
-                backup_dir2, 'node2', node2, backup_type="delta",
-                options=['--stream', '--start-time', str(10000)])
-            self.assertEqual(
-                1, 0,
-                "Expecting Error because start time for new backup must be newer "
-                "\n Output: {0} \n CMD: {1}".format(
-                    repr(self.output), self.cmd))
-        except ProbackupException as e:
-            self.assertRegex(
-                e.message,
-                "ERROR: Cannot create directory for older backup",
-                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
-                    repr(e.message), self.cmd))
-
-        show_backup1 = self.show_pb(backup_dir1, 'node1')[1]
-        show_backup2 = self.show_pb(backup_dir2, 'node2')[0]
-        self.assertGreater(show_backup1['id'], show_backup2['id'])
+                'postgres',
+                'create extension ptrack')
 
-        # PAGE backup
-        startTime = int(time())
-        self.backup_node(
-            backup_dir1, 'node1', node1, backup_type="page",
-            options=['--stream', '--start-time', str(startTime)])
-        # make backup with start time definitelly earlier, than existing
-        try:
+            startTime = str(int(time()))
             self.backup_node(
-                backup_dir2, 'node2', node2, backup_type="page",
-                options=['--stream', '--start-time', str(10000)])
-            self.assertEqual(
-                1, 0,
-                "Expecting Error because start time for new backup must be newer "
-                "\n Output: {0} \n CMD: {1}".format(
-                    repr(self.output), self.cmd))
-        except ProbackupException as e:
-            self.assertRegex(
-                e.message,
-                "ERROR: Cannot create directory for older backup",
-                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
-                    repr(e.message), self.cmd))
-
-        show_backup1 = self.show_pb(backup_dir1, 'node1')[2]
-        show_backup2 = self.show_pb(backup_dir2, 'node2')[0]
-        self.assertGreater(show_backup1['id'], show_backup2['id'])
-
-        # PTRACK backup
-        startTime = int(time())
-        if self.ptrack and node1.major_version > 11:
-            node1.safe_psql(
-                "postgres",
-                "create extension ptrack")
+                backup_dir1, 'node1', node1, backup_type='ptrack',
+                options=['--stream', '--start-time={0}'.format(startTime)])
             self.backup_node(
-                backup_dir1, 'node1', node1, backup_type="ptrack",
-                options=['--stream', '--start-time', str(startTime)])
-
-        if self.ptrack and node2.major_version > 11:
-            node2.safe_psql(
-                "postgres",
-                "create extension ptrack")
-            # make backup with start time definitelly earlier, than existing
-            try:
-                self.backup_node(
-                    backup_dir2, 'node2', node2, backup_type="ptrack",
-                    options=['--stream', '--start-time', str(10000)])
-                self.assertEqual(
-                    1, 0,
-                    "Expecting Error because start time for new backup must be newer "
-                    "\n Output: {0} \n CMD: {1}".format(
-                        repr(self.output), self.cmd))
-            except ProbackupException as e:
-                self.assertRegex(
-                    e.message,
-                    "ERROR: Cannot create directory for older backup",
-                    "\n Unexpected Error Message: {0}\n CMD: {1}".format(
-                        repr(e.message), self.cmd))
-
-        # FULL backup
-        startTime = int(time())
-        self.backup_node(
-            backup_dir1, 'node1', node1, backup_type="full",
-            options=['--stream', '--start-time', str(startTime)])
-        self.backup_node(
-            backup_dir2, 'node2', node2, backup_type="full",
-            options=['--stream', '--start-time', str(startTime)])
-
-        show_backup1 = self.show_pb(backup_dir1, 'node1')[4]
-        show_backup2 = self.show_pb(backup_dir2, 'node2')[1]
-        self.assertEqual(show_backup1['id'], show_backup2['id'])
+                backup_dir2, 'node2', node2, backup_type='ptrack',
+                options=['--stream', '--start-time={0}'.format(startTime)])
+            show_backup1 = self.show_pb(backup_dir1, 'node1')[3]
+            show_backup2 = self.show_pb(backup_dir2, 'node2')[3]
+            self.assertEqual(show_backup1['id'], show_backup2['id'])
 
         # Clean after yourself
         self.del_test_dir(module_name, fname)
+
"Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - "ERROR: Cannot create directory for older backup", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - show_backup1 = self.show_pb(backup_dir1, 'node1')[1] - show_backup2 = self.show_pb(backup_dir2, 'node2')[0] - self.assertGreater(show_backup1['id'], show_backup2['id']) + 'postgres', + 'create extension ptrack') - # PAGE backup - startTime = int(time()) - self.backup_node( - backup_dir1, 'node1', node1, backup_type="page", - options=['--stream', '--start-time', str(startTime)]) - # make backup with start time definitelly earlier, than existing - try: + startTime = str(int(time())) self.backup_node( - backup_dir2, 'node2', node2, backup_type="page", - options=['--stream', '--start-time', str(10000)]) - self.assertEqual( - 1, 0, - "Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - "ERROR: Cannot create directory for older backup", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - show_backup1 = self.show_pb(backup_dir1, 'node1')[2] - show_backup2 = self.show_pb(backup_dir2, 'node2')[0] - self.assertGreater(show_backup1['id'], show_backup2['id']) - - # PTRACK backup - startTime = int(time()) - if self.ptrack and node1.major_version > 11: - node1.safe_psql( - "postgres", - "create extension ptrack") + backup_dir1, 'node1', node1, backup_type='ptrack', + options=['--stream', '--start-time={0}'.format(startTime)]) self.backup_node( - backup_dir1, 'node1', node1, backup_type="ptrack", - options=['--stream', '--start-time', str(startTime)]) - - if self.ptrack and node2.major_version > 11: - node2.safe_psql( - "postgres", - "create extension ptrack") - # make backup with start time definitelly earlier, than existing - try: - self.backup_node( - backup_dir2, 'node2', node2, backup_type="ptrack", - options=['--stream', '--start-time', str(10000)]) - self.assertEqual( - 1, 0, - "Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - "ERROR: Cannot create directory for older backup", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - # FULL backup - startTime = int(time()) - self.backup_node( - backup_dir1, 'node1', node1, backup_type="full", - options=['--stream', '--start-time', str(startTime)]) - self.backup_node( - backup_dir2, 'node2', node2, backup_type="full", - options=['--stream', '--start-time', str(startTime)]) - - show_backup1 = self.show_pb(backup_dir1, 'node1')[4] - show_backup2 = self.show_pb(backup_dir2, 'node2')[1] - self.assertEqual(show_backup1['id'], show_backup2['id']) + backup_dir2, 'node2', node2, backup_type='ptrack', + options=['--stream', '--start-time={0}'.format(startTime)]) + show_backup1 = self.show_pb(backup_dir1, 'node1')[3] + show_backup2 = self.show_pb(backup_dir2, 'node2')[3] + self.assertEqual(show_backup1['id'], show_backup2['id']) # Clean after yourself self.del_test_dir(module_name, fname) + diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 418ef4e17..59eb12aec 100644 --- a/tests/helpers/ptrack_helpers.py +++ 
diff --git a/tests/merge.py b/tests/merge.py
index 5f092543c..4c374bdfb 100644
--- a/tests/merge.py
+++ b/tests/merge.py
@@ -1965,7 +1965,11 @@ def test_failed_merge_after_delete_3(self):
 
         self.del_test_dir(module_name, fname)
 
-    # @unittest.skip("skip")
+    # Skipped, because backups from the future are invalid.
+    # They now cause "ERROR: Can't assign backup_id, there is already a backup in the future"
+    # (PBCKP-259). We can bring this test back once we
+    # untie 'backup_id' from 'start_time'.
+    @unittest.skip("skip")
     def test_merge_backup_from_future(self):
         """
         take FULL backup, table PAGE backup from future,
diff --git a/tests/restore.py b/tests/restore.py
index b619078d5..ae1c7cbe0 100644
--- a/tests/restore.py
+++ b/tests/restore.py
@@ -1853,7 +1853,11 @@ def test_restore_chain_with_corrupted_backup(self):
         # Clean after yourself
         self.del_test_dir(module_name, fname)
 
-    # @unittest.skip("skip")
+    # Skipped, because backups from the future are invalid.
+    # They now cause "ERROR: Can't assign backup_id, there is already a backup in the future"
+    # (PBCKP-259). We can bring this test back once we
+    # untie 'backup_id' from 'start_time'.
+    @unittest.skip("skip")
     def test_restore_backup_from_future(self):
         """more complex test_restore_chain()"""
         fname = self.id().split('.')[3]