diff --git a/tests/unit/test_backups.py b/tests/unit/test_backups.py
index 42b9d67718..fbca3c691b 100644
--- a/tests/unit/test_backups.py
+++ b/tests/unit/test_backups.py
@@ -1101,7 +1101,15 @@ def test_on_s3_credential_changed(harness):
         _can_initialise_stanza.assert_called_once()
         _is_primary.assert_not_called()
 
-        # Test that followers will not initialise the bucket
+        # Test that followers will not initialise the bucket (and that only the leader will
+        # remove the "require-change-bucket-after-restore" flag from the application databag).
+        with harness.hooks_disabled():
+            harness.set_leader()
+            harness.update_relation_data(
+                peer_rel_id,
+                harness.charm.app.name,
+                {"require-change-bucket-after-restore": "True"},
+            )
         harness.charm.unit.status = ActiveStatus()
         _render_pgbackrest_conf_file.reset_mock()
         _can_initialise_stanza.return_value = True
@@ -1116,6 +1124,10 @@ def test_on_s3_credential_changed(harness):
             relation=harness.model.get_relation(S3_PARAMETERS_RELATION, s3_rel_id)
         )
         _render_pgbackrest_conf_file.assert_called_once()
+        tc.assertNotIn(
+            "require-change-bucket-after-restore",
+            harness.get_relation_data(peer_rel_id, harness.charm.app),
+        )
         _is_primary.assert_called_once()
         _create_bucket_if_not_exists.assert_not_called()
         tc.assertIsInstance(harness.charm.unit.status, ActiveStatus)
@@ -1123,7 +1135,9 @@ def test_on_s3_credential_changed(harness):
         _initialise_stanza.assert_not_called()
 
         # Test when the charm render the pgBackRest configuration file, but fails to
-        # access or create the S3 bucket.
+        # access or create the S3 bucket (and assert that a non-leader unit won't
+        # remove the "require-change-bucket-after-restore" flag from the application
+        # databag).
         _is_primary.return_value = True
         for error in [
             ClientError(
@@ -1132,6 +1146,13 @@ def test_on_s3_credential_changed(harness):
             ),
             ValueError,
         ]:
+            with harness.hooks_disabled():
+                harness.set_leader(False)
+                harness.update_relation_data(
+                    peer_rel_id,
+                    harness.charm.app.name,
+                    {"require-change-bucket-after-restore": "True"},
+                )
             _render_pgbackrest_conf_file.reset_mock()
             _create_bucket_if_not_exists.reset_mock()
             _create_bucket_if_not_exists.side_effect = error
@@ -1139,6 +1160,12 @@ def test_on_s3_credential_changed(harness):
                 relation=harness.model.get_relation(S3_PARAMETERS_RELATION, s3_rel_id)
            )
             _render_pgbackrest_conf_file.assert_called_once()
+            tc.assertEqual(
+                harness.get_relation_data(peer_rel_id, harness.charm.app)[
+                    "require-change-bucket-after-restore"
+                ],
+                "True",
+            )
             _create_bucket_if_not_exists.assert_called_once()
             tc.assertIsInstance(harness.charm.unit.status, BlockedStatus)
             tc.assertEqual(
diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py
index ee2a254fed..e4a84777fd 100644
--- a/tests/unit/test_charm.py
+++ b/tests/unit/test_charm.py
@@ -2362,7 +2362,8 @@ def test_update_new_unit_status(harness):
     assert isinstance(harness.charm.unit.status, WaitingStatus)
 
 
-def test_set_primary_status_message(harness):
+@pytest.mark.parametrize("is_leader", [True, False])
+def test_set_primary_status_message(harness, is_leader):
     with (
         patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started,
         patch(
@@ -2370,6 +2371,35 @@ def test_set_primary_status_message(harness):
         ) as _is_standby_leader,
         patch("charm.Patroni.get_primary") as _get_primary,
     ):
+        # Test scenario when it's needed to move to another S3 bucket after a restore.
+        databag_containing_restore_data = {
+            "require-change-bucket-after-restore": "True",
+            "restoring-backup": "2024-01-01T09:00:00Z",
+            "restore-stanza": "fake-stanza",
+            "restore-to-time": "",
+        }
+        with harness.hooks_disabled():
+            harness.update_relation_data(
+                harness.model.get_relation(PEER).id,
+                harness.charm.app.name,
+                databag_containing_restore_data,
+            )
+            harness.set_leader(is_leader)
+        harness.charm._set_primary_status_message()
+        tc.assertEqual(
+            harness.get_relation_data(harness.model.get_relation(PEER).id, harness.charm.app),
+            {"require-change-bucket-after-restore": "True"}
+            if is_leader else databag_containing_restore_data,
+        )
+        tc.assertIsInstance(harness.charm.unit.status, BlockedStatus)
+
+        # Test other scenarios.
+        with harness.hooks_disabled():
+            harness.update_relation_data(
+                harness.model.get_relation(PEER).id,
+                harness.charm.app.name,
+                {"require-change-bucket-after-restore": ""},
+            )
         for values in itertools.product(
             [
                 RetryError(last_attempt=1),
diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py
index dbe2cb3901..8662b7b2af 100644
--- a/tests/unit/test_cluster.py
+++ b/tests/unit/test_cluster.py
@@ -10,7 +10,7 @@
 from charms.operator_libs_linux.v2 import snap
 from jinja2 import Template
 from ops.testing import Harness
-from tenacity import stop_after_delay
+from tenacity import RetryError, stop_after_delay, wait_fixed
 
 from charm import PostgresqlOperatorCharm
 from cluster import Patroni
@@ -429,7 +429,9 @@ def test_switchover(peers_ips, patroni):
 
 
 def test_update_synchronous_node_count(peers_ips, patroni):
-    with patch("requests.patch") as _patch:
+    with patch("cluster.stop_after_delay", return_value=stop_after_delay(0)) as _wait_fixed, patch(
+        "cluster.wait_fixed", return_value=wait_fixed(0)
+    ) as _wait_fixed, patch("requests.patch") as _patch:
         response = _patch.return_value
         response.status_code = 200
 
@@ -439,6 +441,11 @@ def test_update_synchronous_node_count(peers_ips, patroni):
             "http://1.1.1.1:8008/config", json={"synchronous_node_count": 0}, verify=True
         )
 
+        # Test when the request fails.
+        response.status_code = 500
+        with tc.assertRaises(RetryError):
+            patroni.update_synchronous_node_count()
+
 
 def test_configure_patroni_on_unit(peers_ips, patroni):
     with (
@@ -542,3 +549,69 @@ def test_member_inactive_error(peers_ips, patroni):
     assert patroni.member_inactive
 
     _get.assert_called_once_with("http://1.1.1.1:8008/health", verify=True, timeout=5)
+
+
+def test_patroni_logs(patroni):
+    with patch("charm.snap.SnapCache") as _snap_cache:
+        # Test when the logs are returned successfully.
+        logs = _snap_cache.return_value.__getitem__.return_value.logs
+        logs.return_value = "fake-logs"
+        assert patroni.patroni_logs() == "fake-logs"
+
+        # Test when the charm fails to get the logs.
+        logs.side_effect = snap.SnapError
+        assert patroni.patroni_logs() == ""
+
+
+def test_last_postgresql_logs(patroni):
+    with patch("glob.glob") as _glob, patch(
+        "builtins.open", mock_open(read_data="fake-logs")
+    ) as _open:
+        # Test when there are no files to read.
+        assert patroni.last_postgresql_logs() == ""
+        _open.assert_not_called()
+
+        # Test when there are multiple files in the logs directory.
+        _glob.return_value = [
+            "/var/snap/charmed-postgresql/common/var/log/postgresql/postgresql.log.1",
+            "/var/snap/charmed-postgresql/common/var/log/postgresql/postgresql.log.2",
+            "/var/snap/charmed-postgresql/common/var/log/postgresql/postgresql.log.3",
+        ]
+        assert patroni.last_postgresql_logs() == "fake-logs"
+        _open.assert_called_once_with(
+            "/var/snap/charmed-postgresql/common/var/log/postgresql/postgresql.log.3", "r"
+        )
+
+        # Test when the charm fails to read the logs.
+        _open.reset_mock()
+        _open.side_effect = OSError
+        assert patroni.last_postgresql_logs() == ""
+        _open.assert_called_with(
+            "/var/snap/charmed-postgresql/common/var/log/postgresql/postgresql.log.3", "r"
+        )
+
+
+def test_get_patroni_restart_condition(patroni):
+    mock = mock_open()
+    with patch("builtins.open", mock) as _open:
+        # Test when there is a restart condition set.
+        _open.return_value.__enter__.return_value.read.return_value = "Restart=always"
+        assert patroni.get_patroni_restart_condition() == "always"
+
+        # Test when there is no restart condition set.
+        _open.return_value.__enter__.return_value.read.return_value = ""
+        with tc.assertRaises(RuntimeError):
+            patroni.get_patroni_restart_condition()
+
+
+@pytest.mark.parametrize("new_restart_condition", ["on-success", "on-failure"])
+def test_update_patroni_restart_condition(patroni, new_restart_condition):
+    with patch("builtins.open", mock_open(read_data="Restart=always")) as _open, patch(
+        "subprocess.run"
+    ) as _run:
+        _open.return_value.__enter__.return_value.read.return_value = "Restart=always"
+        patroni.update_patroni_restart_condition(new_restart_condition)
+        _open.return_value.__enter__.return_value.write.assert_called_once_with(
+            f"Restart={new_restart_condition}"
+        )
+        _run.assert_called_once_with(["/bin/systemctl", "daemon-reload"])
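
The updated test_update_synchronous_node_count patches cluster.stop_after_delay and cluster.wait_fixed and expects RetryError when Patroni answers with a 500, which implies that Patroni.update_synchronous_node_count now retries the PATCH to Patroni's /config endpoint with tenacity. The charm-side code is not part of this diff; the snippet below is only a minimal sketch of that pattern, and the URL, retry timings, and use of raise_for_status() are illustrative assumptions rather than the charm's actual implementation.

import requests
from tenacity import RetryError, Retrying, stop_after_delay, wait_fixed


def update_synchronous_node_count(synchronous_node_count: int = 0) -> None:
    """Sketch: retry a Patroni /config PATCH until it succeeds or the delay expires."""
    # stop_after_delay and wait_fixed are plain module-level imports, so a test can patch
    # them in this module (as the diff does for cluster.py) to make the loop give up fast.
    for attempt in Retrying(stop=stop_after_delay(60), wait=wait_fixed(3)):
        with attempt:
            response = requests.patch(
                "http://1.1.1.1:8008/config",
                json={"synchronous_node_count": synchronous_node_count},
                verify=True,
            )
            # A 4xx/5xx answer raises, which keeps tenacity retrying; once the stop
            # condition is reached the call fails with RetryError, as the test asserts.
            response.raise_for_status()


if __name__ == "__main__":
    try:
        update_synchronous_node_count(0)
    except RetryError:
        print("Patroni did not accept the new synchronous_node_count in time")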