diff --git a/.gitignore b/.gitignore
index 5dbf7e8095..d16c81a975 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,3 +12,4 @@ build/*
 .coverage
 .tox/*
 tests/htmlcov/*
+.DS_Store
\ No newline at end of file
diff --git a/tests/test_indefinite_freeze_attack.py b/tests/test_indefinite_freeze_attack.py
index 1522568633..8644df0068 100755
--- a/tests/test_indefinite_freeze_attack.py
+++ b/tests/test_indefinite_freeze_attack.py
@@ -15,6 +15,15 @@
   than verifying text output), use pre-generated repository files, and
   discontinue use of the old repository tools.
   -vladimir.v.diaz

+  March 9, 2016.
+    Additional test added relating to issue:
+    https://github.com/theupdateframework/tuf/issues/322
+    If a metadata file is not updated (no indication of a new version
+    available), the expiration of the pre-existing, locally trusted metadata
+    must still be detected.  This additional test complains if such does not
+    occur, and accompanies code in tuf.client.updater:refresh() to detect it.
+    -sebastien.awwad
+

   See LICENSE for licensing information.
@@ -171,21 +180,35 @@ def tearDown(self):


   def test_without_tuf(self):
-    # Scenario:
-    # 'timestamp.json' specifies the latest version of the repository files.
-    # A client should only accept the same version of this file up to a certain
-    # point, or else it cannot detect that new files are available for download.
-    # Modify the repository's timestamp.json' so that it expires soon, copy it
-    # over the to client, and attempt to re-fetch the same expired version.
+    # Without TUF, Test 1 and Test 2 are functionally equivalent, so we skip
+    # Test 1 and only perform Test 2.
+    #
+    # Test 1: If we find that the timestamp acquired from a mirror indicates
+    #         that there is no new snapshot file, and our current snapshot
+    #         file is expired, is it recognized as such?
+    # Test 2: If an expired timestamp is downloaded, is it recognized as such?
+
+
+    # Test 2 Begin:
+    #
+    # 'timestamp.json' specifies the latest version of the repository files.  A
+    # client should only accept the same version of this file up to a certain
+    # point, or else it cannot detect that new files are available for
+    # download.  Modify the repository's 'timestamp.json' so that it expires
+    # soon, copy it over to the client, and attempt to re-fetch the same
+    # expired version.
+    #
     # A non-TUF client (without a way to detect when metadata has expired) is
     # expected to download the same version, and thus the same outdated files.
-    # Verify that the same file size and hash of 'timestamp.json' is downloaded.
+    # Verify that the downloaded 'timestamp.json' contains the same file size
+    # and hash as the one available locally.

     timestamp_path = os.path.join(self.repository_directory, 'metadata',
                                   'timestamp.json')

     timestamp_metadata = tuf.util.load_json_file(timestamp_path)
-    expires = tuf.formats.unix_timestamp_to_datetime(int(time.time() - 10))
+    expiry_time = time.time() - 10
+    expires = tuf.formats.unix_timestamp_to_datetime(int(expiry_time))
     expires = expires.isoformat() + 'Z'
     timestamp_metadata['signed']['expires'] = expires
     tuf.formats.check_signable_object_format(timestamp_metadata)
@@ -216,14 +239,113 @@ def test_without_tuf(self):
     self.assertEqual(download_fileinfo, fileinfo)


   def test_with_tuf(self):
-    # The same scenario outlined in test_without_tuf() is followed here, except
-    # with a TUF client.  The TUF client performs a refresh of top-level
-    # metadata, which also includes 'timestamp.json'.
+    # Two tests are conducted here.
+    #
+    # Test 1: If we find that the timestamp acquired from a mirror indicates
+    #         that there is no new snapshot file, and our current snapshot
+    #         file is expired, is it recognized as such?
+    # Test 2: If an expired timestamp is downloaded, is it recognized as such?
+
+
+    # Test 1 Begin:
+    #
+    # Addresses this issue: https://github.com/theupdateframework/tuf/issues/322
+    #
+    # If time has passed and our snapshot or targets role is expired, and
+    # the mirror whose timestamp we fetched doesn't indicate the existence of a
+    # new snapshot version, we still need to check that it's expired and notify
+    # the software update system / application / user.  This test creates that
+    # scenario.  The correct behavior is to raise an exception.
+    #
+    # Background: Expiration checks (updater._ensure_not_expired) were
+    # previously conducted when the metadata file was downloaded.  If no new
+    # metadata file was downloaded, no expiry check would occur.  In particular,
+    # while root was checked for expiration at the beginning of each
+    # updater.refresh() cycle, and timestamp was always checked because it was
+    # always fetched, snapshot and targets were never checked if the user did
+    # not receive evidence that they had changed.  This bug allowed a class of
+    # freeze attacks.
+    # That bug has been fixed; this test verifies the fix going forward.
+
+    # Modify the timestamp file on the remote repository.  'timestamp.json'
+    # must be properly updated and signed with 'repository_tool.py', otherwise
+    # the client will reject it as invalid metadata.
+
+    # Load the repository.
+    repository = repo_tool.load_repository(self.repository_directory)
+
+    # Load the timestamp and snapshot keys, since we will be signing a new
+    # timestamp and a new snapshot file.
+    key_file = os.path.join(self.keystore_directory, 'timestamp_key')
+    timestamp_private = repo_tool.import_rsa_privatekey_from_file(key_file,
+                                                                  'password')
+    repository.timestamp.load_signing_key(timestamp_private)
+    key_file = os.path.join(self.keystore_directory, 'snapshot_key')
+    snapshot_private = repo_tool.import_rsa_privatekey_from_file(key_file,
+                                                                 'password')
+    repository.snapshot.load_signing_key(snapshot_private)
+
+    # Expire snapshot in 8s.  This should be far enough into the future that we
+    # haven't reached it before the first refresh validates timestamp expiry.
+    # We want a successful refresh before expiry, then a second refresh after
+    # expiry (which we then expect to raise an exception due to expired
+    # metadata).
+    expiry_time = time.time() + 8
+    datetime_object = tuf.formats.unix_timestamp_to_datetime(int(expiry_time))
+
+    repository.snapshot.expiration = datetime_object
+
+    # Now write to the repository.
+    repository.write()
+
+    # And move the staged metadata to the "live" metadata.
+    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
+    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
+                    os.path.join(self.repository_directory, 'metadata'))
+
+    # Refresh metadata on the client; none of the metadata has expired yet.
+    logger.info('Test: Refreshing #1 - Initial metadata refresh occurring.')
+    self.repository_updater.refresh()
+    logger.info('Test: Refreshed #1 - Initial metadata refresh completed '
+        'successfully.  Now sleeping until snapshot metadata expires.')
+
+    # Sleep until expiry_time ('repository.snapshot.expiration').
+    time.sleep(max(0, expiry_time - time.time()))
+
+    logger.info('Test: Refreshing #2 - Now trying to refresh again after local'
+        ' snapshot expiry.')
+    try:
+      self.repository_updater.refresh() # We expect this to fail!
+
+    except tuf.ExpiredMetadataError:
+      logger.info('Test: Refresh #2 - failed as expected. The expired local'
+          ' snapshot was detected, and a tuf.ExpiredMetadataError exception'
+          ' was raised, as expected. Test passed.')

-    timestamp_path = os.path.join(self.repository_directory, 'metadata',
-                                  'timestamp.json')
+    # Only a tuf.ExpiredMetadataError is expected here.  A NoWorkingMirrorError
+    # would indicate something else in this case - an unavailable repository,
+    # for example.
+    else:
+      self.fail('TUF failed to detect expired, stale snapshot metadata. Freeze'
+          ' attack successful.')
+
+
+
+    # Test 2 Begin:
+    #
+    # 'timestamp.json' specifies the latest version of the repository files.
+    # A client should only accept the same version of this file up to a certain
+    # point, or else it cannot detect that new files are available for download.
+    # Modify the repository's 'timestamp.json' so that it is about to expire,
+    # copy it over to the client, wait a moment until it expires, and attempt
+    # to re-fetch the same expired version.
+
+    # The same scenario as in test_without_tuf() is followed here, except with
+    # a TUF client. The TUF client performs a refresh of top-level metadata,
+    # which includes 'timestamp.json', and should detect a freeze attack if
+    # the repository serves an outdated 'timestamp.json'.

     # Modify the timestamp file on the remote repository.  'timestamp.json'
     # must be properly updated and signed with 'repository_tool.py', otherwise
@@ -231,14 +353,18 @@ def test_with_tuf(self):
     # 'timestamp.json' should be valid metadata, but expired (as intended).
     repository = repo_tool.load_repository(self.repository_directory)

-    key_file = os.path.join(self.keystore_directory, 'timestamp_key')
+    key_file = os.path.join(self.keystore_directory, 'timestamp_key')
     timestamp_private = repo_tool.import_rsa_privatekey_from_file(key_file,
                                                                   'password')
     repository.timestamp.load_signing_key(timestamp_private)

-    # expire in 1 second.
-    datetime_object = tuf.formats.unix_timestamp_to_datetime(int(time.time() + 1))
+    # Set the timestamp metadata to expire soon.
+    # Note that we cannot assign an already-expired time via
+    # 'repository.timestamp.expiration = ...' because of consistency checks
+    # that occur during that assignment.
+    expiry_time = time.time() + 1
+    datetime_object = tuf.formats.unix_timestamp_to_datetime(int(expiry_time))
+
     repository.timestamp.expiration = datetime_object
     repository.write()

@@ -246,17 +372,30 @@ def test_with_tuf(self):
     shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
     shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
                     os.path.join(self.repository_directory, 'metadata'))
-
-    # Verify that the TUF client detects outdated metadata and refuses to
-    # continue the update process.  Sleep for at least 2 seconds to ensure
-    # 'repository.timestamp.expiration' is reached.
-    time.sleep(2)
+
+    # Wait just long enough for the timestamp metadata (which is now both on
+    # the repository and on the client) to expire.
+    time.sleep(max(0, expiry_time - time.time()))
+
+    # Try to refresh top-level metadata on the client.  Since we're already past
+    # 'repository.timestamp.expiration', the TUF client is expected to detect
+    # that timestamp metadata is outdated and refuse to continue the update
+    # process.
     try:
-      self.repository_updater.refresh()
+      self.repository_updater.refresh() # We expect NoWorkingMirrorError.

     except tuf.NoWorkingMirrorError as e:
+      # NoWorkingMirrorError indicates that we did not find valid, unexpired
+      # metadata at any mirror. That exception class preserves the errors from
+      # each mirror. We now assert that for each mirror, the particular error
+      # detected was that metadata was expired (the timestamp we manually
+      # expired).
       for mirror_url, mirror_error in six.iteritems(e.mirror_errors):
         self.assertTrue(isinstance(mirror_error, tuf.ExpiredMetadataError))
+
+    else:
+      self.fail('TUF failed to detect expired, stale timestamp metadata.'
+          ' Freeze attack successful.')



 if __name__ == '__main__':
diff --git a/tuf/__init__.py b/tuf/__init__.py
index ba4328ea33..0899478e6c 100755
--- a/tuf/__init__.py
+++ b/tuf/__init__.py
@@ -30,6 +30,9 @@

 import six

+import logging
+logger = logging.getLogger('tuf.__init__')
+
 # Import 'tuf.formats' if a module tries to import the
 # entire tuf package (i.e., from tuf import *).
 __all__ = ['formats']
@@ -332,13 +335,13 @@ def __init__(self, mirror_errors):
   def __str__(self):
     all_errors = 'No working mirror was found:'

-    for mirror_url, mirror_error in self.mirror_errors.iteritems():
+    for mirror_url, mirror_error in six.iteritems(self.mirror_errors):
       try:
         # http://docs.python.org/2/library/urlparse.html#urlparse.urlparse
         mirror_url_tokens = six.moves.urllib.parse.urlparse(mirror_url)

       except:
-        logging.exception('Failed to parse mirror URL: ' + repr(mirror_url))
+        logger.exception('Failed to parse mirror URL: ' + repr(mirror_url))
         mirror_netloc = mirror_url

       else:
diff --git a/tuf/client/updater.py b/tuf/client/updater.py
index c7ebcf472f..9eae01ecda 100755
--- a/tuf/client/updater.py
+++ b/tuf/client/updater.py
@@ -563,15 +563,26 @@ def _import_delegations(self, parent_role):
   def refresh(self, unsafely_update_root_if_necessary=True):
     """
-      Update the latest copies of the metadata for the top-level roles.
-      The update request process follows a specific order to ensure the
-      metadata files are securely updated.
-
-      The client would call refresh() prior to requesting target file
-      information.  Calling refresh() ensures target methods, like
-      all_targets() and target(), refer to the latest available content.
-      The latest copies, according to the currently trusted top-level metadata,
-      of delegated metadata are downloaded and updated by the target methods.
+      Update the latest copies of the metadata for the top-level roles. The
+      update request process follows a specific order to ensure the metadata
+      files are securely updated:
+      timestamp -> snapshot -> root (if necessary) -> targets.
+
+      Delegated metadata is not refreshed by this method. After this method is
+      called, the use of target methods (e.g., all_targets(),
+      targets_of_role(), or target()) will update delegated metadata, when
+      required. Calling refresh() ensures that top-level metadata is
+      up-to-date, so that the target methods can refer to the latest available
+      content. Thus, refresh() should always be called by the client before any
+      requests of target file information.
+
+      The expiration time for downloaded metadata is also verified, including
+      local metadata that the repository claims is up to date.
+
+      If the refresh fails for any reason and
+      'unsafely_update_root_if_necessary' is set, refresh will be retried
+      once after first attempting to update the root metadata file.  Only
+      after this retry will the exceptions listed here potentially be raised.

       unsafely_update_root_if_necessary:
@@ -584,7 +595,9 @@ def refresh(self, unsafely_update_root_if_necessary=True):
         If the metadata for any of the top-level roles cannot be updated.

       tuf.ExpiredMetadataError:
-        If any metadata has expired.
+        If any of the top-level metadata is expired (whether a newly
+        downloaded version is already expired, or no new version was found
+        and the existing version is now expired).

       Updates the metadata files of the top-level roles with the latest
@@ -643,6 +656,10 @@ def refresh(self, unsafely_update_root_if_necessary=True):
        logger.info('An expired Root metadata was loaded and must be updated.')
        raise

+    # If an exception is raised during the metadata update attempts, we will
+    # attempt to update root metadata once by recursing with a special argument
+    # (unsafely_update_root_if_necessary) to avoid further recursion.
+
     # Use default but sane information for timestamp metadata, and do not
     # require strict checks on its required length.
     try:
@@ -652,18 +669,48 @@ def refresh(self, unsafely_update_root_if_necessary=True):
       self._update_metadata_if_changed('root')
       self._update_metadata_if_changed('targets')

-    except tuf.NoWorkingMirrorError as e:
+    # There are two distinct error scenarios that can arise from the
+    # _update_metadata_if_changed calls in the try block above:
+    #
+    #   - tuf.NoWorkingMirrorError:
+    #
+    #     If a change to a metadata file IS detected in an
+    #     _update_metadata_if_changed call, but we are unable to download a
+    #     valid (unexpired and properly signed) version of that metadata
+    #     file, a tuf.NoWorkingMirrorError propagates up to this point.
+    #
+    #   - tuf.ExpiredMetadataError:
+    #
+    #     If, on the other hand, a change to a metadata file IS NOT detected
+    #     in a given _update_metadata_if_changed call, but we observe that the
+    #     version of the metadata file we have on hand is now expired, a
+    #     tuf.ExpiredMetadataError exception propagates up to this point.
+    #
+    except tuf.NoWorkingMirrorError:
       if unsafely_update_root_if_necessary:
-        message = 'Valid top-level metadata cannot be downloaded. Unsafely '+\
-          'update the Root metadata.'
-        logger.info(message)
-
+        logger.info('Valid top-level metadata cannot be downloaded. Unsafely'
+            ' update the Root metadata.')
         self._update_metadata('root', DEFAULT_ROOT_UPPERLENGTH)
         self.refresh(unsafely_update_root_if_necessary=False)
-
+
       else:
         raise

+    except tuf.ExpiredMetadataError:
+      if unsafely_update_root_if_necessary:
+        logger.info('No changes were detected from the mirrors for a given'
+            ' role, and the metadata available on disk has been found to be'
+            ' expired. Trying to update root in case of foul play.')
+        self._update_metadata('root', DEFAULT_ROOT_UPPERLENGTH)
+        self.refresh(unsafely_update_root_if_necessary=False)
+
+      # The caller explicitly requested not to unsafely fetch an expired Root.
+      else:
+        logger.info('No changes were detected from the mirrors for a given'
+            ' role, and the metadata available on disk has been found to be'
+            ' expired. Your metadata is out of date.')
+        raise
+
@@ -1150,7 +1197,7 @@ def _get_metadata_file(self, metadata_role, remote_filename,
       return file_object

     else:
-      logger.exception('Failed to update {0} from all mirrors: {1}'.format(
+      logger.error('Failed to update {0} from all mirrors: {1}'.format(
        remote_filename, file_mirror_errors))
      raise tuf.NoWorkingMirrorError(file_mirror_errors)

@@ -1578,6 +1625,13 @@ def _update_metadata_if_changed(self, metadata_role, expected_versioninfo):
        logger.info(repr(uncompressed_metadata_filename) + ' up-to-date.')

+        # Since we have not downloaded a new version of this metadata, we
+        # should check to see if our local version is stale and notify the user
+        # if so.  This raises tuf.ExpiredMetadataError if the metadata we
+        # have is expired.  Resolves issue #322.
+        self._ensure_not_expired(self.metadata['current'][metadata_role],
+                                 metadata_role)
+
        return

    logger.debug('Metadata ' + repr(uncompressed_metadata_filename) + ' has changed.')
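
For context, the sketch below illustrates how client code might drive the updated refresh() and distinguish the two failure modes described in the comments above. It is not part of the patch; the mirror URL, client directory, and updater name are hypothetical placeholders, and it assumes the 2016-era tuf.client.updater API.

  # Illustrative sketch only -- not part of the patch.
  import tuf
  import tuf.conf
  import tuf.client.updater

  # Hypothetical client setup: local metadata directory and a single mirror.
  tuf.conf.repository_directory = './client'
  repository_mirrors = {'mirror1': {'url_prefix': 'http://localhost:8001',
                                    'metadata_path': 'metadata',
                                    'targets_path': 'targets',
                                    'confined_target_dirs': ['']}}

  updater = tuf.client.updater.Updater('updater', repository_mirrors)

  try:
    # Top-level metadata is updated in order (timestamp -> snapshot -> root,
    # if necessary -> targets).  With this patch, metadata that was *not*
    # re-downloaded is also checked for expiration (issue #322).
    updater.refresh()

  except tuf.ExpiredMetadataError:
    # No new metadata was offered, but the locally trusted copy has expired:
    # a possible freeze attack.  Refuse to proceed with the update.
    raise

  except tuf.NoWorkingMirrorError as e:
    # No mirror supplied valid, unexpired metadata.  The per-mirror errors are
    # preserved on the exception for inspection or logging.
    for mirror_url, mirror_error in e.mirror_errors.items():
      print(mirror_url, mirror_error)
    raise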