diff --git a/requirements-pinned.txt b/requirements-pinned.txt index a68c52f64f..ae0b5a0cb8 100644 --- a/requirements-pinned.txt +++ b/requirements-pinned.txt @@ -11,7 +11,7 @@ pycparser==2.20 # via cffi pynacl==1.3.0 # via securesystemslib python-dateutil==2.8.1 # via securesystemslib requests==2.23.0 -securesystemslib[colors,crypto,pynacl]==0.14.2 +securesystemslib[colors,crypto,pynacl]==0.15.0 six==1.14.0 subprocess32==3.5.4 ; python_version < '3' # via securesystemslib urllib3==1.25.9 # via requests diff --git a/setup.py b/setup.py index 9e598e100d..af087872d7 100755 --- a/setup.py +++ b/setup.py @@ -116,7 +116,7 @@ 'iso8601>=0.1.12', 'requests>=2.19.1', 'six>=1.11.0', - 'securesystemslib>=0.12.0' + 'securesystemslib>=0.15.0' ], tests_require = [ 'mock; python_version < "3.3"' diff --git a/tests/repository_data/generate_project_data.py b/tests/repository_data/generate_project_data.py index f584790ef3..fc93fd594e 100755 --- a/tests/repository_data/generate_project_data.py +++ b/tests/repository_data/generate_project_data.py @@ -104,10 +104,6 @@ project.expiration = datetime.datetime(2030, 1, 1, 0, 0) project('role1').expiration = datetime.datetime(2030, 1, 1, 0, 0) -# Compress the project role metadata so that the unit tests have a pre-generated -# example of compressed metadata. -project.compressions = ['gz'] - # Create the actual metadata files, which are saved to 'metadata.staged'. if not options.dry_run: project.write() diff --git a/tests/test_developer_tool.py b/tests/test_developer_tool.py index f72cfa1a5d..b595aeda61 100755 --- a/tests/test_developer_tool.py +++ b/tests/test_developer_tool.py @@ -38,6 +38,7 @@ import tuf.exceptions import securesystemslib +import securesystemslib.exceptions from tuf.developer_tool import METADATA_DIRECTORY_NAME from tuf.developer_tool import TARGETS_DIRECTORY_NAME @@ -188,7 +189,8 @@ def test_load_project(self): # Test non-existent project filepath. nonexistent_path = os.path.join(local_tmp, 'nonexistent') - self.assertRaises(IOError, developer_tool.load_project, nonexistent_path) + self.assertRaises(securesystemslib.exceptions.StorageError, + developer_tool.load_project, nonexistent_path) # Copy the pregenerated metadata. project_data_filepath = os.path.join('repository_data', 'project') diff --git a/tests/test_repository_lib.py b/tests/test_repository_lib.py index 36527c1d55..425fff1d2d 100755 --- a/tests/test_repository_lib.py +++ b/tests/test_repository_lib.py @@ -52,8 +52,10 @@ import tuf.repository_tool as repo_tool import securesystemslib +import securesystemslib.exceptions import securesystemslib.rsa_keys import securesystemslib.interface +import securesystemslib.storage import six logger = logging.getLogger(__name__) @@ -126,8 +128,9 @@ def test_import_rsa_privatekey_from_file(self): # Non-existent key file. nonexistent_keypath = os.path.join(temporary_directory, 'nonexistent_keypath') - self.assertRaises(IOError, repo_lib.import_rsa_privatekey_from_file, - nonexistent_keypath, 'pw') + self.assertRaises(securesystemslib.exceptions.StorageError, + repo_lib.import_rsa_privatekey_from_file, + nonexistent_keypath, 'pw') # Invalid key file argument. invalid_keyfile = os.path.join(temporary_directory, 'invalid_keyfile') @@ -160,7 +163,8 @@ def test_import_ed25519_privatekey_from_file(self): # Non-existent key file. 
nonexistent_keypath = os.path.join(temporary_directory, 'nonexistent_keypath') - self.assertRaises(IOError, repo_lib.import_ed25519_privatekey_from_file, + self.assertRaises(securesystemslib.exceptions.StorageError, + repo_lib.import_ed25519_privatekey_from_file, nonexistent_keypath, 'pw') # Invalid key file argument. @@ -215,7 +219,7 @@ def test_get_metadata_filenames(self): 'targets.json': os.path.join(metadata_directory, 'targets.json'), 'snapshot.json': os.path.join(metadata_directory, 'snapshot.json'), 'timestamp.json': os.path.join(metadata_directory, 'timestamp.json')} - self.assertEqual(filenames, repo_lib.get_metadata_filenames()) + self.assertEqual(filenames, repo_lib.get_metadata_filenames(metadata_directory)) # Test improperly formatted argument. @@ -241,17 +245,23 @@ def test_get_metadata_fileinfo(self): fileinfo = {'length': file_length, 'hashes': file_hashes} self.assertTrue(tuf.formats.FILEINFO_SCHEMA.matches(fileinfo)) - self.assertEqual(fileinfo, repo_lib.get_metadata_fileinfo(test_filepath)) + storage_backend = securesystemslib.storage.FilesystemBackend() + + self.assertEqual(fileinfo, repo_lib.get_metadata_fileinfo(test_filepath, + storage_backend)) # Test improperly formatted argument. - self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.get_metadata_fileinfo, 3) + self.assertRaises(securesystemslib.exceptions.FormatError, + repo_lib.get_metadata_fileinfo, 3, + storage_backend) # Test non-existent file. nonexistent_filepath = os.path.join(temporary_directory, 'oops.txt') - self.assertRaises(securesystemslib.exceptions.Error, repo_lib.get_metadata_fileinfo, - nonexistent_filepath) + self.assertRaises(securesystemslib.exceptions.Error, + repo_lib.get_metadata_fileinfo, + nonexistent_filepath, storage_backend) @@ -440,8 +450,9 @@ def test_generate_snapshot_metadata(self): # Load a valid repository so that top-level roles exist in roledb and # generate_snapshot_metadata() has roles to specify in snapshot metadata. + storage_backend = securesystemslib.storage.FilesystemBackend() repository = repo_tool.Repository(repository_directory, metadata_directory, - targets_directory) + targets_directory, storage_backend) repository_junk = repo_tool.load_repository(repository_directory) @@ -458,6 +469,7 @@ def test_generate_snapshot_metadata(self): repo_lib.generate_snapshot_metadata(metadata_directory, version, expiration_date, targets_filename, + storage_backend, consistent_snapshot=False) self.assertTrue(tuf.formats.SNAPSHOT_SCHEMA.matches(snapshot_metadata)) @@ -465,19 +477,19 @@ def test_generate_snapshot_metadata(self): # Test improperly formatted arguments. 
self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_snapshot_metadata, 3, version, expiration_date, - targets_filename, consistent_snapshot=False) + targets_filename, consistent_snapshot=False, storage_backend=storage_backend) self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_snapshot_metadata, metadata_directory, '3', expiration_date, - targets_filename, consistent_snapshot=False) + targets_filename, storage_backend, consistent_snapshot=False) self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_snapshot_metadata, metadata_directory, version, '3', - targets_filename, consistent_snapshot=False) + targets_filename, storage_backend, consistent_snapshot=False) self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_snapshot_metadata, metadata_directory, version, expiration_date, - 3, consistent_snapshot=False) + 3, storage_backend, consistent_snapshot=False) self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_snapshot_metadata, metadata_directory, version, expiration_date, - targets_filename, 3) + targets_filename, 3, storage_backend) @@ -599,85 +611,25 @@ def test_write_metadata_file(self): version_number = root_signable['signed']['version'] + 1 self.assertFalse(os.path.exists(output_filename)) + storage_backend = securesystemslib.storage.FilesystemBackend() repo_lib.write_metadata_file(root_signable, output_filename, version_number, - consistent_snapshot=False) + consistent_snapshot=False, storage_backend=storage_backend) self.assertTrue(os.path.exists(output_filename)) # Attempt to over-write the previously written metadata file. An exception # is not raised in this case, only a debug message is logged. repo_lib.write_metadata_file(root_signable, output_filename, version_number, - consistent_snapshot=False) - - # Try to write a consistent metadate file. An exception is not raised in - # this case. For testing purposes, root.json should be a hard link to the - # consistent metadata file. We should verify that root.json points to - # the latest consistent files. - tuf.settings.CONSISTENT_METHOD = 'hard_link' - repo_lib.write_metadata_file(root_signable, output_filename, version_number, - consistent_snapshot=True) - - # Test if the consistent files are properly named - # Filename format of a consistent file: .rolename.json - version_and_filename = str(version_number) + '.' + 'root.json' - first_version_output_file = os.path.join(temporary_directory, version_and_filename) - self.assertTrue(os.path.exists(first_version_output_file)) - - # Verify that the consistent file content is equal to 'output_filename'. - self.assertEqual( - securesystemslib.util.get_file_details(output_filename), - securesystemslib.util.get_file_details(first_version_output_file)) - - # Try to add more consistent metadata files. - version_number += 1 - root_signable['signed']['version'] = version_number - repo_lib.write_metadata_file(root_signable, output_filename, - version_number, consistent_snapshot=True) - - # Test if the latest root.json points to the expected consistent file - # and consistent metadata do not all point to the same root.json - version_and_filename = str(version_number) + '.' + 'root.json' - second_version_output_file = os.path.join(temporary_directory, version_and_filename) - self.assertTrue(os.path.exists(second_version_output_file)) - - # Verify that the second version is equal to the second output file, and - # that the second output filename differs from the first. 
- self.assertEqual(securesystemslib.util.get_file_details(output_filename), - securesystemslib.util.get_file_details(second_version_output_file)) - self.assertNotEqual(securesystemslib.util.get_file_details(output_filename), - securesystemslib.util.get_file_details(first_version_output_file)) - - # Test for an improper settings.CONSISTENT_METHOD string value. - tuf.settings.CONSISTENT_METHOD = 'somebadidea' - - # Test for invalid consistent methods on systems other than Windows, - # which always uses the copy method. - if platform.system() == 'Windows': - pass - - else: - self.assertRaises(securesystemslib.exceptions.InvalidConfigurationError, - repo_lib.write_metadata_file, root_signable, output_filename, - version_number, consistent_snapshot=True) - - # Try to create a link to root.json when root.json doesn't exist locally. - # repository_lib should log a message if this is the case. - tuf.settings.CONSISTENT_METHOD = 'hard_link' - os.remove(output_filename) - repo_lib.write_metadata_file(root_signable, output_filename, version_number, - consistent_snapshot=True) - - # Reset CONSISTENT_METHOD so that subsequent tests work as expected. - tuf.settings.CONSISTENT_METHOD = 'copy' + consistent_snapshot=False, storage_backend=storage_backend) # Test improperly formatted arguments. self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.write_metadata_file, - 3, output_filename, version_number, False) + 3, output_filename, version_number, False, storage_backend) self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.write_metadata_file, - root_signable, 3, version_number, False) + root_signable, 3, version_number, False, storage_backend) self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.write_metadata_file, - root_signable, output_filename, '3', False) + root_signable, output_filename, '3', False, storage_backend) self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.write_metadata_file, - root_signable, output_filename, version_number, 3) + root_signable, output_filename, version_number, 3, storage_backend) @@ -731,13 +683,6 @@ def test_create_tuf_client_directory(self): - def test__check_directory(self): - # Test for non-existent directory. - self.assertRaises(securesystemslib.exceptions.Error, - repo_lib._check_directory, 'non-existent') - - - def test__generate_and_write_metadata(self): # Test for invalid, or unsupported, rolename. # Load the root metadata provided in 'tuf/tests/repository_data/'. 
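
The hunks above and below migrate test_repository_lib.py from raw filesystem calls to an explicit storage abstraction: write_metadata_file() and _generate_and_write_metadata() now receive a securesystemslib.storage.FilesystemBackend instance, and missing files surface as securesystemslib.exceptions.StorageError rather than IOError. A minimal sketch of the new calling convention these tests exercise; the metadata path below is an illustrative placeholder, not a file shipped with this change:

```python
import securesystemslib.storage
import securesystemslib.util

import tuf.repository_lib as repo_lib

# One backend instance is created and threaded through every helper that
# previously reached for the filesystem directly.
storage_backend = securesystemslib.storage.FilesystemBackend()

# Load a signed root metadata file; 'repository_data/root.json' is a
# hypothetical path used only for illustration.
signable = securesystemslib.util.load_json_file('repository_data/root.json')

# write_metadata_file() now takes the backend as its fifth argument and
# raises securesystemslib.exceptions.StorageError (not IOError) on failure.
repo_lib.write_metadata_file(signable, 'root.json',
    signable['signed']['version'], False, storage_backend)
```
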
@@ -774,9 +719,11 @@ def test__generate_and_write_metadata(self): tuf.roledb.add_role('obsolete_role', targets_roleinfo, repository_name=repository_name) + storage_backend = securesystemslib.storage.FilesystemBackend() repo_lib._generate_and_write_metadata('obsolete_role', obsolete_metadata, - targets_directory, metadata_directory, consistent_snapshot=False, - filenames=None, repository_name=repository_name) + targets_directory, metadata_directory, storage_backend, + consistent_snapshot=False, filenames=None, + repository_name=repository_name) snapshot_filepath = os.path.join('repository_data', 'repository', 'metadata', 'snapshot.json') @@ -785,7 +732,8 @@ def test__generate_and_write_metadata(self): self.assertTrue(os.path.exists(os.path.join(metadata_directory, 'obsolete_role.json'))) tuf.repository_lib._delete_obsolete_metadata(metadata_directory, - snapshot_signable['signed'], False, repository_name) + snapshot_signable['signed'], False, repository_name, + storage_backend) self.assertFalse(os.path.exists(metadata_directory + 'obsolete_role.json')) shutil.copyfile(targets_metadata, obsolete_metadata) @@ -801,6 +749,7 @@ def test__delete_obsolete_metadata(self): snapshot_filepath = os.path.join('repository_data', 'repository', 'metadata', 'snapshot.json') snapshot_signable = securesystemslib.util.load_json_file(snapshot_filepath) + storage_backend = securesystemslib.storage.FilesystemBackend() # Create role metadata that should not exist in snapshot.json. role1_filepath = os.path.join('repository_data', 'repository', 'metadata', @@ -808,20 +757,21 @@ def test__delete_obsolete_metadata(self): shutil.copyfile(role1_filepath, os.path.join(metadata_directory, 'role2.json')) repo_lib._delete_obsolete_metadata(metadata_directory, - snapshot_signable['signed'], True, repository_name) + snapshot_signable['signed'], True, repository_name, storage_backend) # _delete_obsolete_metadata should never delete root.json. root_filepath = os.path.join('repository_data', 'repository', 'metadata', 'root.json') shutil.copyfile(root_filepath, os.path.join(metadata_directory, 'root.json')) repo_lib._delete_obsolete_metadata(metadata_directory, - snapshot_signable['signed'], True, repository_name) + snapshot_signable['signed'], True, repository_name, storage_backend) self.assertTrue(os.path.exists(os.path.join(metadata_directory, 'root.json'))) - # Verify what happens for a non-existent metadata directory (a debug - # message is logged). + # Verify that a StorageError is raised for a non-existent metadata + # directory. - repo_lib._delete_obsolete_metadata('non-existent', - snapshot_signable['signed'], True, repository_name) + self.assertRaises(securesystemslib.exceptions.StorageError, + repo_lib._delete_obsolete_metadata, 'non-existent', + snapshot_signable['signed'], True, repository_name, storage_backend) def test__load_top_level_metadata(self): @@ -843,12 +793,8 @@ def test__load_top_level_metadata(self): signable = securesystemslib.util.load_json_file(os.path.join(metadata_directory, 'root.json')) signable['signatures'].append(signable['signatures'][0]) - repo_lib.write_metadata_file(signable, root_file, 8, False) - - # Attempt to load a repository that contains a compressed Root file. 
- repository = repo_tool.create_new_repository(repository_directory, repository_name) - filenames = repo_lib.get_metadata_filenames(metadata_directory) - repo_lib._load_top_level_metadata(repository, filenames, repository_name) + storage_backend = securesystemslib.storage.FilesystemBackend() + repo_lib.write_metadata_file(signable, root_file, 8, False, storage_backend) filenames = repo_lib.get_metadata_filenames(metadata_directory) repository = repo_tool.create_new_repository(repository_directory, repository_name) @@ -872,7 +818,9 @@ def test__load_top_level_metadata(self): if role_file.endswith('.json') and not role_file.startswith('root'): role_filename = os.path.join(metadata_directory, role_file) os.remove(role_filename) - repo_lib._load_top_level_metadata(repository, filenames, repository_name) + self.assertRaises(tuf.exceptions.RepositoryError, + repo_lib._load_top_level_metadata, repository, filenames, + repository_name) # Remove the required Root file and verify that an exception is raised. os.remove(os.path.join(metadata_directory, 'root.json')) diff --git a/tests/test_repository_tool.py b/tests/test_repository_tool.py index e4ac72bdf1..9ca2991f3e 100755 --- a/tests/test_repository_tool.py +++ b/tests/test_repository_tool.py @@ -48,6 +48,7 @@ import securesystemslib.exceptions import securesystemslib +import securesystemslib.storage import six logger = logging.getLogger(__name__) @@ -96,20 +97,22 @@ def tearDown(self): def test_init(self): # Test normal case. repository_name = 'test_repository' + storage_backend = securesystemslib.storage.FilesystemBackend() repository = repo_tool.Repository('repository_directory/', - 'metadata_directory/', 'targets_directory/', repository_name) + 'metadata_directory/', 'targets_directory/', storage_backend, + repository_name) self.assertTrue(isinstance(repository.root, repo_tool.Root)) self.assertTrue(isinstance(repository.snapshot, repo_tool.Snapshot)) self.assertTrue(isinstance(repository.timestamp, repo_tool.Timestamp)) self.assertTrue(isinstance(repository.targets, repo_tool.Targets)) # Test improperly formatted arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository, 3, - 'metadata_directory/', 'targets_directory') self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository, - 'repository_directory', 3, 'targets_directory') + storage_backend, 3, 'metadata_directory/', 'targets_directory') + self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository, + 'repository_directory', storage_backend, 3, 'targets_directory') self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository, - 'repository_directory', 'metadata_directory', 3) + 'repository_directory', 'metadata_directory', 3, storage_backend) @@ -340,12 +343,15 @@ def test_writeall(self): # loaded before writing consistent snapshot. repository.root.load_signing_key(root_privkey) repository.snapshot.load_signing_key(snapshot_privkey) + # Must also load targets signing key, because targets is re-signed when + # updating 'role1'. + repository.targets.load_signing_key(targets_privkey) repository.targets('role1').load_signing_key(role1_privkey) # Verify that a consistent snapshot can be written and loaded. The roles # above must be marked as dirty, otherwise writeall() will not create a # consistent snapshot for them. 
- repository.mark_dirty(['role1', 'root', 'snapshot', 'timestamp']) + repository.mark_dirty(['role1', 'targets', 'root', 'snapshot', 'timestamp']) repository.writeall(consistent_snapshot=True) # Verify that the newly written consistent snapshot can be loaded @@ -1818,14 +1824,9 @@ def test_create_new_repository(self): repo_tool.create_new_repository, 3, repository_name) # For testing purposes, try to create a repository directory that - # fails due to a non-errno.EEXIST exception raised. create_new_repository() - # should only pass for OSError (errno.EEXIST). - try: - repo_tool.create_new_repository('bad' * 2000, repository_name) - - except OSError as e: - # errno.ENOENT is raised in Windows. - self.assertTrue(e.errno == errno.ENAMETOOLONG or e.errno == errno.ENOENT) + # fails due to a non-errno.EEXIST exception raised. + self.assertRaises(securesystemslib.exceptions.StorageError, + repo_tool.create_new_repository, 'bad' * 2000, repository_name) # Reset the 'repository_directory' so that the metadata and targets # directories can be tested likewise. @@ -1836,12 +1837,8 @@ def test_create_new_repository(self): tuf.repository_tool.METADATA_STAGED_DIRECTORY_NAME tuf.repository_tool.METADATA_STAGED_DIRECTORY_NAME = 'bad' * 2000 - try: - repo_tool.create_new_repository(repository_directory, repository_name) - - except OSError as e: - # errno.ENOENT is raised in Windows. - self.assertTrue(e.errno == errno.ENAMETOOLONG or e.errno == errno.ENOENT) + self.assertRaises(securesystemslib.exceptions.StorageError, + repo_tool.create_new_repository, repository_directory, repository_name) # Reset metadata staged directory so that the targets directory can be # tested... @@ -1851,12 +1848,8 @@ def test_create_new_repository(self): original_targets_directory = tuf.repository_tool.TARGETS_DIRECTORY_NAME tuf.repository_tool.TARGETS_DIRECTORY_NAME = 'bad' * 2000 - try: - repo_tool.create_new_repository(repository_directory, repository_name) - - except OSError as e: - # errno.ENOENT is raised in Windows. - self.assertTrue(e.errno == errno.ENAMETOOLONG or e.errno == errno.ENOENT) + self.assertRaises(securesystemslib.exceptions.StorageError, + repo_tool.create_new_repository, repository_directory, repository_name) tuf.repository_tool.TARGETS_DIRECTORY_NAME = \ original_targets_directory @@ -1943,8 +1936,10 @@ def test_dump_signable_metadata(self): metadata_content = repo_tool.dump_signable_metadata(targets_metadata_file) # Test for an invalid targets metadata file.. - self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.dump_signable_metadata, 1) - self.assertRaises(IOError, repo_tool.dump_signable_metadata, 'bad file path') + self.assertRaises(securesystemslib.exceptions.FormatError, + repo_tool.dump_signable_metadata, 1) + self.assertRaises(securesystemslib.exceptions.StorageError, + repo_tool.dump_signable_metadata, 'bad file path') diff --git a/tests/test_root_versioning_integration.py b/tests/test_root_versioning_integration.py index 1eeb08d6ff..815674040e 100755 --- a/tests/test_root_versioning_integration.py +++ b/tests/test_root_versioning_integration.py @@ -41,6 +41,7 @@ import tuf.repository_tool as repo_tool import securesystemslib +import securesystemslib.storage logger = logging.getLogger(__name__) @@ -63,9 +64,11 @@ def tearDown(self): def test_init(self): # Test normal case. 
+ storage_backend = securesystemslib.storage.FilesystemBackend() repository = repo_tool.Repository('repository_directory/', 'metadata_directory/', - 'targets_directory/') + 'targets_directory/', + storage_backend) self.assertTrue(isinstance(repository.root, repo_tool.Root)) self.assertTrue(isinstance(repository.snapshot, repo_tool.Snapshot)) self.assertTrue(isinstance(repository.timestamp, repo_tool.Timestamp)) @@ -73,11 +76,11 @@ def test_init(self): # Test improperly formatted arguments. self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository, 3, - 'metadata_directory/', 'targets_directory') + 'metadata_directory/', 'targets_directory', storage_backend) self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository, - 'repository_directory', 3, 'targets_directory') + 'repository_directory', 3, 'targets_directory', storage_backend) self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository, - 'repository_directory', 'metadata_directory', 3) + 'repository_directory', 'metadata_directory', storage_backend, 3) diff --git a/tests/test_updater.py b/tests/test_updater.py index eb3e8e3790..24c0f236be 100644 --- a/tests/test_updater.py +++ b/tests/test_updater.py @@ -436,6 +436,7 @@ def test_1__refresh_must_not_count_duplicate_keyids_towards_threshold(self): repository.root.threshold = 2 repository.root.load_signing_key(self.role_keys['root']['private']) + storage_backend = securesystemslib.storage.FilesystemBackend() # The client uses the threshold from the previous root file to verify the # new root. Thus we need to make two updates so that the threshold used for # verification becomes 2. I.e. we bump the version, sign twice with the @@ -455,7 +456,8 @@ def test_1__refresh_must_not_count_duplicate_keyids_towards_threshold(self): # catch the unmet threshold. # We also skip writing to 'metadata.staged' and copying to 'metadata' and # instead write directly to 'metadata' - repo_lib.write_metadata_file(signed_metadata, live_root_path, info["version"], True) + repo_lib.write_metadata_file(signed_metadata, live_root_path, + info["version"], True, storage_backend) # Update from current '1.root.json' to '3.root.json' on client and assert diff --git a/tuf/client/updater.py b/tuf/client/updater.py index 36b14f1c67..c52ec661c2 100755 --- a/tuf/client/updater.py +++ b/tuf/client/updater.py @@ -140,6 +140,7 @@ import tuf.sig import tuf.exceptions +import securesystemslib.exceptions import securesystemslib.hash import securesystemslib.keys import securesystemslib.util @@ -211,7 +212,7 @@ def __init__(self, map_file): # The map file dictionary that associates targets with repositories. self.map_file = securesystemslib.util.load_json_file(map_file) - except (securesystemslib.exceptions.Error, IOError) as e: + except (securesystemslib.exceptions.Error) as e: raise tuf.exceptions.Error('Cannot load the map file: ' + str(e)) # Raise securesystemslib.exceptions.FormatError if the map file is @@ -3174,7 +3175,7 @@ def updated_targets(self, targets, destination_directory): algorithm=algorithm) # This exception would occur if the target does not exist locally. 
- except IOError: + except securesystemslib.exceptions.StorageError: updated_targets.append(target) updated_targetpaths.append(target_filepath) break diff --git a/tuf/developer_tool.py b/tuf/developer_tool.py index 177df4ace7..33e0c9aca8 100755 --- a/tuf/developer_tool.py +++ b/tuf/developer_tool.py @@ -501,8 +501,9 @@ def _generate_and_write_metadata(rolename, metadata_filename, write_partial, if tuf.sig.verify(signable, rolename, repository_name) or write_partial: repo_lib._remove_invalid_and_duplicate_signatures(signable, repository_name) + storage_backend = securesystemslib.storage.FilesystemBackend() filename = repo_lib.write_metadata_file(signable, metadata_filename, - metadata['version'], False) + metadata['version'], False, storage_backend) # 'signable' contains an invalid threshold of signatures. else: diff --git a/tuf/repository_lib.py b/tuf/repository_lib.py index 68ab80aff0..319ec862ea 100755 --- a/tuf/repository_lib.py +++ b/tuf/repository_lib.py @@ -37,7 +37,6 @@ import logging import shutil import json -import platform import tempfile import tuf @@ -56,6 +55,8 @@ import iso8601 import six +import securesystemslib.storage + # See 'log.py' to learn how logging is handled in TUF. logger = logging.getLogger(__name__) @@ -92,9 +93,10 @@ def _generate_and_write_metadata(rolename, metadata_filename, - targets_directory, metadata_directory, consistent_snapshot=False, - filenames=None, allow_partially_signed=False, increment_version_number=True, - repository_name='default', use_existing_fileinfo=False): + targets_directory, metadata_directory, storage_backend, + consistent_snapshot=False, filenames=None, allow_partially_signed=False, + increment_version_number=True, repository_name='default', + use_existing_fileinfo=False): """ Non-public function that can generate and write the metadata for the specified 'rolename'. It also increments the version number of 'rolename' if @@ -124,7 +126,7 @@ def _generate_and_write_metadata(rolename, metadata_filename, targets_filename = TARGETS_FILENAME[:-len(METADATA_EXTENSION)] metadata = generate_snapshot_metadata(metadata_directory, roleinfo['version'], roleinfo['expires'], targets_filename, - consistent_snapshot, repository_name) + storage_backend, consistent_snapshot, repository_name) _log_warning_if_expires_soon(SNAPSHOT_FILENAME, roleinfo['expires'], @@ -151,7 +153,7 @@ def _generate_and_write_metadata(rolename, metadata_filename, metadata = generate_targets_metadata(targets_directory, roleinfo['paths'], roleinfo['version'], roleinfo['expires'], roleinfo['delegations'], - consistent_targets, use_existing_fileinfo) + consistent_targets, use_existing_fileinfo, storage_backend) # Before writing 'rolename' to disk, automatically increment its version # number (if 'increment_version_number' is True) so that the caller does not @@ -205,7 +207,7 @@ def should_write(): if rolename == 'root': consistent_snapshot = True filename = write_metadata_file(signable, metadata_filename, - metadata['version'], consistent_snapshot) + metadata['version'], consistent_snapshot, storage_backend) # 'signable' contains an invalid threshold of signatures. else: @@ -231,11 +233,12 @@ def should_write(): # .root.json and root.json). 
if rolename == 'root': filename = write_metadata_file(signable, metadata_filename, - metadata['version'], consistent_snapshot=True) + metadata['version'], consistent_snapshot=True, + storage_backend=storage_backend) else: filename = write_metadata_file(signable, metadata_filename, - metadata['version'], consistent_snapshot) + metadata['version'], consistent_snapshot, storage_backend) return signable, filename @@ -273,50 +276,6 @@ def _metadata_is_partially_loaded(rolename, signable, repository_name): -def _check_directory(directory, must_exist=True): - """ - - Non-public function that ensures 'directory' is valid and it exists. This - is not a security check, but a way for the caller to determine the cause of - an invalid directory provided by the user. If the directory argument is - valid, it is returned normalized and as an absolute path. - - - directory: - The directory to check. - - must_exist: - A boolean indicating whether to check the directory exists. - - - securesystemslib.exceptions.Error, if 'directory' could not be validated. - - securesystemslib.exceptions.FormatError, if 'directory' is not properly - formatted. - - - None. - - - The normalized absolutized path of 'directory'. - """ - - # Does 'directory' have the correct format? - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - securesystemslib.formats.PATH_SCHEMA.check_match(directory) - - # Check if the directory exists. - if must_exist and not os.path.isdir(directory): - raise securesystemslib.exceptions.Error(repr(directory) + ' directory does not exist.') - - directory = os.path.abspath(directory) - - return directory - - - - - def _check_role_keys(rolename, repository_name): """ Non-public function that verifies the public and signing keys of 'rolename'. @@ -394,7 +353,7 @@ def _remove_invalid_and_duplicate_signatures(signable, repository_name): def _delete_obsolete_metadata(metadata_directory, snapshot_metadata, - consistent_snapshot, repository_name): + consistent_snapshot, repository_name, storage_backend): """ Non-public function that deletes metadata files marked as removed by 'repository_tool.py'. Revoked metadata files are not actually deleted until @@ -410,71 +369,58 @@ def _delete_obsolete_metadata(metadata_directory, snapshot_metadata, # is stored (including delegated roles). The 'django.json' role (e.g., # delegated by Targets) would be located in the # '{repository_directory}/metadata/' directory. - if os.path.exists(metadata_directory) and os.path.isdir(metadata_directory): - for directory_path, junk, files in os.walk(metadata_directory): - - # 'files' here is a list of target file names. - for basename in files: - - # If we encounter 'root.json', skip it. We don't ever delete root.json - # files, since they should it always exist. - if basename.endswith('root.json'): - continue - - metadata_path = os.path.join(directory_path, basename) - # Strip the metadata dirname and the leading path separator. - # '{repository_directory}/metadata/django.json' --> - # 'django.json' - metadata_name = \ - metadata_path[len(metadata_directory):].lstrip(os.path.sep) - - # Strip the version number if 'consistent_snapshot' is True. Example: - # '10.django.json' --> 'django.json'. Consistent and non-consistent - # metadata might co-exist if write() and - # write(consistent_snapshot=True) are mixed, so ensure only - # '.filename' metadata is stripped. - - # Should we check if 'consistent_snapshot' is True? 
It might have been - # set previously, but 'consistent_snapshot' can potentially be False - # now. We'll proceed with the understanding that 'metadata_name' can - # have a prepended version number even though the repository is now - # a non-consistent one. - if metadata_name not in snapshot_metadata['meta']: - metadata_name, junk = _strip_version_number(metadata_name, - consistent_snapshot) + metadata_files = sorted(storage_backend.list_folder(metadata_directory)) + for metadata_role in metadata_files: + if metadata_role.endswith('root.json'): + continue - else: - logger.debug(repr(metadata_name) + ' found in the snapshot role.') + metadata_path = os.path.join(metadata_directory, metadata_role) + # Strip the version number if 'consistent_snapshot' is True. Example: + # '10.django.json' --> 'django.json'. Consistent and non-consistent + # metadata might co-exist if write() and + # write(consistent_snapshot=True) are mixed, so ensure only + # '.filename' metadata is stripped. + # Should we check if 'consistent_snapshot' is True? It might have been + # set previously, but 'consistent_snapshot' can potentially be False + # now. We'll proceed with the understanding that 'metadata_name' can + # have a prepended version number even though the repository is now + # a non-consistent one. + if metadata_role not in snapshot_metadata['meta']: + metadata_role, junk = _strip_version_number(metadata_role, + consistent_snapshot) - # Strip metadata extension from filename. The role database does not - # include the metadata extension. - if metadata_name.endswith(METADATA_EXTENSION): - metadata_name = metadata_name[:-len(METADATA_EXTENSION)] + else: + logger.debug(repr(metadata_role) + ' found in the snapshot role.') - else: - logger.debug(repr(metadata_name) + ' does not match' - ' supported extension ' + repr(METADATA_EXTENSION)) - if metadata_name in ['root', 'targets', 'snapshot', 'timestamp']: - return - # Delete the metadata file if it does not exist in 'tuf.roledb'. - # 'repository_tool.py' might have removed 'metadata_name,' - # but its metadata file is not actually deleted yet. Do it now. - if not tuf.roledb.role_exists(metadata_name, repository_name): - logger.info('Removing outdated metadata: ' + repr(metadata_path)) - os.remove(metadata_path) + # Strip metadata extension from filename. The role database does not + # include the metadata extension. + if metadata_role.endswith(METADATA_EXTENSION): + metadata_role = metadata_role[:-len(METADATA_EXTENSION)] - else: - logger.debug('Not removing metadata: ' + repr(metadata_path)) + else: + logger.debug(repr(metadata_role) + ' does not match' + ' supported extension ' + repr(METADATA_EXTENSION)) - # TODO: Should we delete outdated consistent snapshots, or does it make - # more sense for integrators to remove outdated consistent snapshots? + if metadata_role in ['root', 'targets', 'snapshot', 'timestamp']: + logger.debug('Not removing top-level metadata ' + repr(metadata_role)) + return - else: - logger.debug('Metadata directory does not exist: ' + repr(metadata_directory)) + # Delete the metadata file if it does not exist in 'tuf.roledb'. + # 'repository_tool.py' might have removed 'metadata_role', + # but its metadata file is not actually deleted yet. Do it now. 
+ if not tuf.roledb.role_exists(metadata_role, repository_name): + logger.info('Removing outdated metadata: ' + repr(metadata_path)) + storage_backend.remove(metadata_path) + + else: + logger.debug('Not removing metadata: ' + repr(metadata_path)) + + # TODO: Should we delete outdated consistent snapshots, or does it make + # more sense for integrators to remove outdated consistent snapshots? @@ -540,8 +486,7 @@ def _load_top_level_metadata(repository, top_level_filenames, repository_name): # Load 'root.json'. A Root role file without a version number is always # written. - if os.path.exists(root_filename): - + try: # Initialize the key and role metadata of the top-level roles. signable = securesystemslib.util.load_json_file(root_filename) tuf.formats.check_signable_object_format(signable) @@ -579,13 +524,13 @@ def _load_top_level_metadata(repository, top_level_filenames, repository_name): # Ensure the 'consistent_snapshot' field is extracted. consistent_snapshot = root_metadata['consistent_snapshot'] - else: + except securesystemslib.exceptions.StorageError: raise tuf.exceptions.RepositoryError('Cannot load the required' - ' root file: ' + repr(root_filename)) + ' root file: ' + repr(root_filename)) # Load 'timestamp.json'. A Timestamp role file without a version number is # always written. - if os.path.exists(timestamp_filename): + try: signable = securesystemslib.util.load_json_file(timestamp_filename) timestamp_metadata = signable['signed'] for signature in signable['signatures']: @@ -608,8 +553,9 @@ def _load_top_level_metadata(repository, top_level_filenames, repository_name): tuf.roledb.update_roleinfo('timestamp', roleinfo, mark_role_as_dirty=False, repository_name=repository_name) - else: - logger.debug('Cannot load the Timestamp file: ' + repr(timestamp_filename)) + except securesystemslib.exceptions.StorageError: + raise tuf.exceptions.RepositoryError('Cannot load the Timestamp file: ' + + repr(timestamp_filename)) # Load 'snapshot.json'. A consistent snapshot.json must be calculated if # 'consistent_snapshot' is True. @@ -622,7 +568,7 @@ def _load_top_level_metadata(repository, top_level_filenames, repository_name): snapshot_filename = os.path.join(dirname, str(snapshot_version) + '.' + basename + METADATA_EXTENSION) - if os.path.exists(snapshot_filename): + try: signable = securesystemslib.util.load_json_file(snapshot_filename) tuf.formats.check_signable_object_format(signable) snapshot_metadata = signable['signed'] @@ -647,8 +593,9 @@ def _load_top_level_metadata(repository, top_level_filenames, repository_name): tuf.roledb.update_roleinfo('snapshot', roleinfo, mark_role_as_dirty=False, repository_name=repository_name) - else: - logger.debug('The Snapshot file cannot be loaded: ' + repr(snapshot_filename)) + except securesystemslib.exceptions.StorageError: + raise tuf.exceptions.RepositoryError('The Snapshot file cannot be loaded: ' + + repr(snapshot_filename)) # Load 'targets.json'. A consistent snapshot of the Targets role must be # calculated if 'consistent_snapshot' is True. @@ -657,7 +604,7 @@ def _load_top_level_metadata(repository, top_level_filenames, repository_name): dirname, basename = os.path.split(targets_filename) targets_filename = os.path.join(dirname, str(targets_version) + '.' 
+ basename) - if os.path.exists(targets_filename): + try: signable = securesystemslib.util.load_json_file(targets_filename) tuf.formats.check_signable_object_format(signable) targets_metadata = signable['signed'] @@ -711,8 +658,9 @@ def _load_top_level_metadata(repository, top_level_filenames, repository_name): except tuf.exceptions.KeyAlreadyExistsError: pass - else: - logger.debug('The Targets file cannot be loaded: ' + repr(targets_filename)) + except securesystemslib.exceptions.StorageError: + raise tuf.exceptions.RepositoryError('The Targets file can not be loaded: ' + + repr(targets_filename)) return repository, consistent_snapshot @@ -864,7 +812,7 @@ def import_ed25519_privatekey_from_file(filepath, password=None): return private_key -def get_metadata_filenames(metadata_directory=None): +def get_metadata_filenames(metadata_directory): """ Return a dictionary containing the filenames of the top-level roles. @@ -895,9 +843,6 @@ def get_metadata_filenames(metadata_directory=None): metadata files, such as 'root.json' and 'snapshot.json'. """ - if metadata_directory is None: - metadata_directory = os.getcwd() - # Does 'metadata_directory' have the correct format? # Ensure the arguments have the appropriate number of objects and object # types, and that all dict keys are properly named. @@ -926,7 +871,7 @@ def get_metadata_filenames(metadata_directory=None): -def get_metadata_fileinfo(filename, custom=None): +def get_metadata_fileinfo(filename, storage_backend, custom=None): """ Retrieve the file information of 'filename'. The object returned @@ -945,12 +890,14 @@ def get_metadata_fileinfo(filename, custom=None): custom: An optional object providing additional information about the file. + storage_backend: + An object which implements + securesystemslib.storage.StorageBackendInterface. + securesystemslib.exceptions.FormatError, if 'filename' is improperly formatted. - securesystemslib.exceptions.Error, if 'filename' doesn't exist. - The file is opened and information about the file is generated, such as file size and its hash. @@ -969,17 +916,13 @@ def get_metadata_fileinfo(filename, custom=None): if custom is not None: tuf.formats.CUSTOM_SCHEMA.check_match(custom) - if not os.path.isfile(filename): - message = repr(filename) + ' is not a file.' - raise securesystemslib.exceptions.Error(message) - # Note: 'filehashes' is a dictionary of the form # {'sha256': 1233dfba312, ...}. 'custom' is an optional # dictionary that a client might define to include additional # file information, such as the file's author, version/revision # numbers, etc. filesize, filehashes = securesystemslib.util.get_file_details(filename, - tuf.settings.FILE_HASH_ALGORITHMS) + tuf.settings.FILE_HASH_ALGORITHMS, storage_backend) return tuf.formats.make_fileinfo(filesize, filehashes, custom=custom) @@ -1215,7 +1158,7 @@ def generate_root_metadata(version, expiration_date, consistent_snapshot, def generate_targets_metadata(targets_directory, target_files, version, expiration_date, delegations=None, write_consistent_targets=False, - use_existing_fileinfo=False): + use_existing_fileinfo=False, storage_backend=None): """ Generate the targets metadata object. The targets in 'target_files' must @@ -1225,8 +1168,8 @@ def generate_targets_metadata(targets_directory, target_files, version, targets_directory: - The directory containing the target files and directories of the - repository. + The absolute path to a directory containing the target files and + directories of the repository. 
target_files: The target files tracked by 'targets.json'. 'target_files' is a @@ -1255,6 +1198,10 @@ def generate_targets_metadata(targets_directory, target_files, version, hashes, as already exists in the roledb (True) or whether to generate hashes (False). + storage_backend: + An object which implements + securesystemslib.storage.StorageBackendInterface. + securesystemslib.exceptions.FormatError, if an error occurred trying to generate the targets metadata object. @@ -1269,8 +1216,8 @@ def generate_targets_metadata(targets_directory, target_files, version, write_consistent_targets are True. - If use_existing_fileinfo is False, the target files are read and file - information generated about them. + If use_existing_fileinfo is False, the target files are read from storage + and file information about them is generated. If 'write_consistent_targets' is True, each target in 'target_files' will be copied to a file with a digest prepended to its filename. For example, if 'some_file.txt' is one of the targets of 'target_files', consistent targets @@ -1305,11 +1252,6 @@ def generate_targets_metadata(targets_directory, target_files, version, # targets metadata object returned. filedict = {} - # Ensure the user is aware of a non-existent 'target_directory', and convert - # it to its abosolute path, if it exists. - check_exists = not use_existing_fileinfo - targets_directory = _check_directory(targets_directory, check_exists) - if use_existing_fileinfo: for target, fileinfo in six.iteritems(target_files): @@ -1325,8 +1267,11 @@ def generate_targets_metadata(targets_directory, target_files, version, filedict[target] = fileinfo else: + if storage_backend is None: + storage_backend = securesystemslib.storage.FilesystemBackend() + filedict = _generate_targets_fileinfo(target_files, targets_directory, - write_consistent_targets) + write_consistent_targets, storage_backend) # Generate the targets metadata object. # Use generalized build_dict_conforming_to_schema func to produce a dict that @@ -1361,7 +1306,7 @@ def generate_targets_metadata(targets_directory, target_files, version, def _generate_targets_fileinfo(target_files, targets_directory, - write_consistent_targets): + write_consistent_targets, storage_backend): """ Iterate over target_files and: * ensure they exist in the targets_directory @@ -1385,19 +1330,13 @@ def _generate_targets_fileinfo(target_files, targets_directory, # path separator (i.e., is treated as an absolute path). target_path = os.path.join(targets_directory, target.lstrip(os.sep)) - # Ensure all target files listed in 'target_files' exist. If just one of - # these files does not exist, raise an exception. - if not os.path.exists(target_path): - raise securesystemslib.exceptions.Error(repr(target_path) + ' cannot' - ' be read. Unable to generate targets metadata.') - # Add 'custom' if it has been provided. Custom data about the target is # optional and will only be included in metadata (i.e., a 'custom' field in # the target's fileinfo dictionary) if specified here. custom_data = fileinfo.get('custom', None) filedict[relative_targetpath] = \ - get_metadata_fileinfo(target_path, custom_data) + get_metadata_fileinfo(target_path, storage_backend, custom_data) # Copy 'target_path' to 'digest_target' if consistent hashing is enabled. 
if write_consistent_targets: @@ -1413,7 +1352,8 @@ def _generate_targets_fileinfo(target_files, targets_directory, def generate_snapshot_metadata(metadata_directory, version, expiration_date, - targets_filename, consistent_snapshot=False, repository_name='default'): + targets_filename, storage_backend, consistent_snapshot=False, + repository_name='default'): """ Create the snapshot metadata. The minimum metadata must exist (i.e., @@ -1439,6 +1379,10 @@ def generate_snapshot_metadata(metadata_directory, version, expiration_date, The filename of the top-level targets role. The hash and file size of this file is listed in the snapshot role. + storage_backend: + An object which implements + securesystemslib.storage.StorageBackendInterface. + consistent_snapshot: Boolean. If True, a file digest is expected to be prepended to the filename of any target file located in the targets directory. Each digest @@ -1473,24 +1417,19 @@ def generate_snapshot_metadata(metadata_directory, version, expiration_date, securesystemslib.formats.BOOLEAN_SCHEMA.check_match(consistent_snapshot) securesystemslib.formats.NAME_SCHEMA.check_match(repository_name) - metadata_directory = _check_directory(metadata_directory) - # Snapshot's 'fileinfodict' shall contain the version number of Root, # Targets, and all delegated roles fo the repository. fileinfodict = {} fileinfodict[TARGETS_FILENAME] = get_metadata_versioninfo(targets_filename, repository_name) - # We previously also stored the compressed versions of roles in - # snapshot.json, however, this is no longer needed as their hashes and - # lengths are not used and their version numbers match the uncompressed role - # files. - # Search the metadata directory and generate the versioninfo of all the role # files found there. This information is stored in the 'meta' field of # 'snapshot.json'. - for metadata_filename in sorted(os.listdir(metadata_directory), reverse=True): + metadata_files = sorted(storage_backend.list_folder(metadata_directory), + reverse=True) + for metadata_filename in metadata_files: # Strip the version number if 'consistent_snapshot' is True. # Example: '10.django.json' --> 'django.json' metadata_name, junk = _strip_version_number(metadata_filename, @@ -1557,6 +1496,10 @@ def generate_timestamp_metadata(snapshot_filename, version, expiration_date, The name of the repository. If not supplied, 'rolename' is added to the 'default' repository. + storage_backend: + An object which implements + securesystemslib.storage.StorageBackendInterface. + securesystemslib.exceptions.FormatError, if the generated timestamp metadata object cannot be formatted correctly, or one of the arguments is improperly @@ -1586,11 +1529,6 @@ def generate_timestamp_metadata(snapshot_filename, version, expiration_date, snapshot_fileinfo[SNAPSHOT_FILENAME] = \ tuf.formats.make_fileinfo(length, hashes, version=snapshot_version['version']) - # We previously saved the versioninfo of the compressed versions of - # 'snapshot.json' in 'versioninfo'. Since version numbers are now stored, - # the version numbers of compressed roles do not change and can thus be - # excluded. - # Generate the timestamp metadata object. 
# Use generalized build_dict_conforming_to_schema func to produce a dict that # contains all the appropriate information for timestamp metadata, @@ -1698,7 +1636,8 @@ def sign_metadata(metadata_object, keyids, filename, repository_name): -def write_metadata_file(metadata, filename, version_number, consistent_snapshot): +def write_metadata_file(metadata, filename, version_number, consistent_snapshot, + storage_backend): """ If necessary, write the 'metadata' signable object to 'filename'. @@ -1720,6 +1659,10 @@ def write_metadata_file(metadata, filename, version_number, consistent_snapshot) Boolean that determines whether the metadata file's digest should be prepended to the filename. + storage_backend: + An object which implements + securesystemslib.storage.StorageBackendInterface. + securesystemslib.exceptions.FormatError, if the arguments are improperly formatted. @@ -1745,11 +1688,8 @@ def write_metadata_file(metadata, filename, version_number, consistent_snapshot) tuf.formats.METADATAVERSION_SCHEMA.check_match(version_number) securesystemslib.formats.BOOLEAN_SCHEMA.check_match(consistent_snapshot) - # Verify the directory of 'filename', and convert 'filename' to its absolute - # path so that temporary files are moved to their expected destinations. - filename = os.path.abspath(filename) - written_filename = filename - _check_directory(os.path.dirname(filename)) + if storage_backend is None: + storage_backend = securesystemslib.storage.FilesystemBackend() # Generate the actual metadata file content of 'metadata'. Metadata is # saved as JSON and includes formatting, such as indentation and sorted @@ -1772,7 +1712,7 @@ def write_metadata_file(metadata, filename, version_number, consistent_snapshot) file_object.write(file_content) if consistent_snapshot: - dirname, basename = os.path.split(written_filename) + dirname, basename = os.path.split(filename) basename = basename.split(METADATA_EXTENSION, 1)[0] version_and_filename = str(version_number) + '.' + basename + METADATA_EXTENSION written_consistent_filename = os.path.join(dirname, version_and_filename) @@ -1781,50 +1721,27 @@ def write_metadata_file(metadata, filename, version_number, consistent_snapshot) # would always point to the current version. Example: 1.root.json and # 2.root.json -> root.json. If consistent snapshot is True, we should save # the consistent snapshot and point 'written_filename' to it. - logger.debug('Creating a consistent file for ' + repr(written_filename)) + logger.debug('Creating a consistent file for ' + repr(filename)) logger.debug('Saving ' + repr(written_consistent_filename)) - securesystemslib.util.persist_temp_file(file_object, written_consistent_filename) - - # For GitHub issue #374 https://github.com/theupdateframework/tuf/issues/374 - # We provide the option of either (1) creating a link via os.link() to the - # consistent file or (2) creating a copy of the consistent file and saving - # to its expected filename (e.g., root.json). The option of either - # creating a copy or link should be configurable in tuf.settings.py. - if tuf.settings.CONSISTENT_METHOD == 'copy' or platform.system() == 'Windows': - logger.debug('Pointing ' + repr(filename) + ' to the consistent' - ' file: ' + repr(written_consistent_filename)) - shutil.copyfile(written_consistent_filename, written_filename) - - elif tuf.settings.CONSISTENT_METHOD == 'hard_link': - logger.info('Hard linking ' + repr(written_consistent_filename)) - - # 'written_filename' must not exist, otherwise os.link() complains. 
- if os.path.exists(written_filename): - os.remove(written_filename) + securesystemslib.util.persist_temp_file(file_object, + written_consistent_filename, should_close=False) - else: - logger.debug(repr(written_filename) + ' does not exist.') + else: + logger.debug('Not creating a consistent snapshot for ' + repr(filename)) - os.link(written_consistent_filename, written_filename) + logger.debug('Saving ' + repr(filename)) + storage_backend.put(file_object, filename) - else: - raise securesystemslib.exceptions.InvalidConfigurationError('The' - ' consistent method specified in tuf.settings.py is not supported, try' - ' either "copy" or "hard_link"') - - else: - logger.debug('Not creating a consistent snapshot for ' + repr(written_filename)) - logger.debug('Saving ' + repr(written_filename)) - securesystemslib.util.persist_temp_file(file_object, written_filename) + file_object.close() - return written_filename + return filename def _log_status_of_top_level_roles(targets_directory, metadata_directory, - repository_name): + repository_name, storage_backend): """ Non-public function that logs whether any of the top-level roles contain an invalid number of public and private keys, or an insufficient threshold of @@ -1877,7 +1794,7 @@ def _log_status_of_top_level_roles(targets_directory, metadata_directory, try: signable, root_filename = \ _generate_and_write_metadata('root', root_filename, targets_directory, - metadata_directory, repository_name=repository_name) + metadata_directory, storage_backend, repository_name=repository_name) _log_status('root', signable, repository_name) # 'tuf.exceptions.UnsignedMetadataError' raised if metadata contains an @@ -1904,7 +1821,8 @@ def _log_status_of_top_level_roles(targets_directory, metadata_directory, try: signable, targets_filename = \ _generate_and_write_metadata('targets', targets_filename, - targets_directory, metadata_directory, repository_name=repository_name) + targets_directory, metadata_directory, storage_backend, + repository_name=repository_name) _log_status('targets', signable, repository_name) except tuf.exceptions.UnsignedMetadataError as e: @@ -1929,8 +1847,8 @@ def _log_status_of_top_level_roles(targets_directory, metadata_directory, try: signable, snapshot_filename = \ _generate_and_write_metadata('snapshot', snapshot_filename, - targets_directory, metadata_directory, False, filenames, - repository_name=repository_name) + targets_directory, metadata_directory, storage_backend, False, + filenames, repository_name=repository_name) _log_status('snapshot', signable, repository_name) except tuf.exceptions.UnsignedMetadataError as e: @@ -1955,8 +1873,8 @@ def _log_status_of_top_level_roles(targets_directory, metadata_directory, try: signable, timestamp_filename = \ _generate_and_write_metadata('timestamp', timestamp_filename, - targets_directory, metadata_directory, False, filenames, - repository_name=repository_name) + targets_directory, metadata_directory, storage_backend, + False, filenames, repository_name=repository_name) _log_status('timestamp', signable, repository_name) except tuf.exceptions.UnsignedMetadataError as e: diff --git a/tuf/repository_tool.py b/tuf/repository_tool.py index 47ef7512ca..6e2e967f0a 100755 --- a/tuf/repository_tool.py +++ b/tuf/repository_tool.py @@ -32,7 +32,6 @@ from __future__ import unicode_literals import os -import errno import time import datetime import logging @@ -54,6 +53,8 @@ import iso8601 import six +import securesystemslib.storage + # Copy API # pylint: disable=unused-import @@ -170,6 +171,10 @@ 
class Repository(object): downloaded. Metadata files are similarly referenced in the top-level metadata. + storage_backend: + An object which implements + securesystemslib.storage.StorageBackendInterface. + repository_name: The name of the repository. If not supplied, 'rolename' is added to the 'default' repository. @@ -187,7 +192,7 @@ class Repository(object): """ def __init__(self, repository_directory, metadata_directory, - targets_directory, repository_name='default'): + targets_directory, storage_backend, repository_name='default'): # Do the arguments have the correct format? # Ensure the arguments have the appropriate number of objects and object @@ -202,6 +207,7 @@ def __init__(self, repository_directory, metadata_directory, self._metadata_directory = metadata_directory self._targets_directory = targets_directory self._repository_name = repository_name + self._storage_backend = storage_backend try: tuf.roledb.create_roledb(repository_name) @@ -294,7 +300,7 @@ def writeall(self, consistent_snapshot=False, use_existing_fileinfo=False): dirty_rolename + METADATA_EXTENSION) repo_lib._generate_and_write_metadata(dirty_rolename, dirty_filename, self._targets_directory, self._metadata_directory, - consistent_snapshot, filenames, + self._storage_backend, consistent_snapshot, filenames, repository_name=self._repository_name, use_existing_fileinfo=use_existing_fileinfo) @@ -308,14 +314,14 @@ def writeall(self, consistent_snapshot=False, use_existing_fileinfo=False): if 'root' in dirty_rolenames or consistent_snapshot != old_consistent_snapshot: repo_lib._generate_and_write_metadata('root', filenames['root'], self._targets_directory, self._metadata_directory, - consistent_snapshot, filenames, + self._storage_backend, consistent_snapshot, filenames, repository_name=self._repository_name) # Generate the 'targets.json' metadata file. if 'targets' in dirty_rolenames: repo_lib._generate_and_write_metadata('targets', filenames['targets'], self._targets_directory, self._metadata_directory, - consistent_snapshot, + self._storage_backend, consistent_snapshot, repository_name=self._repository_name, use_existing_fileinfo=use_existing_fileinfo) @@ -323,13 +329,15 @@ def writeall(self, consistent_snapshot=False, use_existing_fileinfo=False): if 'snapshot' in dirty_rolenames: snapshot_signable, junk = repo_lib._generate_and_write_metadata('snapshot', filenames['snapshot'], self._targets_directory, - self._metadata_directory, consistent_snapshot, filenames, + self._metadata_directory, self._storage_backend, + consistent_snapshot, filenames, repository_name=self._repository_name) # Generate the 'timestamp.json' metadata file. if 'timestamp' in dirty_rolenames: repo_lib._generate_and_write_metadata('timestamp', filenames['timestamp'], - self._targets_directory, self._metadata_directory, consistent_snapshot, + self._targets_directory, self._metadata_directory, + self._storage_backend, consistent_snapshot, filenames, repository_name=self._repository_name) tuf.roledb.unmark_dirty(dirty_rolenames, self._repository_name) @@ -340,7 +348,8 @@ def writeall(self, consistent_snapshot=False, use_existing_fileinfo=False): # load them. 
     if snapshot_signable is not None:
       repo_lib._delete_obsolete_metadata(self._metadata_directory,
-          snapshot_signable['signed'], consistent_snapshot, self._repository_name)
+          snapshot_signable['signed'], consistent_snapshot, self._repository_name,
+          self._storage_backend)
 
@@ -399,7 +408,8 @@ def write(self, rolename, consistent_snapshot=False, increment_version_number=Tr
         'timestamp': os.path.join(self._metadata_directory,
             repo_lib.TIMESTAMP_FILENAME)}
 
     repo_lib._generate_and_write_metadata(rolename, rolename_filename,
-        self._targets_directory, self._metadata_directory, consistent_snapshot,
+        self._targets_directory, self._metadata_directory,
+        self._storage_backend, consistent_snapshot,
         filenames=filenames, allow_partially_signed=True,
         increment_version_number=increment_version_number,
         repository_name=self._repository_name,
@@ -451,7 +461,7 @@ def status(self):
       # Verify the top-level roles and log the results.
       repo_lib._log_status_of_top_level_roles(targets_directory,
-          metadata_directory, self._repository_name)
+          metadata_directory, self._repository_name, self._storage_backend)
 
     finally:
       shutil.rmtree(temp_repository_directory, ignore_errors=True)
@@ -2938,7 +2948,8 @@ def _find_bin_for_hash(path_hash, number_of_bins):
 
 
 
-def create_new_repository(repository_directory, repository_name='default'):
+def create_new_repository(repository_directory, repository_name='default',
+    storage_backend=None):
   """
   Create a new repository, instantiate barebones metadata for the top-level
@@ -2957,6 +2968,11 @@ def create_new_repository(repository_directory, repository_name='default'):
       The name of the repository. If not supplied, 'rolename' is added to the
       'default' repository.
 
+    storage_backend:
+      An object which implements
+      securesystemslib.storage.StorageBackendInterface. When no object is
+      passed a FilesystemBackend will be instantiated and used.
+
     securesystemslib.exceptions.FormatError, if the arguments are improperly
     formatted.
 
@@ -2976,25 +2992,18 @@ def create_new_repository(repository_directory, repository_name='default'):
   securesystemslib.formats.PATH_SCHEMA.check_match(repository_directory)
   securesystemslib.formats.NAME_SCHEMA.check_match(repository_name)
 
+  if storage_backend is None:
+    storage_backend = securesystemslib.storage.FilesystemBackend()
+
   # Set the repository, metadata, and targets directories.  These directories
   # are created if they do not exist.
   repository_directory = os.path.abspath(repository_directory)
   metadata_directory = None
   targets_directory = None
 
-  # Try to create 'repository_directory' if it does not exist.
-  try:
-    logger.info('Creating ' + repr(repository_directory))
-    os.makedirs(repository_directory)
-
-  # 'OSError' raised if the leaf directory already exists or cannot be created.
-  # Check for case where 'repository_directory' has already been created.
-  except OSError as e:
-    if e.errno == errno.EEXIST:
-      pass
-
-    else:
-      raise
+  # Ensure the 'repository_directory' exists
+  logger.info('Creating ' + repr(repository_directory))
+  storage_backend.create_folder(repository_directory)
 
   # Set the metadata and targets directories.  The metadata directory is a
   # staged one so that the "live" repository is not affected.  The
@@ -3004,37 +3013,19 @@ def create_new_repository(repository_directory, repository_name='default'):
       METADATA_STAGED_DIRECTORY_NAME)
   targets_directory = os.path.join(repository_directory, TARGETS_DIRECTORY_NAME)
 
-  # Try to create the metadata directory that will hold all of the metadata
-  # files, such as 'root.json' and 'snapshot.json'.
-  try:
-    logger.info('Creating ' + repr(metadata_directory))
-    os.mkdir(metadata_directory)
-
-  # 'OSError' raised if the leaf directory already exists or cannot be created.
-  except OSError as e:
-    if e.errno == errno.EEXIST:
-      pass
-
-    else:
-      raise
+  # Ensure the metadata directory exists
+  logger.info('Creating ' + repr(metadata_directory))
+  storage_backend.create_folder(metadata_directory)
 
-  # Try to create the targets directory that will hold all of the target files.
-  try:
-    logger.info('Creating ' + repr(targets_directory))
-    os.mkdir(targets_directory)
-
-  except OSError as e:
-    if e.errno == errno.EEXIST:
-      pass
-
-    else:
-      raise
+  # Ensure the targets directory exists
+  logger.info('Creating ' + repr(targets_directory))
+  storage_backend.create_folder(targets_directory)
 
   # Create the bare bones repository object, where only the top-level roles
   # have been set and contain default values (e.g., Root roles has a threshold
   # of 1, expires 1 year into the future, etc.)
   repository = Repository(repository_directory, metadata_directory,
-      targets_directory, repository_name)
+      targets_directory, storage_backend, repository_name)
 
   return repository
 
@@ -3042,7 +3033,8 @@ def create_new_repository(repository_directory, repository_name='default'):
 
 
 
-def load_repository(repository_directory, repository_name='default'):
+def load_repository(repository_directory, repository_name='default',
+    storage_backend=None):
   """
   Return a repository object containing the contents of metadata files loaded
@@ -3057,6 +3049,11 @@ def load_repository(repository_directory, repository_name='default'):
       The name of the repository. If not supplied, 'default' is used as the
       repository name.
 
+    storage_backend:
+      An object which implements
+      securesystemslib.storage.StorageBackendInterface. When no object is
+      passed a FilesystemBackend will be instantiated and used.
+
     securesystemslib.exceptions.FormatError, if 'repository_directory' or any of
     the metadata files are improperly formatted.
 
@@ -3077,6 +3074,9 @@ def load_repository(repository_directory, repository_name='default'):
   securesystemslib.formats.PATH_SCHEMA.check_match(repository_directory)
   securesystemslib.formats.NAME_SCHEMA.check_match(repository_name)
 
+  if storage_backend is None:
+    storage_backend = securesystemslib.storage.FilesystemBackend()
+
   repository_directory = os.path.abspath(repository_directory)
   metadata_directory = os.path.join(repository_directory,
       METADATA_STAGED_DIRECTORY_NAME)
@@ -3085,7 +3085,7 @@ def load_repository(repository_directory, repository_name='default'):
   # The Repository() object loaded (i.e., containing all the metadata roles
   # found) and returned.
   repository = Repository(repository_directory, metadata_directory,
-      targets_directory, repository_name)
+      targets_directory, storage_backend, repository_name)
 
   filenames = repo_lib.get_metadata_filenames(metadata_directory)
 
@@ -3106,7 +3106,9 @@ def load_repository(repository_directory, repository_name='default'):
   loaded_metadata = []
 
   targets_objects['targets'] = repository.targets
 
-  for metadata_role in sorted(os.listdir(metadata_directory), reverse=True):
+  metadata_files = sorted(storage_backend.list_folder(metadata_directory),
+      reverse=True)
+  for metadata_role in metadata_files:
 
     metadata_path = os.path.join(metadata_directory, metadata_role)
     metadata_name = \
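Since repository_tool.py now touches storage only through the backend object (put, create_folder, list_folder), non-filesystem storage can be plugged in without further changes to these call sites. As a sanity check of the abstraction, here is a toy in-memory backend; it assumes the six methods of securesystemslib.storage.StorageBackendInterface in securesystemslib 0.15.0 (get/put/remove/getsize/create_folder/list_folder) and is a sketch for tests, not a hardened implementation.

```python
import io
from contextlib import contextmanager

import securesystemslib.exceptions
import securesystemslib.storage


class InMemoryBackend(securesystemslib.storage.StorageBackendInterface):
  """Toy backend keeping 'files' in a dict keyed by path."""

  def __init__(self):
    self._files = {}

  @contextmanager
  def get(self, filepath):
    # get() is used as a context manager yielding a file-like object.
    if filepath not in self._files:
      raise securesystemslib.exceptions.StorageError(
          'Can not read ' + filepath)
    yield io.BytesIO(self._files[filepath])

  def put(self, fileobj, filepath):
    if not fileobj.closed:
      fileobj.seek(0)
    self._files[filepath] = fileobj.read()

  def remove(self, filepath):
    self._files.pop(filepath, None)

  def getsize(self, filepath):
    return len(self._files[filepath])

  def create_folder(self, filepath):
    # Folders are implicit in a key-value store; nothing to do.
    pass

  def list_folder(self, filepath):
    prefix = filepath.rstrip('/') + '/'
    return [p[len(prefix):] for p in self._files if p.startswith(prefix)]


# Example round trip, e.g. for a unit test:
backend = InMemoryBackend()
backend.put(io.BytesIO(b'{"signed": {}}'), 'repo/metadata.staged/root.json')
with backend.get('repo/metadata.staged/root.json') as role_file:
  assert role_file.read() == b'{"signed": {}}'
assert backend.list_folder('repo/metadata.staged') == ['root.json']
```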
diff --git a/tuf/settings.py b/tuf/settings.py
index eb8ae34acf..2dcc8e3b25 100755
--- a/tuf/settings.py
+++ b/tuf/settings.py
@@ -87,14 +87,6 @@
 # By default, limit number of delegatees we visit for any target.
 MAX_NUMBER_OF_DELEGATIONS = 2**5
 
-# This configuration is for indicating how consistent files should be created.
-# There are two options: "copy" and "hard_link".  For "copy", the consistent
-# file will be a copy of root.json.  This approach will require the most disk
-# space out of the two options.  For "hard_link", the latest root.json will be
-# a hard link to 2.root.json (for example).  This approach is more efficient in
-# terms of disk space usage.  By default, we use 'copy'.
-CONSISTENT_METHOD = 'copy'
-
 # A setting for the instances where a default hashing algorithm is needed.
 # This setting is currently used to calculate the path hash prefixes of hashed
 # bin delegations, and digests of targets filepaths.  The other instances
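End-to-end, the new storage_backend parameters are keyword arguments defaulting to None, so existing callers are unaffected. A short sketch of the updated public entry points, with a hypothetical repository path:

```python
import securesystemslib.storage
import tuf.repository_tool as repo_tool

# Omitting storage_backend keeps the old behaviour: the function
# instantiates a FilesystemBackend internally.
repository = repo_tool.create_new_repository('/tmp/example-repo')

# Any StorageBackendInterface implementation can be supplied instead;
# FilesystemBackend stands in here for, say, a cloud-bucket backend.
backend = securesystemslib.storage.FilesystemBackend()
repository = repo_tool.create_new_repository('/tmp/example-repo',
    repository_name='example', storage_backend=backend)

# load_repository accepts the same keyword argument; it assumes metadata
# has already been written under /tmp/example-repo/metadata.staged/.
#repository = repo_tool.load_repository('/tmp/example-repo',
#    repository_name='example', storage_backend=backend)
```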